Mirror of https://github.com/astaxie/beego.git (synced 2025-06-27 16:40:19 +00:00)

Commit: add vendor
vendor/github.com/belogik/goes/LICENSE (generated, vendored, new file, +27 lines)
@@ -0,0 +1,27 @@
Copyright (c) 2013 Belogik. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Belogik nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/belogik/goes/Makefile (generated, vendored, new file, +10 lines)
@@ -0,0 +1,10 @@
help:
	@echo "Available targets:"
	@echo "- test: run tests"
	@echo "- installdependencies: installs dependencies declared in dependencies.txt"

installdependencies:
	cat dependencies.txt | xargs go get

test: installdependencies
	go test -i && go test
vendor/github.com/belogik/goes/README (generated, vendored, new file, +7 lines)
@@ -0,0 +1,7 @@
There is a new maintainer for this library.

Please go here: https://github.com/OwnLocal/goes

!!!!! By using this repo you are running on thin ice !!!!!!

https://github.com/belogik/goes/issues/40 might be of interest to you.
vendor/github.com/belogik/goes/TODO (generated, vendored, new file, +1 line)
@@ -0,0 +1 @@
- Add Gzip support to bulk data to save bandwidth
vendor/github.com/belogik/goes/dependencies.txt (generated, vendored, new file, +1 line)
@@ -0,0 +1 @@
launchpad.net/gocheck
vendor/github.com/belogik/goes/goes.go (generated, vendored, new file, +603 lines)
@@ -0,0 +1,603 @@
// Copyright 2013 Belogik. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package goes provides an API to access Elasticsearch.
package goes

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"reflect"
	"strconv"
	"strings"
)

const (
	BULK_COMMAND_INDEX  = "index"
	BULK_COMMAND_DELETE = "delete"
)

func (err *SearchError) Error() string {
	return fmt.Sprintf("[%d] %s", err.StatusCode, err.Msg)
}

// NewConnection initiates a new Connection to an elasticsearch server
//
// This function is pretty useless for now but might be useful in the near future
// if we need more features like connection pooling or load balancing.
func NewConnection(host string, port string) *Connection {
	return &Connection{host, port, http.DefaultClient}
}

func (c *Connection) WithClient(cl *http.Client) *Connection {
	c.Client = cl
	return c
}

// CreateIndex creates a new index represented by a name and a mapping
func (c *Connection) CreateIndex(name string, mapping interface{}) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     mapping,
		IndexList: []string{name},
		method:    "PUT",
	}

	return r.Run()
}

// DeleteIndex deletes an index represented by a name
func (c *Connection) DeleteIndex(name string) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: []string{name},
		method:    "DELETE",
	}

	return r.Run()
}

// RefreshIndex refreshes an index represented by a name
func (c *Connection) RefreshIndex(name string) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: []string{name},
		method:    "POST",
		api:       "_refresh",
	}

	return r.Run()
}

// UpdateIndexSettings updates settings for an existing index represented by a name and settings
// as described here: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html
func (c *Connection) UpdateIndexSettings(name string, settings interface{}) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     settings,
		IndexList: []string{name},
		method:    "PUT",
		api:       "_settings",
	}

	return r.Run()
}

// Optimize an index represented by a name; extra args are also allowed, please check:
// http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/indices-optimize.html#indices-optimize
func (c *Connection) Optimize(indexList []string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: indexList,
		ExtraArgs: extraArgs,
		method:    "POST",
		api:       "_optimize",
	}

	return r.Run()
}

// Stats fetches statistics (_stats) for the current elasticsearch server
func (c *Connection) Stats(indexList []string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: indexList,
		ExtraArgs: extraArgs,
		method:    "GET",
		api:       "_stats",
	}

	return r.Run()
}

// IndexStatus fetches the status (_status) for the indices defined in
// indexList. Use _all in indexList to get stats for all indices
func (c *Connection) IndexStatus(indexList []string) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: indexList,
		method:    "GET",
		api:       "_status",
	}

	return r.Run()
}

// BulkSend adds multiple documents in bulk mode
func (c *Connection) BulkSend(documents []Document) (*Response, error) {
	// We do not generate a traditional JSON here (often a one liner)
	// Elasticsearch expects one line of JSON per line (EOL = \n)
	// plus an extra \n at the very end of the document
	//
	// More information about the Bulk JSON format for Elasticsearch:
	//
	// - http://www.elasticsearch.org/guide/reference/api/bulk.html
	//
	// This is quite annoying for us as we can not use the simple JSON
	// Marshaler available in Run().
	//
	// We have to generate this special JSON by ourselves which leads to
	// the code below.
	//
	// I know it is unreadable; I must find an elegant way to fix this.

	// len(documents) * 2 : action + optional_sources
	// + 1 : room for the trailing \n
	bulkData := make([][]byte, len(documents)*2+1)
	i := 0

	for _, doc := range documents {
		action, err := json.Marshal(map[string]interface{}{
			doc.BulkCommand: map[string]interface{}{
				"_index": doc.Index,
				"_type":  doc.Type,
				"_id":    doc.Id,
			},
		})

		if err != nil {
			return &Response{}, err
		}

		bulkData[i] = action
		i++

		if doc.Fields != nil {
			if docFields, ok := doc.Fields.(map[string]interface{}); ok {
				if len(docFields) == 0 {
					continue
				}
			} else {
				typeOfFields := reflect.TypeOf(doc.Fields)
				if typeOfFields.Kind() == reflect.Ptr {
					typeOfFields = typeOfFields.Elem()
				}
				if typeOfFields.Kind() != reflect.Struct {
					return &Response{}, fmt.Errorf("Document fields not in struct or map[string]interface{} format")
				}
				if typeOfFields.NumField() == 0 {
					continue
				}
			}

			sources, err := json.Marshal(doc.Fields)
			if err != nil {
				return &Response{}, err
			}

			bulkData[i] = sources
			i++
		}
	}

	// forces an extra trailing \n absolutely necessary for elasticsearch
	bulkData[len(bulkData)-1] = []byte(nil)

	r := Request{
		Conn:     c,
		method:   "POST",
		api:      "_bulk",
		bulkData: bytes.Join(bulkData, []byte("\n")),
	}

	return r.Run()
}

// Search executes a search query against an index
func (c *Connection) Search(query interface{}, indexList []string, typeList []string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     query,
		IndexList: indexList,
		TypeList:  typeList,
		method:    "POST",
		api:       "_search",
		ExtraArgs: extraArgs,
	}

	return r.Run()
}

// Count executes a count query against an index; use the Count field in the response for the result
func (c *Connection) Count(query interface{}, indexList []string, typeList []string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     query,
		IndexList: indexList,
		TypeList:  typeList,
		method:    "POST",
		api:       "_count",
		ExtraArgs: extraArgs,
	}

	return r.Run()
}

// Query runs a query against an index using the provided HTTP method.
// This method can be used to execute a delete by query, just pass in "DELETE"
// for the HTTP method.
func (c *Connection) Query(query interface{}, indexList []string, typeList []string, httpMethod string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     query,
		IndexList: indexList,
		TypeList:  typeList,
		method:    httpMethod,
		api:       "_query",
		ExtraArgs: extraArgs,
	}

	return r.Run()
}

// Scan starts a scroll over an index
func (c *Connection) Scan(query interface{}, indexList []string, typeList []string, timeout string, size int) (*Response, error) {
	v := url.Values{}
	v.Add("search_type", "scan")
	v.Add("scroll", timeout)
	v.Add("size", strconv.Itoa(size))

	r := Request{
		Conn:      c,
		Query:     query,
		IndexList: indexList,
		TypeList:  typeList,
		method:    "POST",
		api:       "_search",
		ExtraArgs: v,
	}

	return r.Run()
}

// Scroll fetches data by scroll id
func (c *Connection) Scroll(scrollId string, timeout string) (*Response, error) {
	v := url.Values{}
	v.Add("scroll", timeout)

	r := Request{
		Conn:      c,
		method:    "POST",
		api:       "_search/scroll",
		ExtraArgs: v,
		Body:      []byte(scrollId),
	}

	return r.Run()
}

// Get a typed document by its id
func (c *Connection) Get(index string, documentType string, id string, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: []string{index},
		method:    "GET",
		api:       documentType + "/" + id,
		ExtraArgs: extraArgs,
	}

	return r.Run()
}

// Index indexes a Document
// The extraArgs is a list of url.Values that you can send to elasticsearch as
// URL arguments, for example, to control routing, ttl, version, op_type, etc.
func (c *Connection) Index(d Document, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     d.Fields,
		IndexList: []string{d.Index.(string)},
		TypeList:  []string{d.Type},
		ExtraArgs: extraArgs,
		method:    "POST",
	}

	if d.Id != nil {
		r.method = "PUT"
		r.id = d.Id.(string)
	}

	return r.Run()
}

// Delete deletes a Document d
// The extraArgs is a list of url.Values that you can send to elasticsearch as
// URL arguments, for example, to control routing.
func (c *Connection) Delete(d Document, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		IndexList: []string{d.Index.(string)},
		TypeList:  []string{d.Type},
		ExtraArgs: extraArgs,
		method:    "DELETE",
		id:        d.Id.(string),
	}

	return r.Run()
}

// Run executes an elasticsearch Request. It converts data to JSON, sends the
// request and returns the Response obtained
func (req *Request) Run() (*Response, error) {
	body, statusCode, err := req.run()
	esResp := &Response{Status: statusCode}

	if err != nil {
		return esResp, err
	}

	if req.method != "HEAD" {
		err = json.Unmarshal(body, &esResp)
		if err != nil {
			return esResp, err
		}
		err = json.Unmarshal(body, &esResp.Raw)
		if err != nil {
			return esResp, err
		}
	}

	if req.api == "_bulk" && esResp.Errors {
		for _, item := range esResp.Items {
			for _, i := range item {
				if i.Error != "" {
					return esResp, &SearchError{i.Error, i.Status}
				}
			}
		}
		return esResp, &SearchError{Msg: "Unknown error while bulk indexing"}
	}

	if esResp.Error != "" {
		return esResp, &SearchError{esResp.Error, esResp.Status}
	}

	return esResp, nil
}

func (req *Request) run() ([]byte, uint64, error) {
	postData := []byte{}

	// XXX : refactor this
	if len(req.Body) > 0 {
		postData = req.Body
	} else if req.api == "_bulk" {
		postData = req.bulkData
	} else {
		b, err := json.Marshal(req.Query)
		if err != nil {
			return nil, 0, err
		}
		postData = b
	}

	reader := bytes.NewReader(postData)

	newReq, err := http.NewRequest(req.method, req.Url(), reader)
	if err != nil {
		return nil, 0, err
	}

	if req.method == "POST" || req.method == "PUT" {
		newReq.Header.Set("Content-Type", "application/json")
	}

	resp, err := req.Conn.Client.Do(newReq)
	if err != nil {
		return nil, 0, err
	}

	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, uint64(resp.StatusCode), err
	}

	if resp.StatusCode > 201 && resp.StatusCode < 400 {
		return nil, uint64(resp.StatusCode), errors.New(string(body))
	}

	return body, uint64(resp.StatusCode), nil
}

// Url builds the URL for a Request
func (r *Request) Url() string {
	path := "/" + strings.Join(r.IndexList, ",")

	if len(r.TypeList) > 0 {
		path += "/" + strings.Join(r.TypeList, ",")
	}

	// XXX : for indexing documents using the normal (non bulk) API
	if len(r.id) > 0 {
		path += "/" + r.id
	}

	path += "/" + r.api

	u := url.URL{
		Scheme:   "http",
		Host:     fmt.Sprintf("%s:%s", r.Conn.Host, r.Conn.Port),
		Path:     path,
		RawQuery: r.ExtraArgs.Encode(),
	}

	return u.String()
}

// Buckets returns the list of buckets in an aggregation
func (a Aggregation) Buckets() []Bucket {
	result := []Bucket{}
	if buckets, ok := a["buckets"]; ok {
		for _, bucket := range buckets.([]interface{}) {
			result = append(result, bucket.(map[string]interface{}))
		}
	}

	return result
}

// Key returns the key for an aggregation bucket
func (b Bucket) Key() interface{} {
	return b["key"]
}

// DocCount returns the count of documents in this bucket
func (b Bucket) DocCount() uint64 {
	return uint64(b["doc_count"].(float64))
}

// Aggregation returns an aggregation by name from a bucket
func (b Bucket) Aggregation(name string) Aggregation {
	if agg, ok := b[name]; ok {
		return agg.(map[string]interface{})
	} else {
		return Aggregation{}
	}
}

// PutMapping registers a specific mapping for one or more types in one or more indexes
func (c *Connection) PutMapping(typeName string, mapping interface{}, indexes []string) (*Response, error) {

	r := Request{
		Conn:      c,
		Query:     mapping,
		IndexList: indexes,
		method:    "PUT",
		api:       "_mappings/" + typeName,
	}

	return r.Run()
}

func (c *Connection) GetMapping(types []string, indexes []string) (*Response, error) {

	r := Request{
		Conn:      c,
		IndexList: indexes,
		method:    "GET",
		api:       "_mapping/" + strings.Join(types, ","),
	}

	return r.Run()
}

// IndicesExist checks whether an index (or indices) exists on the server
func (c *Connection) IndicesExist(indexes []string) (bool, error) {

	r := Request{
		Conn:      c,
		IndexList: indexes,
		method:    "HEAD",
	}

	resp, err := r.Run()

	return resp.Status == 200, err
}

func (c *Connection) Update(d Document, query interface{}, extraArgs url.Values) (*Response, error) {
	r := Request{
		Conn:      c,
		Query:     query,
		IndexList: []string{d.Index.(string)},
		TypeList:  []string{d.Type},
		ExtraArgs: extraArgs,
		method:    "POST",
		api:       "_update",
	}

	if d.Id != nil {
		r.id = d.Id.(string)
	}

	return r.Run()
}

// DeleteMapping deletes a mapping along with all data in the type
func (c *Connection) DeleteMapping(typeName string, indexes []string) (*Response, error) {

	r := Request{
		Conn:      c,
		IndexList: indexes,
		method:    "DELETE",
		api:       "_mappings/" + typeName,
	}

	return r.Run()
}

func (c *Connection) modifyAlias(action string, alias string, indexes []string) (*Response, error) {
	command := map[string]interface{}{
		"actions": make([]map[string]interface{}, 1),
	}

	for _, index := range indexes {
		command["actions"] = append(command["actions"].([]map[string]interface{}), map[string]interface{}{
			action: map[string]interface{}{
				"index": index,
				"alias": alias,
			},
		})
	}

	r := Request{
		Conn:   c,
		Query:  command,
		method: "POST",
		api:    "_aliases",
	}

	return r.Run()
}

// AddAlias creates an alias to one or more indexes
func (c *Connection) AddAlias(alias string, indexes []string) (*Response, error) {
	return c.modifyAlias("add", alias, indexes)
}

// RemoveAlias removes an alias to one or more indexes
func (c *Connection) RemoveAlias(alias string, indexes []string) (*Response, error) {
	return c.modifyAlias("remove", alias, indexes)
}

// AliasExists checks whether an alias is defined on the server
func (c *Connection) AliasExists(alias string) (bool, error) {

	r := Request{
		Conn:   c,
		method: "HEAD",
		api:    "_alias/" + alias,
	}

	resp, err := r.Run()

	return resp.Status == 200, err
}
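The file above is the whole public surface of the vendored client: NewConnection, WithClient, the index management calls, Search/Count/Query/Scan/Scroll, Get/Index/Delete/Update, BulkSend, and the mapping and alias helpers. As orientation for reviewers of this vendoring commit, here is a minimal usage sketch, not part of the vendored code. It assumes an Elasticsearch node reachable on 127.0.0.1:9200; the "posts" index, "post" type, document id and field values are made up for illustration.

package main

import (
	"fmt"
	"net/url"

	"github.com/belogik/goes"
)

func main() {
	// Assumed: a local Elasticsearch node on 127.0.0.1:9200.
	conn := goes.NewConnection("127.0.0.1", "9200")

	// Index a single document ("posts", "post" and "1" are illustrative values).
	doc := goes.Document{
		Index:  "posts",
		Type:   "post",
		Id:     "1",
		Fields: map[string]interface{}{"title": "hello", "body": "world"},
	}
	if _, err := conn.Index(doc, url.Values{}); err != nil {
		fmt.Println("index failed:", err)
		return
	}

	// Run a match_all search against the same index and type.
	query := map[string]interface{}{
		"query": map[string]interface{}{"match_all": map[string]interface{}{}},
	}
	resp, err := conn.Search(query, []string{"posts"}, []string{"post"}, url.Values{})
	if err != nil {
		fmt.Println("search failed:", err)
		return
	}
	for _, hit := range resp.Hits.Hits {
		fmt.Println(hit.Id, hit.Source)
	}
}

Search takes the query as a plain map that Request.Run marshals to JSON, so any Elasticsearch query DSL body can be passed the same way.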
vendor/github.com/belogik/goes/structs.go (generated, vendored, new file, +184 lines)
@@ -0,0 +1,184 @@
// Copyright 2013 Belogik. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package goes

import (
	"net/http"
	"net/url"
)

// Connection represents a connection to elasticsearch
type Connection struct {
	// The host to connect to
	Host string

	// The port to use
	Port string

	// Client is the http client used to make requests, allowing setting things
	// such as timeouts etc
	Client *http.Client
}

// Request represents a request to elasticsearch
type Request struct {
	// Which connection will be used
	Conn *Connection

	// A search query
	Query interface{}

	// Which index to search into
	IndexList []string

	// Which type to search into
	TypeList []string

	// HTTP method to use (GET, POST ...)
	method string

	// Which api keyword (_search, _bulk, etc) to use
	api string

	// Bulk data
	bulkData []byte

	// Request body
	Body []byte

	// A list of extra URL arguments
	ExtraArgs url.Values

	// Used for the id field when indexing a document
	id string
}

// Response represents a response from elasticsearch
type Response struct {
	Acknowledged bool
	Error        string
	Errors       bool
	Status       uint64
	Took         uint64
	TimedOut     bool  `json:"timed_out"`
	Shards       Shard `json:"_shards"`
	Hits         Hits
	Index        string `json:"_index"`
	Id           string `json:"_id"`
	Type         string `json:"_type"`
	Version      int    `json:"_version"`
	Found        bool
	Count        int

	// Used by the _stats API
	All All `json:"_all"`

	// Used by the _bulk API
	Items []map[string]Item `json:"items,omitempty"`

	// Used by the GET API
	Source map[string]interface{} `json:"_source"`
	Fields map[string]interface{} `json:"fields"`

	// Used by the _status API
	Indices map[string]IndexStatus

	// Scroll id for iteration
	ScrollId string `json:"_scroll_id"`

	Aggregations map[string]Aggregation `json:"aggregations,omitempty"`

	Raw map[string]interface{}
}

// Aggregation represents an aggregation from a response
type Aggregation map[string]interface{}

// Bucket represents a bucket for an aggregation
type Bucket map[string]interface{}

// Document represents a document to send to elasticsearch
type Document struct {
	// XXX : interface as we can support nil values
	Index       interface{}
	Type        string
	Id          interface{}
	BulkCommand string
	Fields      interface{}
}

// Item represents the "items" field in a _bulk response
type Item struct {
	Type    string `json:"_type"`
	Id      string `json:"_id"`
	Index   string `json:"_index"`
	Version int    `json:"_version"`
	Error   string `json:"error"`
	Status  uint64 `json:"status"`
}

// All represents the "_all" field when calling the _stats API
// This is minimal, but it is all we need
type All struct {
	Indices   map[string]StatIndex   `json:"indices"`
	Primaries map[string]StatPrimary `json:"primaries"`
}

type StatIndex struct {
	Primaries map[string]StatPrimary `json:"primaries"`
}

type StatPrimary struct {
	// primary/docs:
	Count   int
	Deleted int
}

// Shard represents the "shard" struct as returned by elasticsearch
type Shard struct {
	Total      uint64
	Successful uint64
	Failed     uint64
}

// Hit represents a hit returned by a search
type Hit struct {
	Index     string                 `json:"_index"`
	Type      string                 `json:"_type"`
	Id        string                 `json:"_id"`
	Score     float64                `json:"_score"`
	Source    map[string]interface{} `json:"_source"`
	Highlight map[string]interface{} `json:"highlight"`
	Fields    map[string]interface{} `json:"fields"`
}

// Hits represents the hits structure as returned by elasticsearch
type Hits struct {
	Total uint64
	// max_score may contain the "null" value
	MaxScore interface{} `json:"max_score"`
	Hits     []Hit
}

type SearchError struct {
	Msg        string
	StatusCode uint64
}

// IndexStatus represents the status for a given index for the _status command
type IndexStatus struct {
	// XXX : problem, int will be marshaled to a float64 which seems logical
	// XXX : is it better to use strings even for int values or to keep
	// XXX : interfaces and deal with float64 ?
	Index map[string]interface{}

	Translog map[string]uint64
	Docs     map[string]uint64
	Merges   map[string]interface{}
	Refresh  map[string]interface{}
	Flush    map[string]interface{}

	// TODO: add shards support later, we do not need it for the moment
}
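Document is the unit that BulkSend in goes.go turns into the newline-delimited bulk format: one action line built from Index, Type, Id and BulkCommand, optionally followed by a source line marshaled from Fields. A minimal sketch of a mixed bulk request, again not part of the vendored code and using the same assumed local node and made-up index/type/id values:

package main

import (
	"fmt"

	"github.com/belogik/goes"
)

func main() {
	// Assumed: a local Elasticsearch node on 127.0.0.1:9200.
	conn := goes.NewConnection("127.0.0.1", "9200")

	docs := []goes.Document{
		{
			// "posts", "post" and the ids are illustrative values.
			Index:       "posts",
			Type:        "post",
			Id:          "2",
			BulkCommand: goes.BULK_COMMAND_INDEX,
			Fields:      map[string]interface{}{"title": "bulk hello"},
		},
		{
			Index:       "posts",
			Type:        "post",
			Id:          "1",
			BulkCommand: goes.BULK_COMMAND_DELETE, // delete actions carry no source
		},
	}

	resp, err := conn.BulkSend(docs)
	if err != nil {
		fmt.Println("bulk failed:", err)
		return
	}
	fmt.Println("bulk errors:", resp.Errors, "items:", len(resp.Items))
}

A delete Document leaves Fields nil, so BulkSend emits only its action line; Response.Errors and Response.Items expose the per-document results.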