mirror of https://github.com/astaxie/beego.git synced 2025-06-28 07:00:20 +00:00

add vendor

This commit is contained in:
astaxie
2018-07-30 12:05:51 +08:00
parent d55f54a8ab
commit 48acfa08be
496 changed files with 327583 additions and 0 deletions

vendor/github.com/couchbase/go-couchbase/LICENSE generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2013 Couchbase, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/couchbase/go-couchbase/README.markdown generated vendored Normal file

@ -0,0 +1,37 @@
# A smart client for couchbase in go
This is an *unofficial* version of a Couchbase Golang client. If you are
looking for the *official* Couchbase Golang client, please see
[CB-go](https://github.com/couchbaselabs/gocb).
This is an evolving package, but it does provide a useful interface to a
[couchbase](http://www.couchbase.com/) server, including all of the
pool/bucket discovery features, compatible key distribution with other
clients, and vbucket motion awareness so applications can continue to
operate during rebalances.

It also supports view querying with source node randomization so you
don't hammer a single node with all the work.
## Install
    go get github.com/couchbase/go-couchbase
## Example
    c, err := couchbase.Connect("http://dev-couchbase.example.com:8091/")
    if err != nil {
        log.Fatalf("Error connecting: %v", err)
    }

    pool, err := c.GetPool("default")
    if err != nil {
        log.Fatalf("Error getting pool: %v", err)
    }

    bucket, err := pool.GetBucket("default")
    if err != nil {
        log.Fatalf("Error getting bucket: %v", err)
    }

    bucket.Set("someKey", 0, []string{"an", "example", "list"})
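Since view querying is mentioned above, here is a minimal sketch of a view query against the same bucket handle; the "myddoc"/"myview" design-document names are illustrative, not part of this package:

    res, err := bucket.View("myddoc", "myview", map[string]interface{}{
        "stale": false,
        "limit": 10,
    })
    if err != nil {
        log.Fatalf("Error querying view: %v", err)
    }
    for _, row := range res.Rows {
        log.Printf("id=%s key=%v value=%v", row.ID, row.Key, row.Value)
    }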

vendor/github.com/couchbase/go-couchbase/client.go generated vendored Normal file

File diff suppressed because it is too large (1,237 lines)

vendor/github.com/couchbase/go-couchbase/conn_pool.go generated vendored Normal file

@ -0,0 +1,259 @@
package couchbase
import (
"errors"
"github.com/couchbase/goutils/logging"
"time"
"github.com/couchbase/gomemcached"
"github.com/couchbase/gomemcached/client"
)
// GenericMcdAuthHandler is a kind of AuthHandler that performs
// special auth exchange (like non-standard auth, possibly followed by
// select-bucket).
type GenericMcdAuthHandler interface {
AuthHandler
AuthenticateMemcachedConn(host string, conn *memcached.Client) error
}
// Error raised when a connection can't be retrieved from a pool.
var TimeoutError = errors.New("timeout waiting to build connection")
var errClosedPool = errors.New("the connection pool is closed")
var errNoPool = errors.New("no connection pool")
// Default timeout for retrieving a connection from the pool.
var ConnPoolTimeout = time.Hour * 24 * 30
// ConnPoolAvailWaitTime is the amount of time to wait for an existing
// connection from the pool before considering the creation of a new
// one.
var ConnPoolAvailWaitTime = time.Millisecond
type connectionPool struct {
host string
mkConn func(host string, ah AuthHandler) (*memcached.Client, error)
auth AuthHandler
connections chan *memcached.Client
createsem chan bool
inUse bool
}
func newConnectionPool(host string, ah AuthHandler, poolSize, poolOverflow int) *connectionPool {
return &connectionPool{
host: host,
connections: make(chan *memcached.Client, poolSize),
createsem: make(chan bool, poolSize+poolOverflow),
mkConn: defaultMkConn,
auth: ah,
}
}
// ConnPoolCallback is invoked whenever a connection is acquired from a pool.
var ConnPoolCallback func(host string, source string, start time.Time, err error)
func defaultMkConn(host string, ah AuthHandler) (*memcached.Client, error) {
conn, err := memcached.Connect("tcp", host)
if err != nil {
return nil, err
}
if TCPKeepalive {
conn.SetKeepAliveOptions(time.Duration(TCPKeepaliveInterval) * time.Second)
}
if EnableMutationToken {
res, err := conn.EnableMutationToken()
if err != nil || res.Status != gomemcached.SUCCESS {
logging.Warnf("Unable to enable mutation token %v", err)
}
}
if gah, ok := ah.(GenericMcdAuthHandler); ok {
err = gah.AuthenticateMemcachedConn(host, conn)
if err != nil {
conn.Close()
return nil, err
}
return conn, nil
}
name, pass, bucket := ah.GetCredentials()
if name != "default" {
_, err = conn.Auth(name, pass)
if err != nil {
conn.Close()
return nil, err
}
// Select bucket (Required for cb_auth creds)
// Required when doing auth with _admin credentials
if bucket != "" && bucket != name {
_, err = conn.SelectBucket(bucket)
if err != nil {
conn.Close()
return nil, err
}
}
}
return conn, nil
}
func (cp *connectionPool) Close() (err error) {
defer func() {
if recover() != nil {
err = errors.New("connectionPool.Close error")
}
}()
close(cp.connections)
for c := range cp.connections {
c.Close()
}
return
}
func (cp *connectionPool) GetWithTimeout(d time.Duration) (rv *memcached.Client, err error) {
if cp == nil {
return nil, errNoPool
}
path := ""
if ConnPoolCallback != nil {
defer func(path *string, start time.Time) {
ConnPoolCallback(cp.host, *path, start, err)
}(&path, time.Now())
}
path = "short-circuit"
// short-circuit available connections.
select {
case rv, isopen := <-cp.connections:
if !isopen {
return nil, errClosedPool
}
return rv, nil
default:
}
t := time.NewTimer(ConnPoolAvailWaitTime)
defer t.Stop()
// Try to grab an available connection within 1ms
select {
case rv, isopen := <-cp.connections:
path = "avail1"
if !isopen {
return nil, errClosedPool
}
return rv, nil
case <-t.C:
// No connection came around in time, let's see
// whether we can get one or build a new one first.
t.Reset(d) // Reuse the timer for the full timeout.
select {
case rv, isopen := <-cp.connections:
path = "avail2"
if !isopen {
return nil, errClosedPool
}
return rv, nil
case cp.createsem <- true:
path = "create"
// Build a connection if we can't get a real one.
// This can potentially be an overflow connection, or
// a pooled connection.
rv, err := cp.mkConn(cp.host, cp.auth)
if err != nil {
// On error, release our create hold
<-cp.createsem
}
return rv, err
case <-t.C:
return nil, ErrTimeout
}
}
}
func (cp *connectionPool) Get() (*memcached.Client, error) {
return cp.GetWithTimeout(ConnPoolTimeout)
}
func (cp *connectionPool) Return(c *memcached.Client) {
if c == nil {
return
}
if cp == nil {
// No pool to return to; just close the connection instead of
// blocking on the nil channels below.
c.Close()
return
}
if c.IsHealthy() {
defer func() {
if recover() != nil {
// This happens when the pool has already been
// closed and we're trying to return a
// connection to it anyway. Just close the
// connection.
c.Close()
}
}()
select {
case cp.connections <- c:
default:
// Overflow connection.
<-cp.createsem
c.Close()
}
} else {
<-cp.createsem
c.Close()
}
}
func (cp *connectionPool) StartTapFeed(args *memcached.TapArguments) (*memcached.TapFeed, error) {
if cp == nil {
return nil, errNoPool
}
mc, err := cp.Get()
if err != nil {
return nil, err
}
// A connection can't be used after TAP; don't count it against the
// connection pool capacity
<-cp.createsem
return mc.StartTapFeed(*args)
}
const DEFAULT_WINDOW_SIZE = 20 * 1024 * 1024 // 20 MB
func (cp *connectionPool) StartUprFeed(name string, sequence uint32, dcp_buffer_size uint32, data_chan_size int) (*memcached.UprFeed, error) {
if cp == nil {
return nil, errNoPool
}
mc, err := cp.Get()
if err != nil {
return nil, err
}
// A connection can't be used after it has been allocated to UPR;
// don't count it against the connection pool capacity
<-cp.createsem
uf, err := mc.NewUprFeed()
if err != nil {
return nil, err
}
if err := uf.UprOpen(name, sequence, dcp_buffer_size); err != nil {
return nil, err
}
if err := uf.StartFeedWithConfig(data_chan_size); err != nil {
return nil, err
}
return uf, nil
}
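For reference, the exported knobs above (ConnPoolTimeout, ConnPoolAvailWaitTime, ConnPoolCallback) can be tuned by the embedding application before any connections are made; a minimal sketch, with illustrative values:

    package main

    import (
        "log"
        "time"

        "github.com/couchbase/go-couchbase"
    )

    func main() {
        // Fail Get() after 30 seconds instead of the 30-day default.
        couchbase.ConnPoolTimeout = 30 * time.Second
        // Wait a little longer for an existing connection before building a new one.
        couchbase.ConnPoolAvailWaitTime = 5 * time.Millisecond
        // Observe which path (short-circuit/avail1/avail2/create) served each acquisition.
        couchbase.ConnPoolCallback = func(host, source string, start time.Time, err error) {
            log.Printf("pool %s via %s took %v (err=%v)", host, source, time.Since(start), err)
        }
    }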

vendor/github.com/couchbase/go-couchbase/ddocs.go generated vendored Normal file

@ -0,0 +1,288 @@
package couchbase
import (
"bytes"
"encoding/json"
"fmt"
"github.com/couchbase/goutils/logging"
"io/ioutil"
"net/http"
)
// ViewDefinition represents a single view within a design document.
type ViewDefinition struct {
Map string `json:"map"`
Reduce string `json:"reduce,omitempty"`
}
// DDoc is the document body of a design document specifying a view.
type DDoc struct {
Language string `json:"language,omitempty"`
Views map[string]ViewDefinition `json:"views"`
}
// DDocsResult represents the result from listing the design
// documents.
type DDocsResult struct {
Rows []struct {
DDoc struct {
Meta map[string]interface{}
JSON DDoc
} `json:"doc"`
} `json:"rows"`
}
// GetDDocs lists all design documents
func (b *Bucket) GetDDocs() (DDocsResult, error) {
var ddocsResult DDocsResult
b.RLock()
pool := b.pool
uri := b.DDocs.URI
b.RUnlock()
// MB-23555 ephemeral buckets have no ddocs
if uri == "" {
return DDocsResult{}, nil
}
err := pool.client.parseURLResponse(uri, &ddocsResult)
if err != nil {
return DDocsResult{}, err
}
return ddocsResult, nil
}
func (b *Bucket) GetDDocWithRetry(docname string, into interface{}) error {
ddocURI := fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
err := b.parseAPIResponse(ddocURI, &into)
if err != nil {
return err
}
return nil
}
func (b *Bucket) GetDDocsWithRetry() (DDocsResult, error) {
var ddocsResult DDocsResult
b.RLock()
uri := b.DDocs.URI
b.RUnlock()
// MB-23555 ephemeral buckets have no ddocs
if uri == "" {
return DDocsResult{}, nil
}
err := b.parseURLResponse(uri, &ddocsResult)
if err != nil {
return DDocsResult{}, err
}
return ddocsResult, nil
}
func (b *Bucket) ddocURL(docname string) (string, error) {
u, err := b.randomBaseURL()
if err != nil {
return "", err
}
u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
return u.String(), nil
}
func (b *Bucket) ddocURLNext(nodeId int, docname string) (string, int, error) {
u, selected, err := b.randomNextURL(nodeId)
if err != nil {
return "", -1, err
}
u.Path = fmt.Sprintf("/%s/_design/%s", b.GetName(), docname)
return u.String(), selected, nil
}
const ABS_MAX_RETRIES = 10
const ABS_MIN_RETRIES = 3
func (b *Bucket) getMaxRetries() (int, error) {
maxRetries := len(b.Nodes())
if maxRetries == 0 {
return 0, fmt.Errorf("No available Couch rest URLs")
}
if maxRetries > ABS_MAX_RETRIES {
maxRetries = ABS_MAX_RETRIES
} else if maxRetries < ABS_MIN_RETRIES {
maxRetries = ABS_MIN_RETRIES
}
return maxRetries, nil
}
// PutDDoc installs a design document.
func (b *Bucket) PutDDoc(docname string, value interface{}) error {
var Err error
maxRetries, err := b.getMaxRetries()
if err != nil {
return err
}
lastNode := START_NODE_ID
for retryCount := 0; retryCount < maxRetries; retryCount++ {
Err = nil
ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
if err != nil {
return err
}
lastNode = selectedNode
logging.Infof(" Trying with selected node %d", selectedNode)
j, err := json.Marshal(value)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", ddocU, bytes.NewReader(j))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
if err != nil {
return err
}
res, err := doHTTPRequest(req)
if err != nil {
return err
}
if res.StatusCode != 201 {
body, _ := ioutil.ReadAll(res.Body)
Err = fmt.Errorf("error installing view: %v / %s",
res.Status, body)
logging.Errorf(" Error in PutDDOC %v. Retrying...", Err)
res.Body.Close()
b.Refresh()
continue
}
res.Body.Close()
break
}
return Err
}
// GetDDoc retrieves a specific design doc.
func (b *Bucket) GetDDoc(docname string, into interface{}) error {
var Err error
var res *http.Response
maxRetries, err := b.getMaxRetries()
if err != nil {
return err
}
lastNode := START_NODE_ID
for retryCount := 0; retryCount < maxRetries; retryCount++ {
Err = nil
ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
if err != nil {
return err
}
lastNode = selectedNode
logging.Infof(" Trying with selected node %d", selectedNode)
req, err := http.NewRequest("GET", ddocU, nil)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
err = maybeAddAuth(req, b.authHandler(false /* bucket not yet locked */))
if err != nil {
return err
}
res, err = doHTTPRequest(req)
if err != nil {
return err
}
if res.StatusCode != 200 {
body, _ := ioutil.ReadAll(res.Body)
Err = fmt.Errorf("error reading view: %v / %s",
res.Status, body)
logging.Errorf(" Error in GetDDOC %v Retrying...", Err)
b.Refresh()
res.Body.Close()
continue
}
defer res.Body.Close()
break
}
if Err != nil {
return Err
}
d := json.NewDecoder(res.Body)
return d.Decode(into)
}
// DeleteDDoc removes a design document.
func (b *Bucket) DeleteDDoc(docname string) error {
var Err error
maxRetries, err := b.getMaxRetries()
if err != nil {
return err
}
lastNode := START_NODE_ID
for retryCount := 0; retryCount < maxRetries; retryCount++ {
Err = nil
ddocU, selectedNode, err := b.ddocURLNext(lastNode, docname)
if err != nil {
return err
}
lastNode = selectedNode
logging.Infof(" Trying with selected node %d", selectedNode)
req, err := http.NewRequest("DELETE", ddocU, nil)
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
err = maybeAddAuth(req, b.authHandler(false /* bucket not already locked */))
if err != nil {
return err
}
res, err := doHTTPRequest(req)
if err != nil {
return err
}
if res.StatusCode != 200 {
body, _ := ioutil.ReadAll(res.Body)
Err = fmt.Errorf("error deleting view : %v / %s", res.Status, body)
logging.Errorf(" Error in DeleteDDOC %v. Retrying ... ", Err)
b.Refresh()
res.Body.Close()
continue
}
res.Body.Close()
break
}
return Err
}
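A hedged usage sketch for the design-document helpers above, assuming a `bucket` handle obtained as in the README example; the "by_type" names are illustrative:

    ddoc := couchbase.DDoc{
        Language: "javascript",
        Views: map[string]couchbase.ViewDefinition{
            "by_type": {Map: `function (doc, meta) { emit(doc.type, null); }`},
        },
    }
    if err := bucket.PutDDoc("by_type", &ddoc); err != nil {
        log.Fatalf("Error installing design doc: %v", err)
    }
    var got couchbase.DDoc
    if err := bucket.GetDDoc("by_type", &got); err != nil {
        log.Fatalf("Error reading design doc back: %v", err)
    }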

vendor/github.com/couchbase/go-couchbase/observe.go generated vendored Normal file

@ -0,0 +1,300 @@
package couchbase
import (
"fmt"
"github.com/couchbase/goutils/logging"
"sync"
)
type PersistTo uint8
const (
PersistNone = PersistTo(0x00)
PersistMaster = PersistTo(0x01)
PersistOne = PersistTo(0x02)
PersistTwo = PersistTo(0x03)
PersistThree = PersistTo(0x04)
PersistFour = PersistTo(0x05)
)
type ObserveTo uint8
const (
ObserveNone = ObserveTo(0x00)
ObserveReplicateOne = ObserveTo(0x01)
ObserveReplicateTwo = ObserveTo(0x02)
ObserveReplicateThree = ObserveTo(0x03)
ObserveReplicateFour = ObserveTo(0x04)
)
type JobType uint8
const (
OBSERVE = JobType(0x00)
PERSIST = JobType(0x01)
)
type ObservePersistJob struct {
vb uint16
vbuuid uint64
hostname string
jobType JobType
failover uint8
lastPersistedSeqNo uint64
currentSeqNo uint64
resultChan chan *ObservePersistJob
errorChan chan *OPErrResponse
}
type OPErrResponse struct {
vb uint16
vbuuid uint64
err error
job *ObservePersistJob
}
var ObservePersistPool = NewPool(1024)
var OPJobChan = make(chan *ObservePersistJob, 1024)
var OPJobDone = make(chan bool)
var wg sync.WaitGroup
func (b *Bucket) StartOPPollers(maxWorkers int) {
for i := 0; i < maxWorkers; i++ {
go b.OPJobPoll()
wg.Add(1)
}
wg.Wait()
}
func (b *Bucket) SetObserveAndPersist(nPersist PersistTo, nObserve ObserveTo) (err error) {
numNodes := len(b.Nodes())
if int(nPersist) > numNodes || int(nObserve) > numNodes {
return fmt.Errorf("Not enough healthy nodes in the cluster")
}
if int(nPersist) > (b.Replicas+1) || int(nObserve) > b.Replicas {
return fmt.Errorf("Not enough replicas in the cluster")
}
if !EnableMutationToken {
return fmt.Errorf("Mutation Tokens not enabled ")
}
b.ds = &DurablitySettings{Persist: PersistTo(nPersist), Observe: ObserveTo(nObserve)}
return
}
func (b *Bucket) ObserveAndPersistPoll(vb uint16, vbuuid uint64, seqNo uint64) (err error, failover bool) {
b.RLock()
ds := b.ds
b.RUnlock()
if ds == nil {
return
}
nj := 0 // total number of jobs
resultChan := make(chan *ObservePersistJob, 10)
errChan := make(chan *OPErrResponse, 10)
nodes := b.GetNodeList(vb)
if int(ds.Observe) > len(nodes) || int(ds.Persist) > len(nodes) {
return fmt.Errorf("Not enough healthy nodes in the cluster"), false
}
logging.Infof("Node list %v", nodes)
if ds.Observe >= ObserveReplicateOne {
// create a job for each host
for i := ObserveReplicateOne; i < ds.Observe+1; i++ {
opJob := ObservePersistPool.Get()
opJob.vb = vb
opJob.vbuuid = vbuuid
opJob.jobType = OBSERVE
opJob.hostname = nodes[i]
opJob.resultChan = resultChan
opJob.errorChan = errChan
OPJobChan <- opJob
nj++
}
}
if ds.Persist >= PersistMaster {
for i := PersistMaster; i < ds.Persist+1; i++ {
opJob := ObservePersistPool.Get()
opJob.vb = vb
opJob.vbuuid = vbuuid
opJob.jobType = PERSIST
opJob.hostname = nodes[i]
opJob.resultChan = resultChan
opJob.errorChan = errChan
OPJobChan <- opJob
nj++
}
}
ok := true
for ok {
select {
case res := <-resultChan:
jobDone := false
if res.failover == 0 {
// no failover
if res.jobType == PERSIST {
if res.lastPersistedSeqNo >= seqNo {
jobDone = true
}
} else {
if res.currentSeqNo >= seqNo {
jobDone = true
}
}
if jobDone {
nj--
ObservePersistPool.Put(res)
} else {
// requeue this job
OPJobChan <- res
}
} else {
// Not currently handling failover scenarios TODO
nj--
ObservePersistPool.Put(res)
failover = true
}
if nj == 0 {
// done with all the jobs
ok = false
close(resultChan)
close(errChan)
}
case Err := <-errChan:
logging.Errorf("Error in Observe/Persist %v", Err.err)
err = fmt.Errorf("Error in Observe/Persist job %v", Err.err)
nj--
ObservePersistPool.Put(Err.job)
if nj == 0 {
close(resultChan)
close(errChan)
ok = false
}
}
}
return
}
func (b *Bucket) OPJobPoll() {
ok := true
for ok {
select {
case job := <-OPJobChan:
pool := b.getConnPoolByHost(job.hostname, false /* bucket not already locked */)
if pool == nil {
errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
errRes.err = fmt.Errorf("Pool not found for host %v", job.hostname)
errRes.job = job
job.errorChan <- errRes
continue
}
conn, err := pool.Get()
if err != nil {
errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
errRes.err = fmt.Errorf("Unable to get connection from pool %v", err)
errRes.job = job
job.errorChan <- errRes
continue
}
res, err := conn.ObserveSeq(job.vb, job.vbuuid)
if err != nil {
errRes := &OPErrResponse{vb: job.vb, vbuuid: job.vbuuid}
errRes.err = fmt.Errorf("Command failed %v", err)
errRes.job = job
job.errorChan <- errRes
continue
}
pool.Return(conn)
job.lastPersistedSeqNo = res.LastPersistedSeqNo
job.currentSeqNo = res.CurrentSeqNo
job.failover = res.Failover
job.resultChan <- job
case <-OPJobDone:
logging.Infof("Observe Persist Poller exitting")
ok = false
}
}
wg.Done()
}
func (b *Bucket) GetNodeList(vb uint16) []string {
vbm := b.VBServerMap()
if len(vbm.VBucketMap) < int(vb) {
logging.Infof("vbmap smaller than vblist")
return nil
}
nodes := make([]string, len(vbm.VBucketMap[vb]))
for i := 0; i < len(vbm.VBucketMap[vb]); i++ {
n := vbm.VBucketMap[vb][i]
if n < 0 {
continue
}
node := b.getMasterNode(n)
if len(node) > 1 {
nodes[i] = node
}
continue
}
return nodes
}
// OPpool is a pool of ObservePersist jobs.
type OPpool struct {
pool chan *ObservePersistJob
}
// NewPool creates a new pool of jobs
func NewPool(max int) *OPpool {
return &OPpool{
pool: make(chan *ObservePersistJob, max),
}
}
// Get borrows an ObservePersistJob from the pool.
func (p *OPpool) Get() *ObservePersistJob {
var o *ObservePersistJob
select {
case o = <-p.pool:
default:
o = &ObservePersistJob{}
}
return o
}
// Put returns an ObservePersistJob to the pool.
func (p *OPpool) Put(o *ObservePersistJob) {
select {
case p.pool <- o:
default:
// let it go, let it go...
}
}
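A hedged sketch of wiring up the observe/persist machinery above; the worker count and durability levels are illustrative, and `bucket` is assumed as in the README example:

    couchbase.EnableMutationToken = true // must be set before connections are built
    go bucket.StartOPPollers(4)          // StartOPPollers blocks on its WaitGroup, so run it in a goroutine
    if err := bucket.SetObserveAndPersist(couchbase.PersistMaster, couchbase.ObserveNone); err != nil {
        log.Fatalf("Error configuring durability: %v", err)
    }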

vendor/github.com/couchbase/go-couchbase/pools.go generated vendored Normal file

File diff suppressed because it is too large (1,088 lines)

vendor/github.com/couchbase/go-couchbase/streaming.go generated vendored Normal file

@ -0,0 +1,198 @@
package couchbase
import (
"encoding/json"
"fmt"
"github.com/couchbase/goutils/logging"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"time"
"unsafe"
)
// Bucket auto-updater gets the latest version of the bucket config from
// the server. If the configuration has changed, it updates the local
// bucket information. If the bucket has been deleted, it notifies anyone
// who is holding a reference to this bucket.
const MAX_RETRY_COUNT = 5
const DISCONNECT_PERIOD = 120 * time.Second
type NotifyFn func(bucket string, err error)
// Use TCP keepalive to detect half close sockets
var updaterTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
}
var updaterHTTPClient = &http.Client{Transport: updaterTransport}
func doHTTPRequestForUpdate(req *http.Request) (*http.Response, error) {
var err error
var res *http.Response
for i := 0; i < HTTP_MAX_RETRY; i++ {
res, err = updaterHTTPClient.Do(req)
if err != nil && isHttpConnError(err) {
continue
}
break
}
if err != nil {
return nil, err
}
return res, err
}
func (b *Bucket) RunBucketUpdater(notify NotifyFn) {
go func() {
err := b.UpdateBucket()
if err != nil {
if notify != nil {
notify(b.GetName(), err)
}
logging.Errorf(" Bucket Updater exited with err %v", err)
}
}()
}
func (b *Bucket) replaceConnPools2(with []*connectionPool, bucketLocked bool) {
if !bucketLocked {
b.Lock()
defer b.Unlock()
}
old := b.connPools
b.connPools = unsafe.Pointer(&with)
if old != nil {
for _, pool := range *(*[]*connectionPool)(old) {
if pool != nil && !pool.inUse {
pool.Close()
}
}
}
return
}
func (b *Bucket) UpdateBucket() error {
var failures int
var returnErr error
for {
if failures == MAX_RETRY_COUNT {
logging.Errorf(" Maximum failures reached. Exiting loop...")
return fmt.Errorf("Max failures reached. Last Error %v", returnErr)
}
nodes := b.Nodes()
if len(nodes) < 1 {
return fmt.Errorf("No healthy nodes found")
}
startNode := rand.Intn(len(nodes))
node := nodes[(startNode)%len(nodes)]
streamUrl := fmt.Sprintf("http://%s/pools/default/bucketsStreaming/%s", node.Hostname, b.GetName())
logging.Infof(" Trying with %s", streamUrl)
req, err := http.NewRequest("GET", streamUrl, nil)
if err != nil {
return err
}
// Lock here to avoid having pool closed under us.
b.RLock()
err = maybeAddAuth(req, b.pool.client.ah)
b.RUnlock()
if err != nil {
return err
}
res, err := doHTTPRequestForUpdate(req)
if err != nil {
return err
}
if res.StatusCode != 200 {
bod, _ := ioutil.ReadAll(io.LimitReader(res.Body, 512))
logging.Errorf("Failed to connect to host, unexpected status code: %v. Body %s", res.StatusCode, bod)
res.Body.Close()
returnErr = fmt.Errorf("Failed to connect to host. Status %v Body %s", res.StatusCode, bod)
failures++
continue
}
dec := json.NewDecoder(res.Body)
tmpb := &Bucket{}
for {
err := dec.Decode(&tmpb)
if err != nil {
returnErr = err
res.Body.Close()
break
}
// if we got here, reset failure count
failures = 0
b.Lock()
// mark all the old connection pools for deletion
pools := b.getConnPools(true /* already locked */)
for _, pool := range pools {
if pool != nil {
pool.inUse = false
}
}
newcps := make([]*connectionPool, len(tmpb.VBSMJson.ServerList))
for i := range newcps {
// get the old connection pool and check if it is still valid
pool := b.getConnPoolByHost(tmpb.VBSMJson.ServerList[i], true /* bucket already locked */)
if pool != nil && !pool.inUse {
// if the hostname and index are unchanged then reuse this pool
newcps[i] = pool
pool.inUse = true
continue
}
// else create a new pool
if b.ah != nil {
newcps[i] = newConnectionPool(
tmpb.VBSMJson.ServerList[i],
b.ah, PoolSize, PoolOverflow)
} else {
newcps[i] = newConnectionPool(
tmpb.VBSMJson.ServerList[i],
b.authHandler(true /* bucket already locked */), PoolSize, PoolOverflow)
}
}
b.replaceConnPools2(newcps, true /* bucket already locked */)
tmpb.ah = b.ah
b.vBucketServerMap = unsafe.Pointer(&tmpb.VBSMJson)
b.nodeList = unsafe.Pointer(&tmpb.NodesJSON)
b.Unlock()
logging.Infof("Got new configuration for bucket %s", b.GetName())
}
// we are here because of an error
failures++
continue
}
return nil
}
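A minimal sketch of the updater entry point above, again assuming a `bucket` handle as in the README example:

    bucket.RunBucketUpdater(func(name string, err error) {
        // Called when the streaming updater exits, e.g. after the bucket is deleted.
        log.Printf("bucket %s updater stopped: %v", name, err)
    })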

vendor/github.com/couchbase/go-couchbase/tap.go generated vendored Normal file

@ -0,0 +1,143 @@
package couchbase
import (
"github.com/couchbase/gomemcached/client"
"github.com/couchbase/goutils/logging"
"sync"
"time"
)
const initialRetryInterval = 1 * time.Second
const maximumRetryInterval = 30 * time.Second
// A TapFeed streams mutation events from a bucket.
//
// Events from the bucket can be read from the channel 'C'. Remember
// to call Close() on it when you're done, unless its channel has
// closed itself already.
type TapFeed struct {
C <-chan memcached.TapEvent
bucket *Bucket
args *memcached.TapArguments
nodeFeeds []*memcached.TapFeed // The TAP feeds of the individual nodes
output chan memcached.TapEvent // Same as C but writeably-typed
wg sync.WaitGroup
quit chan bool
}
// StartTapFeed creates and starts a new Tap feed
func (b *Bucket) StartTapFeed(args *memcached.TapArguments) (*TapFeed, error) {
if args == nil {
defaultArgs := memcached.DefaultTapArguments()
args = &defaultArgs
}
feed := &TapFeed{
bucket: b,
args: args,
output: make(chan memcached.TapEvent, 10),
quit: make(chan bool),
}
go feed.run()
feed.C = feed.output
return feed, nil
}
// Goroutine that runs the feed
func (feed *TapFeed) run() {
retryInterval := initialRetryInterval
bucketOK := true
for {
// Connect to the TAP feed of each server node:
if bucketOK {
killSwitch, err := feed.connectToNodes()
if err == nil {
// Run until one of the sub-feeds fails:
select {
case <-killSwitch:
case <-feed.quit:
return
}
feed.closeNodeFeeds()
retryInterval = initialRetryInterval
}
}
// On error, try to refresh the bucket in case the list of nodes changed:
logging.Infof("go-couchbase: TAP connection lost; reconnecting to bucket %q in %v",
feed.bucket.Name, retryInterval)
err := feed.bucket.Refresh()
bucketOK = err == nil
select {
case <-time.After(retryInterval):
case <-feed.quit:
return
}
if retryInterval *= 2; retryInterval > maximumRetryInterval {
retryInterval = maximumRetryInterval
}
}
}
func (feed *TapFeed) connectToNodes() (killSwitch chan bool, err error) {
killSwitch = make(chan bool)
for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
var singleFeed *memcached.TapFeed
singleFeed, err = serverConn.StartTapFeed(feed.args)
if err != nil {
logging.Errorf("go-couchbase: Error connecting to tap feed of %s: %v", serverConn.host, err)
feed.closeNodeFeeds()
return
}
feed.nodeFeeds = append(feed.nodeFeeds, singleFeed)
go feed.forwardTapEvents(singleFeed, killSwitch, serverConn.host)
feed.wg.Add(1)
}
return
}
// Goroutine that forwards Tap events from a single node's feed to the aggregate feed.
func (feed *TapFeed) forwardTapEvents(singleFeed *memcached.TapFeed, killSwitch chan bool, host string) {
defer feed.wg.Done()
for {
select {
case event, ok := <-singleFeed.C:
if !ok {
if singleFeed.Error != nil {
logging.Errorf("go-couchbase: Tap feed from %s failed: %v", host, singleFeed.Error)
}
killSwitch <- true
return
}
feed.output <- event
case <-feed.quit:
return
}
}
}
func (feed *TapFeed) closeNodeFeeds() {
for _, f := range feed.nodeFeeds {
f.Close()
}
feed.nodeFeeds = nil
}
// Close a Tap feed.
func (feed *TapFeed) Close() error {
select {
case <-feed.quit:
return nil
default:
}
feed.closeNodeFeeds()
close(feed.quit)
feed.wg.Wait()
close(feed.output)
return nil
}
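A hedged consumption sketch for the aggregate TAP feed above; passing nil selects DefaultTapArguments:

    feed, err := bucket.StartTapFeed(nil)
    if err != nil {
        log.Fatalf("Error starting tap feed: %v", err)
    }
    defer feed.Close()
    for event := range feed.C {
        log.Printf("tap event %v: key=%q", event.Opcode, event.Key)
    }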

vendor/github.com/couchbase/go-couchbase/upr.go generated vendored Normal file

@ -0,0 +1,398 @@
package couchbase
import (
"log"
"sync"
"time"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/couchbase/gomemcached/client"
"github.com/couchbase/goutils/logging"
)
// A UprFeed streams mutation events from a bucket.
//
// Events from the bucket can be read from the channel 'C'. Remember
// to call Close() on it when you're done, unless its channel has
// closed itself already.
type UprFeed struct {
C <-chan *memcached.UprEvent
bucket *Bucket
nodeFeeds map[string]*FeedInfo // The UPR feeds of the individual nodes
output chan *memcached.UprEvent // Same as C but writeably-typed
outputClosed bool
quit chan bool
name string // name of this UPR feed
sequence uint32 // sequence number for this feed
connected bool
killSwitch chan bool
closing bool
wg sync.WaitGroup
dcp_buffer_size uint32
data_chan_size int
}
// UprFeed from a single connection
type FeedInfo struct {
uprFeed *memcached.UprFeed // UPR feed handle
host string // hostname
connected bool // connected
quit chan bool // quit channel
}
type FailoverLog map[uint16]memcached.FailoverLog
// GetFailoverLogs gets the failover logs for a set of vbucket ids.
func (b *Bucket) GetFailoverLogs(vBuckets []uint16) (FailoverLog, error) {
// map vbids to their corresponding hosts
vbHostList := make(map[string][]uint16)
vbm := b.VBServerMap()
if len(vbm.VBucketMap) < len(vBuckets) {
return nil, fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
vbm.VBucketMap, vBuckets)
}
for _, vb := range vBuckets {
masterID := vbm.VBucketMap[vb][0]
master := b.getMasterNode(masterID)
if master == "" {
return nil, fmt.Errorf("No master found for vb %d", vb)
}
vbList := vbHostList[master]
if vbList == nil {
vbList = make([]uint16, 0)
}
vbList = append(vbList, vb)
vbHostList[master] = vbList
}
failoverLogMap := make(FailoverLog)
for _, serverConn := range b.getConnPools(false /* not already locked */) {
vbList := vbHostList[serverConn.host]
if vbList == nil {
continue
}
mc, err := serverConn.Get()
if err != nil {
logging.Infof("No Free connections for vblist %v", vbList)
return nil, fmt.Errorf("No Free connections for host %s",
serverConn.host)
}
// close the connection so that it doesn't get reused for upr data
// connection
defer mc.Close()
failoverlogs, err := mc.UprGetFailoverLog(vbList)
if err != nil {
return nil, fmt.Errorf("Error getting failover log %s host %s",
err.Error(), serverConn.host)
}
for vb, log := range failoverlogs {
failoverLogMap[vb] = *log
}
}
return failoverLogMap, nil
}
func (b *Bucket) StartUprFeed(name string, sequence uint32) (*UprFeed, error) {
return b.StartUprFeedWithConfig(name, sequence, 10, DEFAULT_WINDOW_SIZE)
}
// StartUprFeedWithConfig creates and starts a new Upr feed.
// No data will be sent on the channel unless vbucket streams are requested.
func (b *Bucket) StartUprFeedWithConfig(name string, sequence uint32, data_chan_size int, dcp_buffer_size uint32) (*UprFeed, error) {
feed := &UprFeed{
bucket: b,
output: make(chan *memcached.UprEvent, data_chan_size),
quit: make(chan bool),
nodeFeeds: make(map[string]*FeedInfo, 0),
name: name,
sequence: sequence,
killSwitch: make(chan bool),
dcp_buffer_size: dcp_buffer_size,
data_chan_size: data_chan_size,
}
err := feed.connectToNodes()
if err != nil {
return nil, fmt.Errorf("Cannot connect to bucket %s", err.Error())
}
feed.connected = true
go feed.run()
feed.C = feed.output
return feed, nil
}
// UprRequestStream starts a stream for a vb on a feed
func (feed *UprFeed) UprRequestStream(vb uint16, opaque uint16, flags uint32,
vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
defer func() {
if r := recover(); r != nil {
log.Panic("Panic in UprRequestStream. Feed %v Bucket %v ", feed, feed.bucket)
}
}()
vbm := feed.bucket.VBServerMap()
if len(vbm.VBucketMap) < int(vb) {
return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
vb, vbm.VBucketMap)
}
if int(vb) >= len(vbm.VBucketMap) {
return fmt.Errorf("Invalid vbucket id %d", vb)
}
masterID := vbm.VBucketMap[vb][0]
master := feed.bucket.getMasterNode(masterID)
if master == "" {
return fmt.Errorf("Master node not found for vbucket %d", vb)
}
singleFeed := feed.nodeFeeds[master]
if singleFeed == nil {
return fmt.Errorf("UprFeed for this host not found")
}
if err := singleFeed.uprFeed.UprRequestStream(vb, opaque, flags,
vuuid, startSequence, endSequence, snapStart, snapEnd); err != nil {
return err
}
return nil
}
// UprCloseStream ends a vbucket stream.
func (feed *UprFeed) UprCloseStream(vb, opaqueMSB uint16) error {
defer func() {
if r := recover(); r != nil {
log.Panic("Panic in UprCloseStream. Feed %v Bucket %v ", feed, feed.bucket)
}
}()
vbm := feed.bucket.VBServerMap()
if len(vbm.VBucketMap) < int(vb) {
return fmt.Errorf("vbmap smaller than vbucket list: %v vs. %v",
vb, vbm.VBucketMap)
}
if int(vb) >= len(vbm.VBucketMap) {
return fmt.Errorf("Invalid vbucket id %d", vb)
}
masterID := vbm.VBucketMap[vb][0]
master := feed.bucket.getMasterNode(masterID)
if master == "" {
return fmt.Errorf("Master node not found for vbucket %d", vb)
}
singleFeed := feed.nodeFeeds[master]
if singleFeed == nil {
return fmt.Errorf("UprFeed for this host not found")
}
if err := singleFeed.uprFeed.CloseStream(vb, opaqueMSB); err != nil {
return err
}
return nil
}
// Goroutine that runs the feed
func (feed *UprFeed) run() {
retryInterval := initialRetryInterval
bucketOK := true
for {
// Connect to the UPR feed of each server node:
if bucketOK {
// Run until one of the sub-feeds fails:
select {
case <-feed.killSwitch:
case <-feed.quit:
return
}
//feed.closeNodeFeeds()
retryInterval = initialRetryInterval
}
if feed.closing {
// we have been asked to shut down
return
}
// On error, try to refresh the bucket in case the list of nodes changed:
logging.Infof("go-couchbase: UPR connection lost; reconnecting to bucket %q in %v",
feed.bucket.Name, retryInterval)
if err := feed.bucket.Refresh(); err != nil {
// if we fail to refresh the bucket, exit the feed
// MB-14917
logging.Infof("Unable to refresh bucket %s ", err.Error())
close(feed.output)
feed.outputClosed = true
feed.closeNodeFeeds()
return
}
// this will only connect to nodes that are not connected or changed
// user will have to reconnect the stream
err := feed.connectToNodes()
if err != nil {
logging.Infof("Unable to connect to nodes..exit ")
close(feed.output)
feed.outputClosed = true
feed.closeNodeFeeds()
return
}
bucketOK = err == nil
select {
case <-time.After(retryInterval):
case <-feed.quit:
return
}
if retryInterval *= 2; retryInterval > maximumRetryInterval {
retryInterval = maximumRetryInterval
}
}
}
func (feed *UprFeed) connectToNodes() (err error) {
nodeCount := 0
for _, serverConn := range feed.bucket.getConnPools(false /* not already locked */) {
// this may be a reconnection, so check if the connection to the node
// already exists. Connect only if the node is not found in the list
// or not yet connected
nodeFeed := feed.nodeFeeds[serverConn.host]
if nodeFeed != nil && nodeFeed.connected {
continue
}
var singleFeed *memcached.UprFeed
var name string
if feed.name == "" {
name = "DefaultUprClient"
} else {
name = feed.name
}
singleFeed, err = serverConn.StartUprFeed(name, feed.sequence, feed.dcp_buffer_size, feed.data_chan_size)
if err != nil {
logging.Errorf("go-couchbase: Error connecting to upr feed of %s: %v", serverConn.host, err)
feed.closeNodeFeeds()
return
}
// add the node to the connection map
feedInfo := &FeedInfo{
uprFeed: singleFeed,
connected: true,
host: serverConn.host,
quit: make(chan bool),
}
feed.nodeFeeds[serverConn.host] = feedInfo
go feed.forwardUprEvents(feedInfo, feed.killSwitch, serverConn.host)
feed.wg.Add(1)
nodeCount++
}
if nodeCount == 0 {
return fmt.Errorf("No connection to bucket")
}
return nil
}
// Goroutine that forwards Upr events from a single node's feed to the aggregate feed.
func (feed *UprFeed) forwardUprEvents(nodeFeed *FeedInfo, killSwitch chan bool, host string) {
singleFeed := nodeFeed.uprFeed
defer func() {
feed.wg.Done()
if r := recover(); r != nil {
//if feed is not closing, re-throw the panic
if !feed.outputClosed && !feed.closing {
panic(r)
} else {
logging.Errorf("Panic is recovered. Since feed is closed, exit gracefully")
}
}
}()
for {
select {
case <-nodeFeed.quit:
nodeFeed.connected = false
return
case event, ok := <-singleFeed.C:
if !ok {
if singleFeed.Error != nil {
logging.Errorf("go-couchbase: Upr feed from %s failed: %v", host, singleFeed.Error)
}
killSwitch <- true
return
}
if feed.outputClosed {
// someone closed the node feed
logging.Infof("Node feed closed, returning from forwardUprEvent")
return
}
feed.output <- event
if event.Status == gomemcached.NOT_MY_VBUCKET {
logging.Infof(" Got a not my vbucket error !! ")
if err := feed.bucket.Refresh(); err != nil {
logging.Errorf("Unable to refresh bucket %s ", err.Error())
feed.closeNodeFeeds()
return
}
// this will only connect to nodes that are not connected or changed
// user will have to reconnect the stream
if err := feed.connectToNodes(); err != nil {
logging.Errorf("Unable to connect to nodes %s", err.Error())
return
}
}
}
}
}
func (feed *UprFeed) closeNodeFeeds() {
for _, f := range feed.nodeFeeds {
logging.Infof(" Sending close to forwardUprEvent ")
close(f.quit)
f.uprFeed.Close()
}
feed.nodeFeeds = nil
}
// Close a Upr feed.
func (feed *UprFeed) Close() error {
select {
case <-feed.quit:
return nil
default:
}
feed.closing = true
feed.closeNodeFeeds()
close(feed.quit)
feed.wg.Wait()
if !feed.outputClosed {
feed.outputClosed = true
close(feed.output)
}
return nil
}
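A hedged sketch of the UPR entry points above; the feed name, vbucket id, and sequence bounds are illustrative:

    feed, err := bucket.StartUprFeed("example-feed", 0)
    if err != nil {
        log.Fatalf("Error starting upr feed: %v", err)
    }
    defer feed.Close()
    // Stream vbucket 0 from sequence 0 to the end of time.
    if err := feed.UprRequestStream(0, 0, 0, 0, 0, 0xFFFFFFFFFFFFFFFF, 0, 0); err != nil {
        log.Fatalf("Error requesting stream: %v", err)
    }
    for event := range feed.C {
        log.Printf("upr event: %+v", event)
    }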

vendor/github.com/couchbase/go-couchbase/users.go generated vendored Normal file

@ -0,0 +1,119 @@
package couchbase
import (
"bytes"
"fmt"
)
type User struct {
Name string
Id string
Domain string
Roles []Role
}
type Role struct {
Role string
BucketName string `json:"bucket_name"`
}
// Sample:
// {"role":"admin","name":"Admin","desc":"Can manage ALL cluster features including security.","ce":true}
// {"role":"query_select","bucket_name":"*","name":"Query Select","desc":"Can execute SELECT statement on bucket to retrieve data"}
type RoleDescription struct {
Role string
Name string
Desc string
Ce bool
BucketName string `json:"bucket_name"`
}
// GetUserRoles returns user-role data, as parsed JSON.
// Sample:
// [{"id":"ivanivanov","name":"Ivan Ivanov","roles":[{"role":"cluster_admin"},{"bucket_name":"default","role":"bucket_admin"}]},
// {"id":"petrpetrov","name":"Petr Petrov","roles":[{"role":"replication_admin"}]}]
func (c *Client) GetUserRoles() ([]interface{}, error) {
ret := make([]interface{}, 0, 1)
err := c.parseURLResponse("/settings/rbac/users", &ret)
if err != nil {
return nil, err
}
// Get the configured administrator.
// Expected result: {"port":8091,"username":"Administrator"}
adminInfo := make(map[string]interface{}, 2)
err = c.parseURLResponse("/settings/web", &adminInfo)
if err != nil {
return nil, err
}
// Create a special entry for the configured administrator.
adminResult := map[string]interface{}{
"name": adminInfo["username"],
"id": adminInfo["username"],
"domain" : "ns_server",
"roles": []interface{}{
map[string]interface{}{
"role": "admin",
},
},
}
// Add the configured administrator to the list of results.
ret = append(ret, adminResult)
return ret, nil
}
func (c *Client) GetUserInfoAll() ([]User, error) {
ret := make([]User, 0, 16)
err := c.parseURLResponse("/settings/rbac/users", &ret)
if err != nil {
return nil, err
}
return ret, nil
}
func rolesToParamFormat(roles []Role) string {
var buffer bytes.Buffer
for i, role := range roles {
if i > 0 {
buffer.WriteString(",")
}
buffer.WriteString(role.Role)
if role.BucketName != "" {
buffer.WriteString("[")
buffer.WriteString(role.BucketName)
buffer.WriteString("]")
}
}
return buffer.String()
}
func (c *Client) PutUserInfo(u *User) error {
params := map[string]interface{}{
"name": u.Name,
"roles": rolesToParamFormat(u.Roles),
}
var target string
switch u.Domain {
case "external":
target = "/settings/rbac/users/" + u.Id
case "local":
target = "/settings/rbac/users/local/" + u.Id
default:
return fmt.Errorf("Unknown user type: %s", u.Domain)
}
var ret string // PUT returns an empty string. We ignore it.
err := c.parsePutURLResponse(target, params, &ret)
return err
}
func (c *Client) GetRolesAll() ([]RoleDescription, error) {
ret := make([]RoleDescription, 0, 32)
err := c.parseURLResponse("/settings/rbac/roles", &ret)
if err != nil {
return nil, err
}
return ret, nil
}
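A hedged sketch of the RBAC helpers above, assuming `client` is the couchbase.Client returned by Connect(); the user details are illustrative:

    users, err := client.GetUserInfoAll()
    if err != nil {
        log.Fatalf("Error listing users: %v", err)
    }
    for _, u := range users {
        log.Printf("user %s (%s) has %d roles", u.Id, u.Domain, len(u.Roles))
    }
    err = client.PutUserInfo(&couchbase.User{
        Id:     "example-user",
        Name:   "Example User",
        Domain: "local",
        Roles:  []couchbase.Role{{Role: "bucket_admin", BucketName: "default"}},
    })
    if err != nil {
        log.Fatalf("Error saving user: %v", err)
    }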

vendor/github.com/couchbase/go-couchbase/util.go generated vendored Normal file

@ -0,0 +1,49 @@
package couchbase
import (
"fmt"
"net/url"
"strings"
)
// CleanupHost returns the hostname with the given suffix removed.
func CleanupHost(h, commonSuffix string) string {
if strings.HasSuffix(h, commonSuffix) {
return h[:len(h)-len(commonSuffix)]
}
return h
}
// FindCommonSuffix returns the longest common suffix from the given
// strings.
func FindCommonSuffix(input []string) string {
rv := ""
if len(input) < 2 {
return ""
}
from := input
for i := len(input[0]); i > 0; i-- {
common := true
suffix := input[0][i:]
for _, s := range from {
if !strings.HasSuffix(s, suffix) {
common = false
break
}
}
if common {
rv = suffix
}
}
return rv
}
// ParseURL is a wrapper around url.Parse with some sanity-checking
func ParseURL(urlStr string) (result *url.URL, err error) {
result, err = url.Parse(urlStr)
if result != nil && result.Scheme == "" {
result = nil
err = fmt.Errorf("invalid URL <%s>", urlStr)
}
return
}
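A quick sketch of the two helpers above:

    hosts := []string{"a.example.com:8091", "b.example.com:8091"}
    suffix := couchbase.FindCommonSuffix(hosts) // ".example.com:8091"
    short := couchbase.CleanupHost(hosts[0], suffix)
    fmt.Println(short) // "a"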

vendor/github.com/couchbase/go-couchbase/vbmap.go generated vendored Normal file

@ -0,0 +1,77 @@
package couchbase
var crc32tab = []uint32{
0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d}
// VBHash finds the vbucket for the given key.
func (b *Bucket) VBHash(key string) uint32 {
crc := uint32(0xffffffff)
for x := 0; x < len(key); x++ {
crc = (crc >> 8) ^ crc32tab[(uint64(crc)^uint64(key[x]))&0xff]
}
vbm := b.VBServerMap()
return ((^crc) >> 16) & 0x7fff & (uint32(len(vbm.VBucketMap)) - 1)
}
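VBHash pairs with the vbucket server map to locate a key's home; a minimal sketch, assuming a `bucket` handle as in the README example:

    vb := bucket.VBHash("someKey")
    vbm := bucket.VBServerMap()
    master := vbm.ServerList[vbm.VBucketMap[vb][0]]
    log.Printf("someKey lives in vbucket %d on %s", vb, master)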

vendor/github.com/couchbase/go-couchbase/views.go generated vendored Normal file

@ -0,0 +1,231 @@
package couchbase
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
"time"
)
// ViewRow represents a single result from a view.
//
// Doc is present only if include_docs was set on the request.
type ViewRow struct {
ID string
Key interface{}
Value interface{}
Doc *interface{}
}
// A ViewError is a node-specific error indicating a partial failure
// within a view result.
type ViewError struct {
From string
Reason string
}
func (ve ViewError) Error() string {
return "Node: " + ve.From + ", reason: " + ve.Reason
}
// ViewResult holds the entire result set from a view request,
// including the rows and the errors.
type ViewResult struct {
TotalRows int `json:"total_rows"`
Rows []ViewRow
Errors []ViewError
}
func (b *Bucket) randomBaseURL() (*url.URL, error) {
nodes := b.HealthyNodes()
if len(nodes) == 0 {
return nil, errors.New("no available couch rest URLs")
}
nodeNo := rand.Intn(len(nodes))
node := nodes[nodeNo]
b.RLock()
name := b.Name
pool := b.pool
b.RUnlock()
u, err := ParseURL(node.CouchAPIBase)
if err != nil {
return nil, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
name, nodeNo, node.CouchAPIBase, err)
} else if pool != nil {
u.User = pool.client.BaseURL.User
}
return u, err
}
const START_NODE_ID = -1
func (b *Bucket) randomNextURL(lastNode int) (*url.URL, int, error) {
nodes := b.HealthyNodes()
if len(nodes) == 0 {
return nil, -1, errors.New("no available couch rest URLs")
}
var nodeNo int
if lastNode == START_NODE_ID || lastNode >= len(nodes) {
// randomly select a node if the value of lastNode is invalid
nodeNo = rand.Intn(len(nodes))
} else {
// wrap around the node list
nodeNo = (lastNode + 1) % len(nodes)
}
b.RLock()
name := b.Name
pool := b.pool
b.RUnlock()
node := nodes[nodeNo]
u, err := ParseURL(node.CouchAPIBase)
if err != nil {
return nil, -1, fmt.Errorf("config error: Bucket %q node #%d CouchAPIBase=%q: %v",
name, nodeNo, node.CouchAPIBase, err)
} else if pool != nil {
u.User = pool.client.BaseURL.User
}
return u, nodeNo, err
}
// DocID is the document ID type for the startkey_docid parameter in
// views.
type DocID string
func qParam(k, v string) string {
format := `"%s"`
switch k {
case "startkey_docid", "endkey_docid", "stale":
format = "%s"
}
return fmt.Sprintf(format, v)
}
// ViewURL constructs a URL for a view with the given ddoc, view name,
// and parameters.
func (b *Bucket) ViewURL(ddoc, name string,
params map[string]interface{}) (string, error) {
u, err := b.randomBaseURL()
if err != nil {
return "", err
}
values := url.Values{}
for k, v := range params {
switch t := v.(type) {
case DocID:
values[k] = []string{string(t)}
case string:
values[k] = []string{qParam(k, t)}
case int:
values[k] = []string{fmt.Sprintf(`%d`, t)}
case bool:
values[k] = []string{fmt.Sprintf(`%v`, t)}
default:
b, err := json.Marshal(v)
if err != nil {
return "", fmt.Errorf("unsupported value-type %T in Query, "+
"json encoder said %v", t, err)
}
values[k] = []string{fmt.Sprintf(`%v`, string(b))}
}
}
if ddoc == "" && name == "_all_docs" {
u.Path = fmt.Sprintf("/%s/_all_docs", b.GetName())
} else {
u.Path = fmt.Sprintf("/%s/_design/%s/_view/%s", b.GetName(), ddoc, name)
}
u.RawQuery = values.Encode()
return u.String(), nil
}
// ViewCallback is called for each view invocation.
var ViewCallback func(ddoc, name string, start time.Time, err error)
// ViewCustom performs a view request that can map row values to a
// custom type.
//
// See the source to View for an example usage.
func (b *Bucket) ViewCustom(ddoc, name string, params map[string]interface{},
vres interface{}) (err error) {
if SlowServerCallWarningThreshold > 0 {
defer slowLog(time.Now(), "call to ViewCustom(%q, %q)", ddoc, name)
}
if ViewCallback != nil {
defer func(t time.Time) { ViewCallback(ddoc, name, t, err) }(time.Now())
}
u, err := b.ViewURL(ddoc, name, params)
if err != nil {
return err
}
req, err := http.NewRequest("GET", u, nil)
if err != nil {
return err
}
ah := b.authHandler(false /* bucket not yet locked */)
maybeAddAuth(req, ah)
res, err := doHTTPRequest(req)
if err != nil {
return fmt.Errorf("error starting view req at %v: %v", u, err)
}
defer res.Body.Close()
if res.StatusCode != 200 {
bod := make([]byte, 512)
l, _ := res.Body.Read(bod)
return fmt.Errorf("error executing view req at %v: %v - %s",
u, res.Status, bod[:l])
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
if err := json.Unmarshal(body, vres); err != nil {
return err
}
return nil
}
// View executes a view.
//
// The ddoc parameter is just the bare name of your design doc without
// the "_design/" prefix.
//
// Parameters are string keys with values that correspond to couchbase
// view parameters. Primitives should work fairly naturally (booleans,
// ints, strings, etc...) and other values will be JSON
// marshaled (useful for array indexing on view keys, for example).
//
// Example:
//
// res, err := couchbase.View("myddoc", "myview", map[string]interface{}{
// "group_level": 2,
// "startkey_docid": []interface{}{"thing"},
// "endkey_docid": []interface{}{"thing", map[string]string{}},
// "stale": false,
// })
func (b *Bucket) View(ddoc, name string, params map[string]interface{}) (ViewResult, error) {
vres := ViewResult{}
if err := b.ViewCustom(ddoc, name, params, &vres); err != nil {
// error accessing views; retry once after a bucket refresh
b.Refresh()
return vres, b.ViewCustom(ddoc, name, params, &vres)
} else {
return vres, nil
}
}
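As the ViewCustom doc above suggests, rows can also be decoded into a caller-defined type; a hedged sketch with illustrative names:

    var res struct {
        Rows []struct {
            ID    string      `json:"id"`
            Key   interface{} `json:"key"`
            Value int         `json:"value"`
        } `json:"rows"`
    }
    if err := bucket.ViewCustom("myddoc", "myview",
        map[string]interface{}{"reduce": false}, &res); err != nil {
        log.Fatalf("Error querying view: %v", err)
    }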

vendor/github.com/couchbase/gomemcached/LICENSE generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2013 Dustin Sallings
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/couchbase/gomemcached/README.markdown generated vendored Normal file

@ -0,0 +1,32 @@
# gomemcached
This is a memcached binary protocol toolkit in [go][go].
It provides client and server functionality as well as a little sample
server showing how I might make a server if I valued purity over
performance.
## Server Design
<div>
<img src="http://dustin.github.com/images/gomemcached.png"
alt="overview" style="float: right"/>
</div>
The basic design can be seen in [gocache]. A [storage
server][storage] is run as a goroutine that receives an `MCRequest` on
a channel, and then issues an `MCResponse` to a channel contained
within the request.
Each connection is a separate goroutine, of course, and is responsible
for all IO for that connection until the connection drops or the
`dataServer` decides it's stupid and sends a fatal response back over
the channel.
There is currently no work at all in making the thing perform (there
are specific areas I know need work). This is just my attempt to
learn the language somewhat.
[go]: http://golang.org/
[gocache]: gomemcached/blob/master/gocache/gocache.go
[storage]: gomemcached/blob/master/gocache/mc_storage.go
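As a rough illustration of that design (not the actual gocache code; the storageRequest wrapper is invented for this sketch), the storage goroutine might look like:

    type storageRequest struct {
        req  *gomemcached.MCRequest
        resp chan *gomemcached.MCResponse
    }

    func runStorage(ch chan storageRequest) {
        store := map[string][]byte{}
        for r := range ch {
            res := &gomemcached.MCResponse{Opcode: r.req.Opcode, Opaque: r.req.Opaque}
            switch r.req.Opcode {
            case gomemcached.SET:
                store[string(r.req.Key)] = r.req.Body
            case gomemcached.GET:
                if body, ok := store[string(r.req.Key)]; ok {
                    res.Body = body
                } else {
                    res.Status = gomemcached.KEY_ENOENT
                }
            default:
                res.Status = gomemcached.UNKNOWN_COMMAND
            }
            r.resp <- res // reply on the channel carried in the request
        }
    }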

vendor/github.com/couchbase/gomemcached/client/mc.go generated vendored Normal file

File diff suppressed because it is too large (1,146 lines)

vendor/github.com/couchbase/gomemcached/client/tap.go generated vendored Normal file

@ -0,0 +1,333 @@
package memcached
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"math"
"github.com/couchbase/gomemcached"
"github.com/couchbase/goutils/logging"
)
// TAP protocol docs: <http://www.couchbase.com/wiki/display/couchbase/TAP+Protocol>
// TapOpcode is the tap operation type (found in TapEvent)
type TapOpcode uint8
// Tap opcode values.
const (
TapBeginBackfill = TapOpcode(iota)
TapEndBackfill
TapMutation
TapDeletion
TapCheckpointStart
TapCheckpointEnd
tapEndStream
)
const tapMutationExtraLen = 16
var tapOpcodeNames map[TapOpcode]string
func init() {
tapOpcodeNames = map[TapOpcode]string{
TapBeginBackfill: "BeginBackfill",
TapEndBackfill: "EndBackfill",
TapMutation: "Mutation",
TapDeletion: "Deletion",
TapCheckpointStart: "TapCheckpointStart",
TapCheckpointEnd: "TapCheckpointEnd",
tapEndStream: "EndStream",
}
}
func (opcode TapOpcode) String() string {
name := tapOpcodeNames[opcode]
if name == "" {
name = fmt.Sprintf("#%d", opcode)
}
return name
}
// TapEvent is a TAP notification of an operation on the server.
type TapEvent struct {
Opcode TapOpcode // Type of event
VBucket uint16 // VBucket this event applies to
Flags uint32 // Item flags
Expiry uint32 // Item expiration time
Key, Value []byte // Item key/value
Cas uint64
}
func makeTapEvent(req gomemcached.MCRequest) *TapEvent {
event := TapEvent{
VBucket: req.VBucket,
}
switch req.Opcode {
case gomemcached.TAP_MUTATION:
event.Opcode = TapMutation
event.Key = req.Key
event.Value = req.Body
event.Cas = req.Cas
case gomemcached.TAP_DELETE:
event.Opcode = TapDeletion
event.Key = req.Key
event.Cas = req.Cas
case gomemcached.TAP_CHECKPOINT_START:
event.Opcode = TapCheckpointStart
case gomemcached.TAP_CHECKPOINT_END:
event.Opcode = TapCheckpointEnd
case gomemcached.TAP_OPAQUE:
if len(req.Extras) < 8+4 {
return nil
}
switch op := int(binary.BigEndian.Uint32(req.Extras[8:])); op {
case gomemcached.TAP_OPAQUE_INITIAL_VBUCKET_STREAM:
event.Opcode = TapBeginBackfill
case gomemcached.TAP_OPAQUE_CLOSE_BACKFILL:
event.Opcode = TapEndBackfill
case gomemcached.TAP_OPAQUE_CLOSE_TAP_STREAM:
event.Opcode = tapEndStream
case gomemcached.TAP_OPAQUE_ENABLE_AUTO_NACK:
return nil
case gomemcached.TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC:
return nil
default:
logging.Infof("TapFeed: Ignoring TAP_OPAQUE/%d", op)
return nil // unknown opaque event
}
case gomemcached.NOOP:
return nil // ignore
default:
logging.Infof("TapFeed: Ignoring %s", req.Opcode)
return nil // unknown event
}
if len(req.Extras) >= tapMutationExtraLen &&
(event.Opcode == TapMutation || event.Opcode == TapDeletion) {
event.Flags = binary.BigEndian.Uint32(req.Extras[8:])
event.Expiry = binary.BigEndian.Uint32(req.Extras[12:])
}
return &event
}
func (event TapEvent) String() string {
switch event.Opcode {
case TapBeginBackfill, TapEndBackfill, TapCheckpointStart, TapCheckpointEnd:
return fmt.Sprintf("<TapEvent %s, vbucket=%d>",
event.Opcode, event.VBucket)
default:
return fmt.Sprintf("<TapEvent %s, key=%q (%d bytes) flags=%x, exp=%d>",
event.Opcode, event.Key, len(event.Value),
event.Flags, event.Expiry)
}
}
// TapArguments are parameters for requesting a TAP feed.
//
// Call DefaultTapArguments to get a default one.
type TapArguments struct {
// Timestamp of oldest item to send.
//
// Use TapNoBackfill to suppress all past items.
Backfill uint64
// If set, server will disconnect after sending existing items.
Dump bool
// The indices of the vbuckets to watch; empty/nil to watch all.
VBuckets []uint16
// Transfers ownership of vbuckets during cluster rebalance.
Takeover bool
// If true, server will wait for client ACK after every notification.
SupportAck bool
// If true, client doesn't want values so server shouldn't send them.
KeysOnly bool
// If true, client wants the server to send checkpoint events.
Checkpoint bool
// Optional identifier to use for this client, to allow reconnects
ClientName string
// Registers this client (by name) till explicitly deregistered.
RegisteredClient bool
}
// Value for TapArguments.Backfill denoting that no past events at all
// should be sent.
const TapNoBackfill = math.MaxUint64
// DefaultTapArguments returns a default set of parameter values to
// pass to StartTapFeed.
func DefaultTapArguments() TapArguments {
return TapArguments{
Backfill: TapNoBackfill,
}
}
func (args *TapArguments) flags() []byte {
var flags gomemcached.TapConnectFlag
if args.Backfill != 0 {
flags |= gomemcached.BACKFILL
}
if args.Dump {
flags |= gomemcached.DUMP
}
if len(args.VBuckets) > 0 {
flags |= gomemcached.LIST_VBUCKETS
}
if args.Takeover {
flags |= gomemcached.TAKEOVER_VBUCKETS
}
if args.SupportAck {
flags |= gomemcached.SUPPORT_ACK
}
if args.KeysOnly {
flags |= gomemcached.REQUEST_KEYS_ONLY
}
if args.Checkpoint {
flags |= gomemcached.CHECKPOINT
}
if args.RegisteredClient {
flags |= gomemcached.REGISTERED_CLIENT
}
encoded := make([]byte, 4)
binary.BigEndian.PutUint32(encoded, uint32(flags))
return encoded
}
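// Example (illustrative, based only on the code above): DefaultTapArguments
// sets Backfill to TapNoBackfill, which is non-zero, so its flags() encode
// just the BACKFILL bit as a big-endian uint32:
//
//	args := DefaultTapArguments()
//	args.flags() // == []byte{0x00, 0x00, 0x00, 0x01}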
func must(err error) {
if err != nil {
panic(err)
}
}
func (args *TapArguments) bytes() (rv []byte) {
buf := bytes.NewBuffer([]byte{})
if args.Backfill > 0 {
must(binary.Write(buf, binary.BigEndian, uint64(args.Backfill)))
}
if len(args.VBuckets) > 0 {
must(binary.Write(buf, binary.BigEndian, uint16(len(args.VBuckets))))
for i := 0; i < len(args.VBuckets); i++ {
must(binary.Write(buf, binary.BigEndian, uint16(args.VBuckets[i])))
}
}
return buf.Bytes()
}
// TapFeed represents a stream of events from a server.
type TapFeed struct {
C <-chan TapEvent
Error error
closer chan bool
}
// StartTapFeed starts a TAP feed on a client connection.
//
// The events can be read from the returned channel. The connection
// can no longer be used for other purposes; it's now reserved for
// receiving the TAP messages. To stop receiving events, close the
// client connection.
func (mc *Client) StartTapFeed(args TapArguments) (*TapFeed, error) {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.TAP_CONNECT,
Key: []byte(args.ClientName),
Extras: args.flags(),
Body: args.bytes()}
err := mc.Transmit(rq)
if err != nil {
return nil, err
}
ch := make(chan TapEvent)
feed := &TapFeed{
C: ch,
closer: make(chan bool),
}
go mc.runFeed(ch, feed)
return feed, nil
}
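// Example (illustrative sketch, not part of this package): start a TAP feed
// on an already-connected *Client `mc` and drain its event channel. `mc` is
// an assumed connected client; any established connection works.
//
//	args := DefaultTapArguments()
//	args.Dump = true // server disconnects after sending existing items
//	feed, err := mc.StartTapFeed(args)
//	if err != nil {
//		log.Fatalf("StartTapFeed: %v", err)
//	}
//	defer feed.Close()
//	for event := range feed.C {
//		log.Printf("tap event: %s", event)
//	}
//	if feed.Error != nil {
//		log.Printf("feed terminated: %v", feed.Error)
//	}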
// TapRecvHook is called after every incoming tap packet is received.
var TapRecvHook func(*gomemcached.MCRequest, int, error)
// Internal goroutine that reads from the socket and writes events to
// the channel
func (mc *Client) runFeed(ch chan TapEvent, feed *TapFeed) {
defer close(ch)
var headerBuf [gomemcached.HDR_LEN]byte
loop:
for {
// Read the next request from the server.
//
// (Can't call mc.Receive() because it reads a
// _response_ not a request.)
var pkt gomemcached.MCRequest
n, err := pkt.Receive(mc.conn, headerBuf[:])
if TapRecvHook != nil {
TapRecvHook(&pkt, n, err)
}
if err != nil {
if err != io.EOF {
feed.Error = err
}
break loop
}
//logging.Infof("** TapFeed received %#v : %q", pkt, pkt.Body)
if pkt.Opcode == gomemcached.TAP_CONNECT {
// This is not an event from the server; it's
// an error response to my connect request.
feed.Error = fmt.Errorf("tap connection failed: %s", pkt.Body)
break loop
}
event := makeTapEvent(pkt)
if event != nil {
if event.Opcode == tapEndStream {
break loop
}
select {
case ch <- *event:
case <-feed.closer:
break loop
}
}
if len(pkt.Extras) >= 4 {
reqFlags := binary.BigEndian.Uint16(pkt.Extras[2:])
if reqFlags&gomemcached.TAP_ACK != 0 {
if _, err := mc.sendAck(&pkt); err != nil {
feed.Error = err
break loop
}
}
}
}
if err := mc.Close(); err != nil {
logging.Errorf("Error closing memcached client: %v", err)
}
}
func (mc *Client) sendAck(pkt *gomemcached.MCRequest) (int, error) {
res := gomemcached.MCResponse{
Opcode: pkt.Opcode,
Opaque: pkt.Opaque,
Status: gomemcached.SUCCESS,
}
return res.Transmit(mc.conn)
}
// Close terminates a TapFeed.
//
// Call this if you stop using a TapFeed before its channel ends.
func (feed *TapFeed) Close() {
close(feed.closer)
}


@ -0,0 +1,67 @@
package memcached
import (
"errors"
"io"
"github.com/couchbase/gomemcached"
)
var errNoConn = errors.New("no connection")
// UnwrapMemcachedError converts memcached errors to normal responses.
//
// If the error is a memcached response, declare the error to be nil
// so a client can handle the status without worrying about whether it
// indicates success or failure.
func UnwrapMemcachedError(rv *gomemcached.MCResponse,
err error) (*gomemcached.MCResponse, error) {
if rv == err {
return rv, nil
}
return rv, err
}
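// Example (sketch): with UnwrapMemcachedError a caller can branch on the
// response status instead of treating every non-SUCCESS status as a Go error:
//
//	res, err = UnwrapMemcachedError(res, err)
//	if err != nil {
//		return err // genuine transport or protocol failure
//	}
//	if res.Status == gomemcached.KEY_ENOENT {
//		// a miss is a normal outcome here
//	}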
// ReceiveHook is called after every packet is received (or attempted to be)
var ReceiveHook func(*gomemcached.MCResponse, int, error)
func getResponse(s io.Reader, hdrBytes []byte) (rv *gomemcached.MCResponse, n int, err error) {
if s == nil {
return nil, 0, errNoConn
}
rv = &gomemcached.MCResponse{}
n, err = rv.Receive(s, hdrBytes)
if ReceiveHook != nil {
ReceiveHook(rv, n, err)
}
if err == nil && (rv.Status != gomemcached.SUCCESS && rv.Status != gomemcached.AUTH_CONTINUE) {
err = rv
}
return rv, n, err
}
// TransmitHook is called after each packet is transmitted.
var TransmitHook func(*gomemcached.MCRequest, int, error)
func transmitRequest(o io.Writer, req *gomemcached.MCRequest) (int, error) {
if o == nil {
return 0, errNoConn
}
n, err := req.Transmit(o)
if TransmitHook != nil {
TransmitHook(req, n, err)
}
return n, err
}
func transmitResponse(o io.Writer, res *gomemcached.MCResponse) (int, error) {
if o == nil {
return 0, errNoConn
}
n, err := res.Transmit(o)
return n, err
}


@ -0,0 +1,800 @@
// go implementation of upr client.
// See https://github.com/couchbaselabs/cbupr/blob/master/transport-spec.md
// TODO
// 1. Use a pool allocator to avoid garbage
package memcached
import (
"encoding/binary"
"errors"
"fmt"
"github.com/couchbase/gomemcached"
"github.com/couchbase/goutils/logging"
"strconv"
"sync"
)
const uprMutationExtraLen = 30
const uprDeletetionExtraLen = 18
const uprSnapshotExtraLen = 20
const bufferAckThreshold = 0.2
const opaqueOpen = 0xBEAF0001
const opaqueFailover = 0xDEADBEEF
const uprDefaultNoopInterval = 120
// UprEvent memcached events for UPR streams.
type UprEvent struct {
Opcode gomemcached.CommandCode // Type of event
Status gomemcached.Status // Response status
VBucket uint16 // VBucket this event applies to
DataType uint8 // data type
Opaque uint16 // 16 MSB of opaque
VBuuid uint64 // This field is set by downstream
Flags uint32 // Item flags
Expiry uint32 // Item expiration time
Key, Value []byte // Item key/value
OldValue []byte // TODO: TBD: old document value
Cas uint64 // CAS value of the item
Seqno uint64 // sequence number of the mutation
RevSeqno uint64 // rev sequence number : deletions
LockTime uint32 // Lock time
MetadataSize uint16 // Metadata size
SnapstartSeq uint64 // start sequence number of this snapshot
SnapendSeq uint64 // End sequence number of the snapshot
SnapshotType uint32 // 0: disk 1: memory
FailoverLog *FailoverLog // Failover log containing vbuuid and sequence number
Error error // Error value in case of a failure
ExtMeta []byte
AckSize uint32 // The number of bytes that can be Acked to DCP
}
// UprStream is per stream data structure over an UPR Connection.
type UprStream struct {
Vbucket uint16 // Vbucket id
Vbuuid uint64 // vbucket uuid
StartSeq uint64 // start sequence number
EndSeq uint64 // end sequence number
connected bool
}
// UprFeed represents an UPR feed. A feed contains a connection to a single
// host and multiple vBuckets
type UprFeed struct {
// lock for feed.vbstreams
muVbstreams sync.RWMutex
// lock for feed.closed
muClosed sync.RWMutex
C <-chan *UprEvent // Exported channel for receiving UPR events
vbstreams map[uint16]*UprStream // vb->stream mapping
closer chan bool // closer
conn *Client // connection to UPR producer
Error error // error
bytesRead uint64 // total bytes read on this connection
toAckBytes uint32 // bytes client has read
maxAckBytes uint32 // Max buffer control ack bytes
stats UprStats // Stats for upr client
transmitCh chan *gomemcached.MCRequest // transmit command channel
transmitCl chan bool // closer channel for transmit go-routine
closed bool // flag indicating whether the feed has been closed
// flag indicating whether client of upr feed will send ack to upr feed
// if flag is true, upr feed will use ack from client to determine whether/when to send ack to DCP
// if flag is false, upr feed will track how many bytes it has sent to client
// and use that to determine whether/when to send ack to DCP
ackByClient bool
}
// Exported interface - to allow for mocking
type UprFeedIface interface {
Close()
Closed() bool
CloseStream(vbno, opaqueMSB uint16) error
GetError() error
GetUprStats() *UprStats
IncrementAckBytes(bytes uint32) error
GetUprEventCh() <-chan *UprEvent
StartFeed() error
StartFeedWithConfig(datachan_len int) error
UprOpen(name string, sequence uint32, bufSize uint32) error
UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error
UprRequestStream(vbno, opaqueMSB uint16, flags uint32, vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error
}
type UprStats struct {
TotalBytes uint64
TotalMutation uint64
TotalBufferAckSent uint64
TotalSnapShot uint64
}
// FailoverLog containing vbuuid and sequence number
type FailoverLog [][2]uint64
// error codes
var ErrorInvalidLog = errors.New("couchbase.errorInvalidLog")
func (flogp *FailoverLog) Latest() (vbuuid, seqno uint64, err error) {
if flogp != nil {
flog := *flogp
latest := flog[len(flog)-1]
return latest[0], latest[1], nil
}
return vbuuid, seqno, ErrorInvalidLog
}
func makeUprEvent(rq gomemcached.MCRequest, stream *UprStream) *UprEvent {
event := &UprEvent{
Opcode: rq.Opcode,
VBucket: stream.Vbucket,
VBuuid: stream.Vbuuid,
Key: rq.Key,
Value: rq.Body,
Cas: rq.Cas,
ExtMeta: rq.ExtMeta,
DataType: rq.DataType,
AckSize: uint32(rq.Size()),
}
// 16 LSBits are used by client library to encode vbucket number.
// 16 MSBits are left for application to multiplex on opaque value.
event.Opaque = appOpaque(rq.Opaque)
if len(rq.Extras) >= uprMutationExtraLen &&
event.Opcode == gomemcached.UPR_MUTATION {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.Flags = binary.BigEndian.Uint32(rq.Extras[16:20])
event.Expiry = binary.BigEndian.Uint32(rq.Extras[20:24])
event.LockTime = binary.BigEndian.Uint32(rq.Extras[24:28])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[28:30])
} else if len(rq.Extras) >= uprDeletetionExtraLen &&
(event.Opcode == gomemcached.UPR_DELETION ||
event.Opcode == gomemcached.UPR_EXPIRATION) {
event.Seqno = binary.BigEndian.Uint64(rq.Extras[:8])
event.RevSeqno = binary.BigEndian.Uint64(rq.Extras[8:16])
event.MetadataSize = binary.BigEndian.Uint16(rq.Extras[16:18])
} else if len(rq.Extras) >= uprSnapshotExtraLen &&
event.Opcode == gomemcached.UPR_SNAPSHOT {
event.SnapstartSeq = binary.BigEndian.Uint64(rq.Extras[:8])
event.SnapendSeq = binary.BigEndian.Uint64(rq.Extras[8:16])
event.SnapshotType = binary.BigEndian.Uint32(rq.Extras[16:20])
}
return event
}
func (event *UprEvent) String() string {
name := gomemcached.CommandNames[event.Opcode]
if name == "" {
name = fmt.Sprintf("#%d", event.Opcode)
}
return name
}
func (feed *UprFeed) sendCommands(mc *Client) {
transmitCh := feed.transmitCh
transmitCl := feed.transmitCl
loop:
for {
select {
case command := <-transmitCh:
if err := mc.Transmit(command); err != nil {
logging.Errorf("Failed to transmit command %s. Error %s", command.Opcode.String(), err.Error())
// get feed to close and runFeed routine to exit
feed.Close()
break loop
}
case <-transmitCl:
break loop
}
}
// After sendCommands exits, writes to transmitCh will block forever.
// So wherever we write to transmitCh, e.g. in CloseStream(), we must check feed closure to have an exit route.
logging.Infof("sendCommands exiting")
}
// NewUprFeed creates a new UPR Feed.
// TODO: Describe side-effects on bucket instance and its connection pool.
func (mc *Client) NewUprFeed() (*UprFeed, error) {
return mc.NewUprFeedWithConfig(false /*ackByClient*/)
}
func (mc *Client) NewUprFeedWithConfig(ackByClient bool) (*UprFeed, error) {
feed := &UprFeed{
conn: mc,
closer: make(chan bool, 1),
vbstreams: make(map[uint16]*UprStream),
transmitCh: make(chan *gomemcached.MCRequest),
transmitCl: make(chan bool),
ackByClient: ackByClient,
}
go feed.sendCommands(mc)
return feed, nil
}
func (mc *Client) NewUprFeedIface() (UprFeedIface, error) {
return mc.NewUprFeed()
}
func (mc *Client) NewUprFeedWithConfigIface(ackByClient bool) (UprFeedIface, error) {
return mc.NewUprFeedWithConfig(ackByClient)
}
func doUprOpen(mc *Client, name string, sequence uint32, enableXATTR bool) error {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_OPEN,
Key: []byte(name),
Opaque: opaqueOpen,
}
rq.Extras = make([]byte, 8)
binary.BigEndian.PutUint32(rq.Extras[:4], sequence)
// opens a producer type connection
flags := gomemcached.DCP_PRODUCER
if enableXATTR {
// set DCP_OPEN_INCLUDE_XATTRS bit in flags
flags = flags | gomemcached.DCP_OPEN_INCLUDE_XATTRS
}
binary.BigEndian.PutUint32(rq.Extras[4:], flags)
if err := mc.Transmit(rq); err != nil {
return err
}
if res, err := mc.Receive(); err != nil {
return err
} else if res.Opcode != gomemcached.UPR_OPEN {
return fmt.Errorf("unexpected #opcode %v", res.Opcode)
} else if rq.Opaque != res.Opaque {
return fmt.Errorf("opaque mismatch, %v over %v", res.Opaque, res.Opaque)
} else if res.Status != gomemcached.SUCCESS {
return fmt.Errorf("error %v", res.Status)
}
return nil
}
// UprOpen connects to a UPR producer.
// name: name of the UPR connection
// sequence: sequence number for the connection
// bufSize: maximum size of the connection buffer; flow control is enabled only when > 0
func (feed *UprFeed) UprOpen(name string, sequence uint32, bufSize uint32) error {
return feed.uprOpen(name, sequence, bufSize, false /*enableXATTR*/)
}
// UprOpen with XATTR enabled.
func (feed *UprFeed) UprOpenWithXATTR(name string, sequence uint32, bufSize uint32) error {
return feed.uprOpen(name, sequence, bufSize, true /*enableXATTR*/)
}
func (feed *UprFeed) uprOpen(name string, sequence uint32, bufSize uint32, enableXATTR bool) error {
mc := feed.conn
var err error
if err = doUprOpen(mc, name, sequence, enableXATTR); err != nil {
return err
}
// send a UPR control message to set the window size for this connection
if bufSize > 0 {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_CONTROL,
Key: []byte("connection_buffer_size"),
Body: []byte(strconv.Itoa(int(bufSize))),
}
err = feed.writeToTransmitCh(rq)
if err != nil {
return err
}
feed.maxAckBytes = uint32(bufferAckThreshold * float32(bufSize))
}
// enable noop and set noop interval
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_CONTROL,
Key: []byte("enable_noop"),
Body: []byte("true"),
}
err = feed.writeToTransmitCh(rq)
if err != nil {
return err
}
rq = &gomemcached.MCRequest{
Opcode: gomemcached.UPR_CONTROL,
Key: []byte("set_noop_interval"),
Body: []byte(strconv.Itoa(int(uprDefaultNoopInterval))),
}
err = feed.writeToTransmitCh(rq)
if err != nil {
return err
}
return nil
}
// UprGetFailoverLog for given list of vbuckets.
func (mc *Client) UprGetFailoverLog(
vb []uint16) (map[uint16]*FailoverLog, error) {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_FAILOVERLOG,
Opaque: opaqueFailover,
}
if err := doUprOpen(mc, "FailoverLog", 0, false); err != nil {
return nil, fmt.Errorf("UPR_OPEN Failed %s", err.Error())
}
failoverLogs := make(map[uint16]*FailoverLog)
for _, vBucket := range vb {
rq.VBucket = vBucket
if err := mc.Transmit(rq); err != nil {
return nil, err
}
res, err := mc.Receive()
if err != nil {
return nil, fmt.Errorf("failed to receive %s", err.Error())
} else if res.Opcode != gomemcached.UPR_FAILOVERLOG || res.Status != gomemcached.SUCCESS {
return nil, fmt.Errorf("unexpected #opcode %v", res.Opcode)
}
flog, err := parseFailoverLog(res.Body)
if err != nil {
return nil, fmt.Errorf("unable to parse failover logs for vb %d", vb)
}
failoverLogs[vBucket] = flog
}
return failoverLogs, nil
}
// UprRequestStream for a single vbucket.
func (feed *UprFeed) UprRequestStream(vbno, opaqueMSB uint16, flags uint32,
vuuid, startSequence, endSequence, snapStart, snapEnd uint64) error {
rq := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_STREAMREQ,
VBucket: vbno,
Opaque: composeOpaque(vbno, opaqueMSB),
}
rq.Extras = make([]byte, 48) // #Extras
binary.BigEndian.PutUint32(rq.Extras[:4], flags)
binary.BigEndian.PutUint32(rq.Extras[4:8], uint32(0))
binary.BigEndian.PutUint64(rq.Extras[8:16], startSequence)
binary.BigEndian.PutUint64(rq.Extras[16:24], endSequence)
binary.BigEndian.PutUint64(rq.Extras[24:32], vuuid)
binary.BigEndian.PutUint64(rq.Extras[32:40], snapStart)
binary.BigEndian.PutUint64(rq.Extras[40:48], snapEnd)
stream := &UprStream{
Vbucket: vbno,
Vbuuid: vuuid,
StartSeq: startSequence,
EndSeq: endSequence,
}
feed.muVbstreams.Lock()
// Any client that has ever called this method, regardless of return code,
// should expect a potential UPR_CLOSESTREAM message due to this new map entry prior to Transmit.
feed.vbstreams[vbno] = stream
feed.muVbstreams.Unlock()
if err := feed.conn.Transmit(rq); err != nil {
logging.Errorf("Error in StreamRequest %s", err.Error())
// If an error occurs during transmit, then the UPRFeed will keep the stream
// in the vbstreams map. This is to prevent nil lookup from any previously
// sent stream requests.
return err
}
return nil
}
// CloseStream for specified vbucket.
func (feed *UprFeed) CloseStream(vbno, opaqueMSB uint16) error {
err := feed.validateCloseStream(vbno)
if err != nil {
logging.Infof("CloseStream for %v has been skipped because of error %v", vbno, err)
return err
}
closeStream := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_CLOSESTREAM,
VBucket: vbno,
Opaque: composeOpaque(vbno, opaqueMSB),
}
feed.writeToTransmitCh(closeStream)
return nil
}
func (feed *UprFeed) GetUprEventCh() <-chan *UprEvent {
return feed.C
}
func (feed *UprFeed) GetError() error {
return feed.Error
}
func (feed *UprFeed) validateCloseStream(vbno uint16) error {
feed.muVbstreams.RLock()
defer feed.muVbstreams.RUnlock()
if feed.vbstreams[vbno] == nil {
return fmt.Errorf("Stream for vb %d has not been requested", vbno)
}
return nil
}
func (feed *UprFeed) writeToTransmitCh(rq *gomemcached.MCRequest) error {
// write to transmitCh may block forever if sendCommands has exited
// check for feed closure to have an exit route in this case
select {
case <-feed.closer:
errMsg := fmt.Sprintf("Abort sending request to transmitCh because feed has been closed. request=%v", rq)
logging.Infof(errMsg)
return errors.New(errMsg)
case feed.transmitCh <- rq:
}
return nil
}
// StartFeed starts the UPR feed.
func (feed *UprFeed) StartFeed() error {
return feed.StartFeedWithConfig(10)
}
func (feed *UprFeed) StartFeedWithConfig(datachan_len int) error {
ch := make(chan *UprEvent, datachan_len)
feed.C = ch
go feed.runFeed(ch)
return nil
}
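// Example (illustrative sketch): a minimal UPR consumer over a connected
// *Client `mc`, streaming vbucket 0 from sequence 0 onward. The names `mc`
// and "example-feed" are assumptions for illustration only.
//
//	feed, err := mc.NewUprFeed()
//	if err != nil { /* handle */ }
//	if err = feed.UprOpen("example-feed", 0, 16*1024*1024); err != nil { /* handle */ }
//	if err = feed.StartFeed(); err != nil { /* handle */ }
//	err = feed.UprRequestStream(0 /*vbno*/, 0 /*opaqueMSB*/, 0 /*flags*/,
//		0 /*vbuuid*/, 0 /*startSeq*/, 0xFFFFFFFFFFFFFFFF /*endSeq*/, 0, 0)
//	if err != nil { /* handle */ }
//	for event := range feed.GetUprEventCh() {
//		// inspect event.Opcode, event.Key, event.Value
//	}
//	feed.Close()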
func parseFailoverLog(body []byte) (*FailoverLog, error) {
if len(body)%16 != 0 {
err := fmt.Errorf("invalid body length %v, in failover-log", len(body))
return nil, err
}
log := make(FailoverLog, len(body)/16)
for i, j := 0, 0; i < len(body); i += 16 {
vuuid := binary.BigEndian.Uint64(body[i : i+8])
seqno := binary.BigEndian.Uint64(body[i+8 : i+16])
log[j] = [2]uint64{vuuid, seqno}
j++
}
return &log, nil
}
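// Example: each failover-log entry is 16 bytes on the wire, so a 32-byte
// body decodes into two (vbuuid, seqno) pairs:
//
//	body := make([]byte, 32)
//	binary.BigEndian.PutUint64(body[0:8], 0x1111) // vbuuid of entry 0
//	binary.BigEndian.PutUint64(body[8:16], 42)    // seqno of entry 0
//	flog, _ := parseFailoverLog(body)             // (*flog)[0] == [2]uint64{0x1111, 42}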
func handleStreamRequest(
res *gomemcached.MCResponse,
headerBuf []byte,
) (gomemcached.Status, uint64, *FailoverLog, error) {
var rollback uint64
var err error
switch {
case res.Status == gomemcached.ROLLBACK:
logging.Infof("Rollback response. body=%v, headerBuf=%v\n", res.Body, headerBuf)
rollback = binary.BigEndian.Uint64(res.Body)
logging.Infof("Rollback %v for vb %v\n", rollback, res.Opaque)
return res.Status, rollback, nil, nil
case res.Status != gomemcached.SUCCESS:
err = fmt.Errorf("unexpected status %v, for %v", res.Status, res.Opaque)
return res.Status, 0, nil, err
}
flog, err := parseFailoverLog(res.Body[:])
return res.Status, rollback, flog, err
}
// generate stream end responses for all active vb streams
func (feed *UprFeed) doStreamClose(ch chan *UprEvent) {
feed.muVbstreams.RLock()
uprEvents := make([]*UprEvent, len(feed.vbstreams))
index := 0
for vbno, stream := range feed.vbstreams {
uprEvent := &UprEvent{
VBucket: vbno,
VBuuid: stream.Vbuuid,
Opcode: gomemcached.UPR_STREAMEND,
}
uprEvents[index] = uprEvent
index++
}
// release the lock before sending uprEvents to ch, which may block
feed.muVbstreams.RUnlock()
loop:
for _, uprEvent := range uprEvents {
select {
case ch <- uprEvent:
case <-feed.closer:
logging.Infof("Feed has been closed. Aborting doStreamClose.")
break loop
}
}
}
func (feed *UprFeed) runFeed(ch chan *UprEvent) {
defer close(ch)
var headerBuf [gomemcached.HDR_LEN]byte
var pkt gomemcached.MCRequest
var event *UprEvent
mc := feed.conn.Hijack()
uprStats := &feed.stats
loop:
for {
select {
case <-feed.closer:
logging.Infof("Feed has been closed. Exiting.")
break loop
default:
sendAck := false
bytes, err := pkt.Receive(mc, headerBuf[:])
if err != nil {
logging.Errorf("Error in receive %s", err.Error())
feed.Error = err
// send all the stream close messages to the client
feed.doStreamClose(ch)
break loop
} else {
event = nil
res := &gomemcached.MCResponse{
Opcode: pkt.Opcode,
Cas: pkt.Cas,
Opaque: pkt.Opaque,
Status: gomemcached.Status(pkt.VBucket),
Extras: pkt.Extras,
Key: pkt.Key,
Body: pkt.Body,
}
vb := vbOpaque(pkt.Opaque)
uprStats.TotalBytes += uint64(bytes) // running total of bytes read on this connection
feed.muVbstreams.RLock()
stream := feed.vbstreams[vb]
feed.muVbstreams.RUnlock()
switch pkt.Opcode {
case gomemcached.UPR_STREAMREQ:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
status, rb, flog, err := handleStreamRequest(res, headerBuf[:])
if status == gomemcached.ROLLBACK {
event = makeUprEvent(pkt, stream)
event.Status = status
// rollback stream
logging.Infof("UPR_STREAMREQ with rollback %d for vb %d Failed: %v", rb, vb, err)
// delete the stream from the vbmap for the feed
feed.muVbstreams.Lock()
delete(feed.vbstreams, vb)
feed.muVbstreams.Unlock()
} else if status == gomemcached.SUCCESS {
event = makeUprEvent(pkt, stream)
event.Seqno = stream.StartSeq
event.FailoverLog = flog
event.Status = status
stream.connected = true
logging.Infof("UPR_STREAMREQ for vb %d successful", vb)
} else if err != nil {
logging.Errorf("UPR_STREAMREQ for vbucket %d erro %s", vb, err.Error())
event = &UprEvent{
Opcode: gomemcached.UPR_STREAMREQ,
Status: status,
VBucket: vb,
Error: err,
}
// delete the stream
feed.muVbstreams.Lock()
delete(feed.vbstreams, vb)
feed.muVbstreams.Unlock()
}
case gomemcached.UPR_MUTATION,
gomemcached.UPR_DELETION,
gomemcached.UPR_EXPIRATION:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
event = makeUprEvent(pkt, stream)
uprStats.TotalMutation++
sendAck = true
case gomemcached.UPR_STREAMEND:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
//stream has ended
event = makeUprEvent(pkt, stream)
logging.Infof("Stream Ended for vb %d", vb)
sendAck = true
feed.muVbstreams.Lock()
delete(feed.vbstreams, vb)
feed.muVbstreams.Unlock()
case gomemcached.UPR_SNAPSHOT:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
// snapshot marker
event = makeUprEvent(pkt, stream)
uprStats.TotalSnapShot++
sendAck = true
case gomemcached.UPR_FLUSH:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
// special processing for flush ?
event = makeUprEvent(pkt, stream)
case gomemcached.UPR_CLOSESTREAM:
if stream == nil {
logging.Infof("Stream not found for vb %d: %#v", vb, pkt)
break loop
}
event = makeUprEvent(pkt, stream)
event.Opcode = gomemcached.UPR_STREAMEND // opcode re-write !!
logging.Infof("Stream Closed for vb %d StreamEnd simulated", vb)
sendAck = true
feed.muVbstreams.Lock()
delete(feed.vbstreams, vb)
feed.muVbstreams.Unlock()
case gomemcached.UPR_ADDSTREAM:
logging.Infof("Opcode %v not implemented", pkt.Opcode)
case gomemcached.UPR_CONTROL, gomemcached.UPR_BUFFERACK:
if res.Status != gomemcached.SUCCESS {
logging.Infof("Opcode %v received status %d", pkt.Opcode.String(), res.Status)
}
case gomemcached.UPR_NOOP:
// send a NOOP back
noop := &gomemcached.MCResponse{
Opcode: gomemcached.UPR_NOOP,
Opaque: pkt.Opaque,
}
if err := feed.conn.TransmitResponse(noop); err != nil {
logging.Warnf("failed to transmit command %s. Error %s", noop.Opcode.String(), err.Error())
}
default:
logging.Infof("Recived an unknown response for vbucket %d", vb)
}
}
if event != nil {
select {
case ch <- event:
case <-feed.closer:
logging.Infof("Feed has been closed. Skip sending events. Exiting.")
break loop
}
feed.muVbstreams.RLock()
l := len(feed.vbstreams)
feed.muVbstreams.RUnlock()
if event.Opcode == gomemcached.UPR_CLOSESTREAM && l == 0 {
logging.Infof("No more streams")
}
}
if !feed.ackByClient {
// if client does not ack, use the size of data sent to client to determine if ack to dcp is needed
feed.sendBufferAckIfNeeded(sendAck, uint32(bytes))
}
}
}
// make sure that feed is closed before we signal transmitCl and exit runFeed
feed.Close()
close(feed.transmitCl)
logging.Infof("runFeed exiting")
}
// Client, after setting the ackByClient flag to true in the NewUprFeedWithConfig() call,
// can call this API to notify gomemcached that it has completed processing
// a number of bytes.
// This API is not thread safe. Callers must NOT have more than one goroutine calling it.
func (feed *UprFeed) IncrementAckBytes(bytes uint32) error {
if !feed.ackByClient {
return errors.New("Upr feed does not have ackByclient flag set")
}
feed.sendBufferAckIfNeeded(true, bytes)
return nil
}
// send buffer ack if enough ack bytes have been accumulated
func (feed *UprFeed) sendBufferAckIfNeeded(sendAck bool, bytes uint32) {
if sendAck {
totalBytes := feed.toAckBytes + bytes
if totalBytes > feed.maxAckBytes {
feed.toAckBytes = 0
feed.sendBufferAck(totalBytes)
} else {
feed.toAckBytes = totalBytes
}
}
}
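// Example: with UprOpen(..., bufSize=1048576), maxAckBytes is
// uint32(0.2 * 1048576), about 209715 bytes, so a buffer ack is sent to the
// producer once just over 20% of the negotiated buffer has been consumed.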
// send buffer ack to dcp
func (feed *UprFeed) sendBufferAck(sendSize uint32) {
bufferAck := &gomemcached.MCRequest{
Opcode: gomemcached.UPR_BUFFERACK,
}
bufferAck.Extras = make([]byte, 4)
binary.BigEndian.PutUint32(bufferAck.Extras[:4], uint32(sendSize))
feed.writeToTransmitCh(bufferAck)
feed.stats.TotalBufferAckSent++
}
func (feed *UprFeed) GetUprStats() *UprStats {
return &feed.stats
}
func composeOpaque(vbno, opaqueMSB uint16) uint32 {
return (uint32(opaqueMSB) << 16) | uint32(vbno)
}
func appOpaque(opq32 uint32) uint16 {
return uint16((opq32 & 0xFFFF0000) >> 16)
}
func vbOpaque(opq32 uint32) uint16 {
return uint16(opq32 & 0xFFFF)
}
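// Example: the opaque round-trip. composeOpaque(1023, 7) yields
// 7<<16 | 1023 == 0x000703FF, and the two halves are recovered with:
//
//	opq := composeOpaque(1023, 7)
//	appOpaque(opq) // == 7
//	vbOpaque(opq)  // == 1023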
// Close this UprFeed.
func (feed *UprFeed) Close() {
feed.muClosed.Lock()
defer feed.muClosed.Unlock()
if !feed.closed {
close(feed.closer)
feed.closed = true
}
}
// check if the UprFeed has been closed
func (feed *UprFeed) Closed() bool {
feed.muClosed.RLock()
defer feed.muClosed.RUnlock()
return feed.closed
}

vendor/github.com/couchbase/gomemcached/mc_constants.go

@ -0,0 +1,286 @@
// Package gomemcached provides the binary protocol packet formats and constants.
package gomemcached
import (
"fmt"
)
const (
REQ_MAGIC = 0x80
RES_MAGIC = 0x81
)
// CommandCode for memcached packets.
type CommandCode uint8
const (
GET = CommandCode(0x00)
SET = CommandCode(0x01)
ADD = CommandCode(0x02)
REPLACE = CommandCode(0x03)
DELETE = CommandCode(0x04)
INCREMENT = CommandCode(0x05)
DECREMENT = CommandCode(0x06)
QUIT = CommandCode(0x07)
FLUSH = CommandCode(0x08)
GETQ = CommandCode(0x09)
NOOP = CommandCode(0x0a)
VERSION = CommandCode(0x0b)
GETK = CommandCode(0x0c)
GETKQ = CommandCode(0x0d)
APPEND = CommandCode(0x0e)
PREPEND = CommandCode(0x0f)
STAT = CommandCode(0x10)
SETQ = CommandCode(0x11)
ADDQ = CommandCode(0x12)
REPLACEQ = CommandCode(0x13)
DELETEQ = CommandCode(0x14)
INCREMENTQ = CommandCode(0x15)
DECREMENTQ = CommandCode(0x16)
QUITQ = CommandCode(0x17)
FLUSHQ = CommandCode(0x18)
APPENDQ = CommandCode(0x19)
PREPENDQ = CommandCode(0x1a)
GAT = CommandCode(0x1d)
HELLO = CommandCode(0x1f)
RGET = CommandCode(0x30)
RSET = CommandCode(0x31)
RSETQ = CommandCode(0x32)
RAPPEND = CommandCode(0x33)
RAPPENDQ = CommandCode(0x34)
RPREPEND = CommandCode(0x35)
RPREPENDQ = CommandCode(0x36)
RDELETE = CommandCode(0x37)
RDELETEQ = CommandCode(0x38)
RINCR = CommandCode(0x39)
RINCRQ = CommandCode(0x3a)
RDECR = CommandCode(0x3b)
RDECRQ = CommandCode(0x3c)
SASL_LIST_MECHS = CommandCode(0x20)
SASL_AUTH = CommandCode(0x21)
SASL_STEP = CommandCode(0x22)
TAP_CONNECT = CommandCode(0x40) // Client-sent request to initiate Tap feed
TAP_MUTATION = CommandCode(0x41) // Notification of a SET/ADD/REPLACE/etc. on the server
TAP_DELETE = CommandCode(0x42) // Notification of a DELETE on the server
TAP_FLUSH = CommandCode(0x43) // Replicates a flush_all command
TAP_OPAQUE = CommandCode(0x44) // Opaque control data from the engine
TAP_VBUCKET_SET = CommandCode(0x45) // Sets state of vbucket in receiver (used in takeover)
TAP_CHECKPOINT_START = CommandCode(0x46) // Notifies start of new checkpoint
TAP_CHECKPOINT_END = CommandCode(0x47) // Notifies end of checkpoint
UPR_OPEN = CommandCode(0x50) // Open a UPR connection with a name
UPR_ADDSTREAM = CommandCode(0x51) // Sent by ebucketMigrator to UPR Consumer
UPR_CLOSESTREAM = CommandCode(0x52) // Sent by eBucketMigrator to UPR Consumer
UPR_FAILOVERLOG = CommandCode(0x54) // Request failover logs
UPR_STREAMREQ = CommandCode(0x53) // Stream request from consumer to producer
UPR_STREAMEND = CommandCode(0x55) // Sent by producer when it has no more messages to stream
UPR_SNAPSHOT = CommandCode(0x56) // Start of a new snapshot
UPR_MUTATION = CommandCode(0x57) // Key mutation
UPR_DELETION = CommandCode(0x58) // Key deletion
UPR_EXPIRATION = CommandCode(0x59) // Key expiration
UPR_FLUSH = CommandCode(0x5a) // Delete all the data for a vbucket
UPR_NOOP = CommandCode(0x5c) // UPR NOOP
UPR_BUFFERACK = CommandCode(0x5d) // UPR Buffer Acknowledgement
UPR_CONTROL = CommandCode(0x5e) // Set flow control params
SELECT_BUCKET = CommandCode(0x89) // Select bucket
OBSERVE_SEQNO = CommandCode(0x91) // Sequence Number based Observe
OBSERVE = CommandCode(0x92)
GET_META = CommandCode(0xA0) // Get meta. returns with expiry, flags, cas etc
)
// Status field for memcached response.
type Status uint16
const (
SUCCESS = Status(0x00)
KEY_ENOENT = Status(0x01)
KEY_EEXISTS = Status(0x02)
E2BIG = Status(0x03)
EINVAL = Status(0x04)
NOT_STORED = Status(0x05)
DELTA_BADVAL = Status(0x06)
NOT_MY_VBUCKET = Status(0x07)
NO_BUCKET = Status(0x08)
AUTH_STALE = Status(0x1f)
AUTH_ERROR = Status(0x20)
AUTH_CONTINUE = Status(0x21)
ERANGE = Status(0x22)
ROLLBACK = Status(0x23)
EACCESS = Status(0x24)
NOT_INITIALIZED = Status(0x25)
UNKNOWN_COMMAND = Status(0x81)
ENOMEM = Status(0x82)
NOT_SUPPORTED = Status(0x83)
EINTERNAL = Status(0x84)
EBUSY = Status(0x85)
TMPFAIL = Status(0x86)
)
// the producer/consumer bit in dcp flags
var DCP_PRODUCER uint32 = 0x01
// the include XATTRS bit in dcp flags
var DCP_OPEN_INCLUDE_XATTRS uint32 = 0x04
// MCItem is an internal representation of an item.
type MCItem struct {
Cas uint64
Flags, Expiration uint32
Data []byte
}
// Number of bytes in a binary protocol header.
const HDR_LEN = 24
// Mapping of CommandCode -> name of command (not exhaustive)
var CommandNames map[CommandCode]string
// StatusNames human readable names for memcached response.
var StatusNames map[Status]string
func init() {
CommandNames = make(map[CommandCode]string)
CommandNames[GET] = "GET"
CommandNames[SET] = "SET"
CommandNames[ADD] = "ADD"
CommandNames[REPLACE] = "REPLACE"
CommandNames[DELETE] = "DELETE"
CommandNames[INCREMENT] = "INCREMENT"
CommandNames[DECREMENT] = "DECREMENT"
CommandNames[QUIT] = "QUIT"
CommandNames[FLUSH] = "FLUSH"
CommandNames[GETQ] = "GETQ"
CommandNames[NOOP] = "NOOP"
CommandNames[VERSION] = "VERSION"
CommandNames[GETK] = "GETK"
CommandNames[GETKQ] = "GETKQ"
CommandNames[APPEND] = "APPEND"
CommandNames[PREPEND] = "PREPEND"
CommandNames[STAT] = "STAT"
CommandNames[SETQ] = "SETQ"
CommandNames[ADDQ] = "ADDQ"
CommandNames[REPLACEQ] = "REPLACEQ"
CommandNames[DELETEQ] = "DELETEQ"
CommandNames[INCREMENTQ] = "INCREMENTQ"
CommandNames[DECREMENTQ] = "DECREMENTQ"
CommandNames[QUITQ] = "QUITQ"
CommandNames[FLUSHQ] = "FLUSHQ"
CommandNames[APPENDQ] = "APPENDQ"
CommandNames[PREPENDQ] = "PREPENDQ"
CommandNames[RGET] = "RGET"
CommandNames[RSET] = "RSET"
CommandNames[RSETQ] = "RSETQ"
CommandNames[RAPPEND] = "RAPPEND"
CommandNames[RAPPENDQ] = "RAPPENDQ"
CommandNames[RPREPEND] = "RPREPEND"
CommandNames[RPREPENDQ] = "RPREPENDQ"
CommandNames[RDELETE] = "RDELETE"
CommandNames[RDELETEQ] = "RDELETEQ"
CommandNames[RINCR] = "RINCR"
CommandNames[RINCRQ] = "RINCRQ"
CommandNames[RDECR] = "RDECR"
CommandNames[RDECRQ] = "RDECRQ"
CommandNames[SASL_LIST_MECHS] = "SASL_LIST_MECHS"
CommandNames[SASL_AUTH] = "SASL_AUTH"
CommandNames[SASL_STEP] = "SASL_STEP"
CommandNames[TAP_CONNECT] = "TAP_CONNECT"
CommandNames[TAP_MUTATION] = "TAP_MUTATION"
CommandNames[TAP_DELETE] = "TAP_DELETE"
CommandNames[TAP_FLUSH] = "TAP_FLUSH"
CommandNames[TAP_OPAQUE] = "TAP_OPAQUE"
CommandNames[TAP_VBUCKET_SET] = "TAP_VBUCKET_SET"
CommandNames[TAP_CHECKPOINT_START] = "TAP_CHECKPOINT_START"
CommandNames[TAP_CHECKPOINT_END] = "TAP_CHECKPOINT_END"
CommandNames[UPR_OPEN] = "UPR_OPEN"
CommandNames[UPR_ADDSTREAM] = "UPR_ADDSTREAM"
CommandNames[UPR_CLOSESTREAM] = "UPR_CLOSESTREAM"
CommandNames[UPR_FAILOVERLOG] = "UPR_FAILOVERLOG"
CommandNames[UPR_STREAMREQ] = "UPR_STREAMREQ"
CommandNames[UPR_STREAMEND] = "UPR_STREAMEND"
CommandNames[UPR_SNAPSHOT] = "UPR_SNAPSHOT"
CommandNames[UPR_MUTATION] = "UPR_MUTATION"
CommandNames[UPR_DELETION] = "UPR_DELETION"
CommandNames[UPR_EXPIRATION] = "UPR_EXPIRATION"
CommandNames[UPR_FLUSH] = "UPR_FLUSH"
CommandNames[UPR_NOOP] = "UPR_NOOP"
CommandNames[UPR_BUFFERACK] = "UPR_BUFFERACK"
CommandNames[UPR_CONTROL] = "UPR_CONTROL"
StatusNames = make(map[Status]string)
StatusNames[SUCCESS] = "SUCCESS"
StatusNames[KEY_ENOENT] = "KEY_ENOENT"
StatusNames[KEY_EEXISTS] = "KEY_EEXISTS"
StatusNames[E2BIG] = "E2BIG"
StatusNames[EINVAL] = "EINVAL"
StatusNames[NOT_STORED] = "NOT_STORED"
StatusNames[DELTA_BADVAL] = "DELTA_BADVAL"
StatusNames[NOT_MY_VBUCKET] = "NOT_MY_VBUCKET"
StatusNames[NO_BUCKET] = "NO_BUCKET"
StatusNames[AUTH_STALE] = "AUTH_STALE"
StatusNames[AUTH_ERROR] = "AUTH_ERROR"
StatusNames[AUTH_CONTINUE] = "AUTH_CONTINUE"
StatusNames[ERANGE] = "ERANGE"
StatusNames[ROLLBACK] = "ROLLBACK"
StatusNames[EACCESS] = "EACCESS"
StatusNames[NOT_INITIALIZED] = "NOT_INITIALIZED"
StatusNames[UNKNOWN_COMMAND] = "UNKNOWN_COMMAND"
StatusNames[ENOMEM] = "ENOMEM"
StatusNames[NOT_SUPPORTED] = "NOT_SUPPORTED"
StatusNames[EINTERNAL] = "EINTERNAL"
StatusNames[EBUSY] = "EBUSY"
StatusNames[TMPFAIL] = "TMPFAIL"
}
// String an op code.
func (o CommandCode) String() (rv string) {
rv = CommandNames[o]
if rv == "" {
rv = fmt.Sprintf("0x%02x", int(o))
}
return rv
}
// String a status code.
func (s Status) String() (rv string) {
rv = StatusNames[s]
if rv == "" {
rv = fmt.Sprintf("0x%02x", int(s))
}
return rv
}
// IsQuiet will return true if a command is a "quiet" command.
func (o CommandCode) IsQuiet() bool {
switch o {
case GETQ,
GETKQ,
SETQ,
ADDQ,
REPLACEQ,
DELETEQ,
INCREMENTQ,
DECREMENTQ,
QUITQ,
FLUSHQ,
APPENDQ,
PREPENDQ,
RSETQ,
RAPPENDQ,
RPREPENDQ,
RDELETEQ,
RINCRQ,
RDECRQ:
return true
}
return false
}
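// Example: GET.IsQuiet() == false while GETQ.IsQuiet() == true. In the
// binary protocol, quiet variants generally suppress uninteresting
// responses (such as misses) on the wire.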

vendor/github.com/couchbase/gomemcached/mc_req.go

@ -0,0 +1,197 @@
package gomemcached
import (
"encoding/binary"
"fmt"
"io"
)
// The maximum reasonable body length to expect.
// Anything larger than this will result in an error.
// The current limit, 20MB, is the size limit supported by ep-engine.
var MaxBodyLen = int(20 * 1024 * 1024)
// MCRequest is a memcached request.
type MCRequest struct {
// The command being issued
Opcode CommandCode
// The CAS (if applicable, or 0)
Cas uint64
// An opaque value to be returned with this request
Opaque uint32
// The vbucket to which this command belongs
VBucket uint16
// Command extras, key, and body
Extras, Key, Body, ExtMeta []byte
// Datatype identifier
DataType uint8
}
// Size gives the number of bytes this request requires.
func (req *MCRequest) Size() int {
return HDR_LEN + len(req.Extras) + len(req.Key) + len(req.Body) + len(req.ExtMeta)
}
// A debugging string representation of this request
func (req MCRequest) String() string {
return fmt.Sprintf("{MCRequest opcode=%s, bodylen=%d, key='%s'}",
req.Opcode, len(req.Body), req.Key)
}
func (req *MCRequest) fillHeaderBytes(data []byte) int {
pos := 0
data[pos] = REQ_MAGIC
pos++
data[pos] = byte(req.Opcode)
pos++
binary.BigEndian.PutUint16(data[pos:pos+2],
uint16(len(req.Key)))
pos += 2
// 4
data[pos] = byte(len(req.Extras))
pos++
// Data type
if req.DataType != 0 {
data[pos] = byte(req.DataType)
}
pos++
binary.BigEndian.PutUint16(data[pos:pos+2], req.VBucket)
pos += 2
// 8
binary.BigEndian.PutUint32(data[pos:pos+4],
uint32(len(req.Body)+len(req.Key)+len(req.Extras)+len(req.ExtMeta)))
pos += 4
// 12
binary.BigEndian.PutUint32(data[pos:pos+4], req.Opaque)
pos += 4
// 16
if req.Cas != 0 {
binary.BigEndian.PutUint64(data[pos:pos+8], req.Cas)
}
pos += 8
if len(req.Extras) > 0 {
copy(data[pos:pos+len(req.Extras)], req.Extras)
pos += len(req.Extras)
}
if len(req.Key) > 0 {
copy(data[pos:pos+len(req.Key)], req.Key)
pos += len(req.Key)
}
return pos
}
// HeaderBytes will return the wire representation of the request header
// (with the extras and key).
func (req *MCRequest) HeaderBytes() []byte {
data := make([]byte, HDR_LEN+len(req.Extras)+len(req.Key))
req.fillHeaderBytes(data)
return data
}
// Bytes will return the wire representation of this request.
func (req *MCRequest) Bytes() []byte {
data := make([]byte, req.Size())
pos := req.fillHeaderBytes(data)
if len(req.Body) > 0 {
copy(data[pos:pos+len(req.Body)], req.Body)
}
if len(req.ExtMeta) > 0 {
copy(data[pos+len(req.Body):pos+len(req.Body)+len(req.ExtMeta)], req.ExtMeta)
}
return data
}
// Transmit will send this request message across a writer.
func (req *MCRequest) Transmit(w io.Writer) (n int, err error) {
if len(req.Body) < 128 {
n, err = w.Write(req.Bytes())
} else {
n, err = w.Write(req.HeaderBytes())
if err == nil {
m := 0
m, err = w.Write(req.Body)
n += m
}
}
return
}
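// Example (sketch): encoding a simple GET request and inspecting its wire
// form; only types from this package are involved.
//
//	req := &MCRequest{Opcode: GET, Key: []byte("hello"), Opaque: 0xDEADBEEF}
//	b := req.Bytes()
//	// b[0] == REQ_MAGIC, b[1] == byte(GET),
//	// b[2:4] holds the key length (5), b[8:12] the total body length (5).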
// Receive will fill this MCRequest with the data from a reader.
func (req *MCRequest) Receive(r io.Reader, hdrBytes []byte) (int, error) {
if len(hdrBytes) < HDR_LEN {
hdrBytes = []byte{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0}
}
n, err := io.ReadFull(r, hdrBytes)
if err != nil {
return n, err
}
if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
}
klen := int(binary.BigEndian.Uint16(hdrBytes[2:]))
elen := int(hdrBytes[4])
// Data type at 5
req.DataType = uint8(hdrBytes[5])
req.Opcode = CommandCode(hdrBytes[1])
// Vbucket at 6:7
req.VBucket = binary.BigEndian.Uint16(hdrBytes[6:])
totalBodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:]))
req.Opaque = binary.BigEndian.Uint32(hdrBytes[12:])
req.Cas = binary.BigEndian.Uint64(hdrBytes[16:])
if totalBodyLen > 0 {
buf := make([]byte, totalBodyLen)
m, err := io.ReadFull(r, buf)
n += m
if err == nil {
if req.Opcode >= TAP_MUTATION &&
req.Opcode <= TAP_CHECKPOINT_END &&
len(buf) > 1 {
// In these commands there is "engine private"
// data at the end of the extras. The first 2
// bytes of extra data give its length.
elen += int(binary.BigEndian.Uint16(buf))
}
req.Extras = buf[0:elen]
req.Key = buf[elen : klen+elen]
// get the length of extended metadata
extMetaLen := 0
if elen > 29 {
extMetaLen = int(binary.BigEndian.Uint16(req.Extras[28:30]))
}
bodyLen := totalBodyLen - klen - elen - extMetaLen
if bodyLen > MaxBodyLen {
return n, fmt.Errorf("%d is too big (max %d)",
bodyLen, MaxBodyLen)
}
req.Body = buf[klen+elen : klen+elen+bodyLen]
req.ExtMeta = buf[klen+elen+bodyLen:]
}
}
return n, err
}

vendor/github.com/couchbase/gomemcached/mc_res.go

@ -0,0 +1,267 @@
package gomemcached
import (
"encoding/binary"
"fmt"
"io"
"sync"
)
// MCResponse is a memcached response.
type MCResponse struct {
// The command opcode of the command that sent the request
Opcode CommandCode
// The status of the response
Status Status
// The opaque sent in the request
Opaque uint32
// The CAS identifier (if applicable)
Cas uint64
// Extras, key, and body for this response
Extras, Key, Body []byte
// If true, this represents a fatal condition and we should hang up
Fatal bool
// Datatype identifier
DataType uint8
}
// A debugging string representation of this response
func (res MCResponse) String() string {
return fmt.Sprintf("{MCResponse status=%v keylen=%d, extralen=%d, bodylen=%d}",
res.Status, len(res.Key), len(res.Extras), len(res.Body))
}
// Response as an error.
func (res *MCResponse) Error() string {
return fmt.Sprintf("MCResponse status=%v, opcode=%v, opaque=%v, msg: %s",
res.Status, res.Opcode, res.Opaque, string(res.Body))
}
func errStatus(e error) Status {
status := Status(0xffff)
if res, ok := e.(*MCResponse); ok {
status = res.Status
}
return status
}
// IsNotFound is true if this error represents a "not found" response.
func IsNotFound(e error) bool {
return errStatus(e) == KEY_ENOENT
}
// IsFatal returns true if this error is believed to be fatal to a connection.
func IsFatal(e error) bool {
if e == nil {
return false
}
switch errStatus(e) {
case KEY_ENOENT, KEY_EEXISTS, NOT_STORED, TMPFAIL:
return false
}
return true
}
// Size is number of bytes this response consumes on the wire.
func (res *MCResponse) Size() int {
return HDR_LEN + len(res.Extras) + len(res.Key) + len(res.Body)
}
func (res *MCResponse) fillHeaderBytes(data []byte) int {
pos := 0
data[pos] = RES_MAGIC
pos++
data[pos] = byte(res.Opcode)
pos++
binary.BigEndian.PutUint16(data[pos:pos+2],
uint16(len(res.Key)))
pos += 2
// 4
data[pos] = byte(len(res.Extras))
pos++
// Data type
if res.DataType != 0 {
data[pos] = byte(res.DataType)
} else {
data[pos] = 0
}
pos++
binary.BigEndian.PutUint16(data[pos:pos+2], uint16(res.Status))
pos += 2
// 8
binary.BigEndian.PutUint32(data[pos:pos+4],
uint32(len(res.Body)+len(res.Key)+len(res.Extras)))
pos += 4
// 12
binary.BigEndian.PutUint32(data[pos:pos+4], res.Opaque)
pos += 4
// 16
binary.BigEndian.PutUint64(data[pos:pos+8], res.Cas)
pos += 8
if len(res.Extras) > 0 {
copy(data[pos:pos+len(res.Extras)], res.Extras)
pos += len(res.Extras)
}
if len(res.Key) > 0 {
copy(data[pos:pos+len(res.Key)], res.Key)
pos += len(res.Key)
}
return pos
}
// HeaderBytes will get just the header bytes for this response.
func (res *MCResponse) HeaderBytes() []byte {
data := make([]byte, HDR_LEN+len(res.Extras)+len(res.Key))
res.fillHeaderBytes(data)
return data
}
// Bytes will return the actual bytes transmitted for this response.
func (res *MCResponse) Bytes() []byte {
data := make([]byte, res.Size())
pos := res.fillHeaderBytes(data)
copy(data[pos:pos+len(res.Body)], res.Body)
return data
}
// Transmit will send this response message across a writer.
func (res *MCResponse) Transmit(w io.Writer) (n int, err error) {
if len(res.Body) < 128 {
n, err = w.Write(res.Bytes())
} else {
n, err = w.Write(res.HeaderBytes())
if err == nil {
m := 0
m, err = w.Write(res.Body)
n += m // include the body bytes in the returned count
}
}
return
}
// Receive will fill this MCResponse with the data from this reader.
func (res *MCResponse) Receive(r io.Reader, hdrBytes []byte) (n int, err error) {
if len(hdrBytes) < HDR_LEN {
hdrBytes = []byte{
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0}
}
n, err = io.ReadFull(r, hdrBytes)
if err != nil {
return n, err
}
if hdrBytes[0] != RES_MAGIC && hdrBytes[0] != REQ_MAGIC {
return n, fmt.Errorf("bad magic: 0x%02x", hdrBytes[0])
}
klen := int(binary.BigEndian.Uint16(hdrBytes[2:4]))
elen := int(hdrBytes[4])
res.Opcode = CommandCode(hdrBytes[1])
res.DataType = uint8(hdrBytes[5])
res.Status = Status(binary.BigEndian.Uint16(hdrBytes[6:8]))
res.Opaque = binary.BigEndian.Uint32(hdrBytes[12:16])
res.Cas = binary.BigEndian.Uint64(hdrBytes[16:24])
bodyLen := int(binary.BigEndian.Uint32(hdrBytes[8:12])) - (klen + elen)
//defer function to debug the panic seen with MB-15557
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in Receive: response %v, key len %v extra len %v bodylen %v", res, klen, elen, bodyLen)
}
}()
buf := make([]byte, klen+elen+bodyLen)
m, err := io.ReadFull(r, buf)
if err == nil {
res.Extras = buf[0:elen]
res.Key = buf[elen : klen+elen]
res.Body = buf[klen+elen:]
}
return n + m, err
}
type MCResponsePool struct {
pool *sync.Pool
}
func NewMCResponsePool() *MCResponsePool {
rv := &MCResponsePool{
pool: &sync.Pool{
New: func() interface{} {
return &MCResponse{}
},
},
}
return rv
}
func (this *MCResponsePool) Get() *MCResponse {
return this.pool.Get().(*MCResponse)
}
func (this *MCResponsePool) Put(r *MCResponse) {
if r == nil {
return
}
r.Extras = nil
r.Key = nil
r.Body = nil
r.Fatal = false
this.pool.Put(r)
}
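// Example (sketch): recycling responses through the pool to reduce garbage:
//
//	pool := NewMCResponsePool()
//	res := pool.Get()
//	// ... populate and use res ...
//	pool.Put(res) // Extras/Key/Body are cleared before reuse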
type StringMCResponsePool struct {
pool *sync.Pool
size int
}
func NewStringMCResponsePool(size int) *StringMCResponsePool {
rv := &StringMCResponsePool{
pool: &sync.Pool{
New: func() interface{} {
return make(map[string]*MCResponse, size)
},
},
size: size,
}
return rv
}
func (this *StringMCResponsePool) Get() map[string]*MCResponse {
return this.pool.Get().(map[string]*MCResponse)
}
func (this *StringMCResponsePool) Put(m map[string]*MCResponse) {
if m == nil || len(m) > 2*this.size {
return
}
for k := range m {
m[k] = nil
delete(m, k)
}
this.pool.Put(m)
}

vendor/github.com/couchbase/gomemcached/tap.go

@ -0,0 +1,168 @@
package gomemcached
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"strings"
)
type TapConnectFlag uint32
// Tap connect option flags
const (
BACKFILL = TapConnectFlag(0x01)
DUMP = TapConnectFlag(0x02)
LIST_VBUCKETS = TapConnectFlag(0x04)
TAKEOVER_VBUCKETS = TapConnectFlag(0x08)
SUPPORT_ACK = TapConnectFlag(0x10)
REQUEST_KEYS_ONLY = TapConnectFlag(0x20)
CHECKPOINT = TapConnectFlag(0x40)
REGISTERED_CLIENT = TapConnectFlag(0x80)
FIX_FLAG_BYTEORDER = TapConnectFlag(0x100)
)
// Tap opaque event subtypes
const (
TAP_OPAQUE_ENABLE_AUTO_NACK = 0
TAP_OPAQUE_INITIAL_VBUCKET_STREAM = 1
TAP_OPAQUE_ENABLE_CHECKPOINT_SYNC = 2
TAP_OPAQUE_CLOSE_TAP_STREAM = 7
TAP_OPAQUE_CLOSE_BACKFILL = 8
)
// Tap item flags
const (
TAP_ACK = 1
TAP_NO_VALUE = 2
TAP_FLAG_NETWORK_BYTE_ORDER = 4
)
// TapConnectFlagNames for TapConnectFlag
var TapConnectFlagNames = map[TapConnectFlag]string{
BACKFILL: "BACKFILL",
DUMP: "DUMP",
LIST_VBUCKETS: "LIST_VBUCKETS",
TAKEOVER_VBUCKETS: "TAKEOVER_VBUCKETS",
SUPPORT_ACK: "SUPPORT_ACK",
REQUEST_KEYS_ONLY: "REQUEST_KEYS_ONLY",
CHECKPOINT: "CHECKPOINT",
REGISTERED_CLIENT: "REGISTERED_CLIENT",
FIX_FLAG_BYTEORDER: "FIX_FLAG_BYTEORDER",
}
// TapItemParser is a function to parse a single tap extra.
type TapItemParser func(io.Reader) (interface{}, error)
// TapParseUint64 is a function to parse a single tap uint64.
func TapParseUint64(r io.Reader) (interface{}, error) {
var rv uint64
err := binary.Read(r, binary.BigEndian, &rv)
return rv, err
}
// TapParseUint16 is a function to parse a single tap uint16.
func TapParseUint16(r io.Reader) (interface{}, error) {
var rv uint16
err := binary.Read(r, binary.BigEndian, &rv)
return rv, err
}
// TapParseBool is a function to parse a single tap boolean.
func TapParseBool(r io.Reader) (interface{}, error) {
return true, nil
}
// TapParseVBList parses a list of vBucket numbers as []uint16.
func TapParseVBList(r io.Reader) (interface{}, error) {
num, err := TapParseUint16(r)
if err != nil {
return nil, err
}
n := int(num.(uint16))
rv := make([]uint16, n)
for i := 0; i < n; i++ {
x, err := TapParseUint16(r)
if err != nil {
return nil, err
}
rv[i] = x.(uint16)
}
return rv, err
}
// TapFlagParsers maps TAP connect flags to their extras parser functions.
var TapFlagParsers = map[TapConnectFlag]TapItemParser{
BACKFILL: TapParseUint64,
LIST_VBUCKETS: TapParseVBList,
}
// SplitFlags will split the ORed flags into the individual bit flags.
func (f TapConnectFlag) SplitFlags() []TapConnectFlag {
rv := []TapConnectFlag{}
for i := uint32(1); f != 0; i = i << 1 {
if uint32(f)&i == i {
rv = append(rv, TapConnectFlag(i))
}
f = TapConnectFlag(uint32(f) & (^i))
}
return rv
}
func (f TapConnectFlag) String() string {
parts := []string{}
for _, x := range f.SplitFlags() {
p := TapConnectFlagNames[x]
if p == "" {
p = fmt.Sprintf("0x%x", int(x))
}
parts = append(parts, p)
}
return strings.Join(parts, "|")
}
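// Example: SplitFlags decomposes an ORed value into its individual bits, so
//
//	f := BACKFILL | DUMP
//	f.SplitFlags() // []TapConnectFlag{BACKFILL, DUMP}
//	f.String()     // "BACKFILL|DUMP"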
type TapConnect struct {
Flags map[TapConnectFlag]interface{}
RemainingBody []byte
Name string
}
// ParseTapCommands parses the tap request into the interesting bits we may
// need to do something with.
func (req *MCRequest) ParseTapCommands() (TapConnect, error) {
rv := TapConnect{
Flags: map[TapConnectFlag]interface{}{},
Name: string(req.Key),
}
if len(req.Extras) < 4 {
return rv, fmt.Errorf("not enough extra bytes: %x", req.Extras)
}
flags := TapConnectFlag(binary.BigEndian.Uint32(req.Extras))
r := bytes.NewReader(req.Body)
for _, f := range flags.SplitFlags() {
fun := TapFlagParsers[f]
if fun == nil {
fun = TapParseBool
}
val, err := fun(r)
if err != nil {
return rv, err
}
rv.Flags[f] = val
}
var err error
rv.RemainingBody, err = ioutil.ReadAll(r)
return rv, err
}

vendor/github.com/couchbase/goutils/LICENSE.md

@ -0,0 +1,47 @@
COUCHBASE INC. COMMUNITY EDITION LICENSE AGREEMENT
IMPORTANT-READ CAREFULLY: BY CLICKING THE "I ACCEPT" BOX OR INSTALLING,
DOWNLOADING OR OTHERWISE USING THIS SOFTWARE AND ANY ASSOCIATED
DOCUMENTATION, YOU, ON BEHALF OF YOURSELF OR AS AN AUTHORIZED
REPRESENTATIVE ON BEHALF OF AN ENTITY ("LICENSEE") AGREE TO ALL THE
TERMS OF THIS COMMUNITY EDITION LICENSE AGREEMENT (THE "AGREEMENT")
REGARDING YOUR USE OF THE SOFTWARE. YOU REPRESENT AND WARRANT THAT YOU
HAVE FULL LEGAL AUTHORITY TO BIND THE LICENSEE TO THIS AGREEMENT. IF YOU
DO NOT AGREE WITH ALL OF THESE TERMS, DO NOT SELECT THE "I ACCEPT" BOX
AND DO NOT INSTALL, DOWNLOAD OR OTHERWISE USE THE SOFTWARE. THE
EFFECTIVE DATE OF THIS AGREEMENT IS THE DATE ON WHICH YOU CLICK "I
ACCEPT" OR OTHERWISE INSTALL, DOWNLOAD OR USE THE SOFTWARE.
1. License Grant. Couchbase Inc. hereby grants Licensee, free of charge,
the non-exclusive right to use, copy, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to
whom the Software is furnished to do so, subject to Licensee including
the following copyright notice in all copies or substantial portions of
the Software:
Couchbase (r) http://www.Couchbase.com Copyright 2016 Couchbase, Inc.
As used in this Agreement, "Software" means the object code version of
the applicable elastic data management server software provided by
Couchbase Inc.
2. Restrictions. Licensee will not reverse engineer, disassemble, or
decompile the Software (except to the extent such restrictions are
prohibited by law).
3. Support. Couchbase, Inc. will provide Licensee with access to, and
use of, the Couchbase, Inc. support forum available at the following
URL: http://www.couchbase.org/forums/. Couchbase, Inc. may, at its
discretion, modify, suspend or terminate support at any time upon notice
to Licensee.
4. Warranty Disclaimer and Limitation of Liability. THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
COUCHBASE INC. OR THE AUTHORS OR COPYRIGHT HOLDERS IN THE SOFTWARE BE
LIABLE FOR ANY CLAIM, DAMAGES (IINCLUDING, WITHOUT LIMITATION, DIRECT,
INDIRECT OR CONSEQUENTIAL DAMAGES) OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

vendor/github.com/couchbase/goutils/logging/logger.go

@ -0,0 +1,481 @@
// Copyright (c) 2016 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package logging
import (
"os"
"runtime"
"strings"
"sync"
)
type Level int
const (
NONE = Level(iota) // Disable all logging
FATAL // System is in severe error state and has to abort
SEVERE // System is in severe error state and cannot recover reliably
ERROR // System is in error state but can recover and continue reliably
WARN // System approaching error state, or is in a correct but undesirable state
INFO // System-level events and status, in correct states
REQUEST // Request-level events, with request-specific rlevel
TRACE // Trace detailed system execution, e.g. function entry / exit
DEBUG // Debug
)
type LogEntryFormatter int
const (
TEXTFORMATTER = LogEntryFormatter(iota)
JSONFORMATTER
KVFORMATTER
)
func (level Level) String() string {
return _LEVEL_NAMES[level]
}
var _LEVEL_NAMES = []string{
DEBUG: "DEBUG",
TRACE: "TRACE",
REQUEST: "REQUEST",
INFO: "INFO",
WARN: "WARN",
ERROR: "ERROR",
SEVERE: "SEVERE",
FATAL: "FATAL",
NONE: "NONE",
}
var _LEVEL_MAP = map[string]Level{
"debug": DEBUG,
"trace": TRACE,
"request": REQUEST,
"info": INFO,
"warn": WARN,
"error": ERROR,
"severe": SEVERE,
"fatal": FATAL,
"none": NONE,
}
func ParseLevel(name string) (level Level, ok bool) {
level, ok = _LEVEL_MAP[strings.ToLower(name)]
return
}
/*
Pair supports logging of key-value pairs. Keys beginning with _ are
reserved for the logger, e.g. _time, _level, _msg, and _rlevel. The
Pair APIs are designed to avoid heap allocation and garbage
collection.
*/
type Pairs []Pair
type Pair struct {
Name string
Value interface{}
}
/*
Map allows key-value pairs to be specified using map literals or data
structures. For example:
Errorm(msg, Map{...})
Map incurs heap allocation and garbage collection, so the Pair APIs
should be preferred.
*/
type Map map[string]interface{}
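// Example (sketch): the two styles side by side; Pair avoids the heap
// allocation that a Map literal incurs:
//
//	logging.Errorp("write failed", logging.Pair{"key", k}, logging.Pair{"attempt", n})
//	logging.Errorm("write failed", logging.Map{"key": k, "attempt": n})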
// Logger provides a common interface for logging libraries
type Logger interface {
/*
These APIs write all the given pairs in addition to standard logger keys.
*/
Logp(level Level, msg string, kv ...Pair)
Debugp(msg string, kv ...Pair)
Tracep(msg string, kv ...Pair)
Requestp(rlevel Level, msg string, kv ...Pair)
Infop(msg string, kv ...Pair)
Warnp(msg string, kv ...Pair)
Errorp(msg string, kv ...Pair)
Severep(msg string, kv ...Pair)
Fatalp(msg string, kv ...Pair)
/*
These APIs write the fields in the given kv Map in addition to standard logger keys.
*/
Logm(level Level, msg string, kv Map)
Debugm(msg string, kv Map)
Tracem(msg string, kv Map)
Requestm(rlevel Level, msg string, kv Map)
Infom(msg string, kv Map)
Warnm(msg string, kv Map)
Errorm(msg string, kv Map)
Severem(msg string, kv Map)
Fatalm(msg string, kv Map)
/*
These APIs only write _msg, _time, _level, and other logger keys. If
the msg contains other fields, use the Pair or Map APIs instead.
*/
Logf(level Level, fmt string, args ...interface{})
Debugf(fmt string, args ...interface{})
Tracef(fmt string, args ...interface{})
Requestf(rlevel Level, fmt string, args ...interface{})
Infof(fmt string, args ...interface{})
Warnf(fmt string, args ...interface{})
Errorf(fmt string, args ...interface{})
Severef(fmt string, args ...interface{})
Fatalf(fmt string, args ...interface{})
/*
These APIs control the logging level
*/
SetLevel(Level) // Set the logging level
Level() Level // Get the current logging level
}
var logger Logger = nil
var curLevel Level = DEBUG // initially set to never skip
var loggerMutex sync.RWMutex
// All the methods below first acquire the mutex (mostly in exclusive mode)
// and only then check if logging at the current level is enabled.
// This introduces a fair bottleneck for those log entries that should be
// skipped (the majority, at INFO or below levels)
// We try to predict here if we should lock the mutex at all by caching
// the current log level: while dynamically changing logger, there might
// be the odd entry skipped as the new level is cached.
// Since we seem to never change the logger, this is not an issue.
func skipLogging(level Level) bool {
if logger == nil {
return true
}
return level > curLevel
}
func SetLogger(newLogger Logger) {
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger = newLogger
if logger == nil {
curLevel = NONE
} else {
curLevel = newLogger.Level()
}
}
func Logp(level Level, msg string, kv ...Pair) {
if skipLogging(level) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Logp(level, msg, kv...)
}
func Debugp(msg string, kv ...Pair) {
if skipLogging(DEBUG) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Debugp(msg, kv...)
}
func Tracep(msg string, kv ...Pair) {
if skipLogging(TRACE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Tracep(msg, kv...)
}
func Requestp(rlevel Level, msg string, kv ...Pair) {
if skipLogging(REQUEST) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Requestp(rlevel, msg, kv...)
}
func Infop(msg string, kv ...Pair) {
if skipLogging(INFO) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Infop(msg, kv...)
}
func Warnp(msg string, kv ...Pair) {
if skipLogging(WARN) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Warnp(msg, kv...)
}
func Errorp(msg string, kv ...Pair) {
if skipLogging(ERROR) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Errorp(msg, kv...)
}
func Severep(msg string, kv ...Pair) {
if skipLogging(SEVERE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Severep(msg, kv...)
}
func Fatalp(msg string, kv ...Pair) {
if skipLogging(FATAL) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Fatalp(msg, kv...)
}
func Logm(level Level, msg string, kv Map) {
if skipLogging(level) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Logm(level, msg, kv)
}
func Debugm(msg string, kv Map) {
if skipLogging(DEBUG) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Debugm(msg, kv)
}
func Tracem(msg string, kv Map) {
if skipLogging(TRACE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Tracem(msg, kv)
}
func Requestm(rlevel Level, msg string, kv Map) {
if skipLogging(REQUEST) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Requestm(rlevel, msg, kv)
}
func Infom(msg string, kv Map) {
if skipLogging(INFO) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Infom(msg, kv)
}
func Warnm(msg string, kv Map) {
if skipLogging(WARN) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Warnm(msg, kv)
}
func Errorm(msg string, kv Map) {
if skipLogging(ERROR) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Errorm(msg, kv)
}
func Severem(msg string, kv Map) {
if skipLogging(SEVERE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Severem(msg, kv)
}
func Fatalm(msg string, kv Map) {
if skipLogging(FATAL) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Fatalm(msg, kv)
}
func Logf(level Level, fmt string, args ...interface{}) {
if skipLogging(level) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Logf(level, fmt, args...)
}
func Debugf(fmt string, args ...interface{}) {
if skipLogging(DEBUG) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Debugf(fmt, args...)
}
func Tracef(fmt string, args ...interface{}) {
if skipLogging(TRACE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Tracef(fmt, args...)
}
func Requestf(rlevel Level, fmt string, args ...interface{}) {
if skipLogging(REQUEST) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Requestf(rlevel, fmt, args...)
}
func Infof(fmt string, args ...interface{}) {
if skipLogging(INFO) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Infof(fmt, args...)
}
func Warnf(fmt string, args ...interface{}) {
if skipLogging(WARN) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Warnf(fmt, args...)
}
func Errorf(fmt string, args ...interface{}) {
if skipLogging(ERROR) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Errorf(fmt, args...)
}
func Severef(fmt string, args ...interface{}) {
if skipLogging(SEVERE) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Severef(fmt, args...)
}
func Fatalf(fmt string, args ...interface{}) {
if skipLogging(FATAL) {
return
}
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Fatalf(fmt, args...)
}
func SetLevel(level Level) {
loggerMutex.Lock()
defer loggerMutex.Unlock()
if logger != nil { // tolerate a nil logger installed via SetLogger(nil)
logger.SetLevel(level)
}
curLevel = level
}
func LogLevel() Level {
loggerMutex.RLock()
defer loggerMutex.RUnlock()
if logger == nil { // a nil logger means logging is disabled
return NONE
}
return logger.Level()
}
func Stackf(level Level, fmt string, args ...interface{}) {
if skipLogging(level) {
return
}
buf := make([]byte, 1<<16)
n := runtime.Stack(buf, false)
s := string(buf[0:n])
loggerMutex.Lock()
defer loggerMutex.Unlock()
logger.Logf(level, fmt, args...)
logger.Logf(level, "%s", s) // pass the stack as an argument; it may contain % characters
}
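// Illustrative sketch (not part of the original source): Stackf is suited
// to recover handlers, logging the message and then the goroutine's stack.
//
//	defer func() {
//		if r := recover(); r != nil {
//			logging.Stackf(logging.SEVERE, "panic in handler: %v", r)
//		}
//	}()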
func init() {
logger = NewLogger(os.Stderr, INFO, TEXTFORMATTER)
SetLogger(logger)
}
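// Illustrative sketch (not part of the original source): because init
// installs a TEXTFORMATTER logger on os.Stderr at INFO, the package-level
// functions work with no setup; nodeName here is hypothetical.
//
//	logging.Infof("node %s joined the cluster", nodeName)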

View File

@ -0,0 +1,318 @@
// Copyright (c) 2016 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package logging
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"time"
)
type goLogger struct {
logger *log.Logger
level Level
entryFormatter formatter
}
const (
_LEVEL = "_level"
_MSG = "_msg"
_TIME = "_time"
_RLEVEL = "_rlevel"
)
func NewLogger(out io.Writer, lvl Level, fmtLogging LogEntryFormatter) *goLogger {
logger := &goLogger{
logger: log.New(out, "", 0),
level: lvl,
}
switch fmtLogging {
case JSONFORMATTER:
logger.entryFormatter = &jsonFormatter{}
case KVFORMATTER:
logger.entryFormatter = &keyvalueFormatter{}
default:
logger.entryFormatter = &textFormatter{}
}
return logger
}
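// Illustrative sketch (not part of the original source): building a
// standalone key=value logger; f stands in for any io.Writer, e.g. a file.
//
//	l := NewLogger(f, DEBUG, KVFORMATTER)
//	l.Debugp("cache miss", Pair{Name: "key", Value: "k1"})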
func (gl *goLogger) Logp(level Level, msg string, kv ...Pair) {
if gl.logger == nil {
return
}
if level <= gl.level {
e := newLogEntry(msg, level)
copyPairs(e, kv)
gl.log(e)
}
}
func (gl *goLogger) Debugp(msg string, kv ...Pair) {
gl.Logp(DEBUG, msg, kv...)
}
func (gl *goLogger) Tracep(msg string, kv ...Pair) {
gl.Logp(TRACE, msg, kv...)
}
func (gl *goLogger) Requestp(rlevel Level, msg string, kv ...Pair) {
if gl.logger == nil {
return
}
if REQUEST <= gl.level {
e := newLogEntry(msg, REQUEST)
e.Rlevel = rlevel
copyPairs(e, kv)
gl.log(e)
}
}
func (gl *goLogger) Infop(msg string, kv ...Pair) {
gl.Logp(INFO, msg, kv...)
}
func (gl *goLogger) Warnp(msg string, kv ...Pair) {
gl.Logp(WARN, msg, kv...)
}
func (gl *goLogger) Errorp(msg string, kv ...Pair) {
gl.Logp(ERROR, msg, kv...)
}
func (gl *goLogger) Severep(msg string, kv ...Pair) {
gl.Logp(SEVERE, msg, kv...)
}
func (gl *goLogger) Fatalp(msg string, kv ...Pair) {
gl.Logp(FATAL, msg, kv...)
}
func (gl *goLogger) Logm(level Level, msg string, kv Map) {
if gl.logger == nil {
return
}
if level <= gl.level {
e := newLogEntry(msg, level)
e.Data = kv
gl.log(e)
}
}
func (gl *goLogger) Debugm(msg string, kv Map) {
gl.Logm(DEBUG, msg, kv)
}
func (gl *goLogger) Tracem(msg string, kv Map) {
gl.Logm(TRACE, msg, kv)
}
func (gl *goLogger) Requestm(rlevel Level, msg string, kv Map) {
if gl.logger == nil {
return
}
if REQUEST <= gl.level {
e := newLogEntry(msg, REQUEST)
e.Rlevel = rlevel
e.Data = kv
gl.log(e)
}
}
func (gl *goLogger) Infom(msg string, kv Map) {
gl.Logm(INFO, msg, kv)
}
func (gl *goLogger) Warnm(msg string, kv Map) {
gl.Logm(WARN, msg, kv)
}
func (gl *goLogger) Errorm(msg string, kv Map) {
gl.Logm(ERROR, msg, kv)
}
func (gl *goLogger) Severem(msg string, kv Map) {
gl.Logm(SEVERE, msg, kv)
}
func (gl *goLogger) Fatalm(msg string, kv Map) {
gl.Logm(FATAL, msg, kv)
}
func (gl *goLogger) Logf(level Level, format string, args ...interface{}) {
if gl.logger == nil {
return
}
if level <= gl.level {
e := newLogEntry(fmt.Sprintf(format, args...), level)
gl.log(e)
}
}
func (gl *goLogger) Debugf(format string, args ...interface{}) {
gl.Logf(DEBUG, format, args...)
}
func (gl *goLogger) Tracef(format string, args ...interface{}) {
gl.Logf(TRACE, format, args...)
}
func (gl *goLogger) Requestf(rlevel Level, format string, args ...interface{}) {
if gl.logger == nil {
return
}
if REQUEST <= gl.level {
e := newLogEntry(fmt.Sprintf(format, args...), REQUEST)
e.Rlevel = rlevel
gl.log(e)
}
}
func (gl *goLogger) Infof(format string, args ...interface{}) {
gl.Logf(INFO, format, args...)
}
func (gl *goLogger) Warnf(format string, args ...interface{}) {
gl.Logf(WARN, format, args...)
}
func (gl *goLogger) Errorf(format string, args ...interface{}) {
gl.Logf(ERROR, format, args...)
}
func (gl *goLogger) Severef(format string, args ...interface{}) {
gl.Logf(SEVERE, format, args...)
}
func (gl *goLogger) Fatalf(format string, args ...interface{}) {
gl.Logf(FATAL, format, args...)
}
func (gl *goLogger) Level() Level {
return gl.level
}
func (gl *goLogger) SetLevel(level Level) {
gl.level = level
}
func (gl *goLogger) log(newEntry *logEntry) {
s := gl.entryFormatter.format(newEntry)
gl.logger.Print(s)
}
type logEntry struct {
Time string
Level Level
Rlevel Level
Message string
Data Map
}
func newLogEntry(msg string, level Level) *logEntry {
return &logEntry{
Time: time.Now().Format("2006-01-02T15:04:05.000-07:00"), // time.RFC3339 with milliseconds
Level: level,
Rlevel: NONE,
Message: msg,
}
}
func copyPairs(newEntry *logEntry, pairs []Pair) {
newEntry.Data = make(Map, len(pairs))
for _, p := range pairs {
newEntry.Data[p.Name] = p.Value
}
}
type formatter interface {
format(*logEntry) string
}
type textFormatter struct {
}
// ex. 2016-02-10T09:15:25.498-08:00 [INFO] This is a message from test in text format
func (*textFormatter) format(newEntry *logEntry) string {
b := &bytes.Buffer{}
appendValue(b, newEntry.Time)
if newEntry.Rlevel != NONE {
fmt.Fprintf(b, "[%s,%s] ", newEntry.Level.String(), newEntry.Rlevel.String())
} else {
fmt.Fprintf(b, "[%s] ", newEntry.Level.String())
}
appendValue(b, newEntry.Message)
for key, value := range newEntry.Data {
appendKeyValue(b, key, value)
}
b.WriteByte('\n')
return b.String()
}
func appendValue(b *bytes.Buffer, value interface{}) {
if _, ok := value.(string); ok {
fmt.Fprintf(b, "%s ", value)
} else {
fmt.Fprintf(b, "%v ", value)
}
}
type keyvalueFormatter struct {
}
// ex. _time=2016-02-10T09:15:25.498-08:00 _level=INFO _msg=This is a message from test in key-value format
func (*keyvalueFormatter) format(newEntry *logEntry) string {
b := &bytes.Buffer{}
appendKeyValue(b, _TIME, newEntry.Time)
appendKeyValue(b, _LEVEL, newEntry.Level.String())
if newEntry.Rlevel != NONE {
appendKeyValue(b, _RLEVEL, newEntry.Rlevel.String())
}
appendKeyValue(b, _MSG, newEntry.Message)
for key, value := range newEntry.Data {
appendKeyValue(b, key, value)
}
b.WriteByte('\n')
return b.String()
}
func appendKeyValue(b *bytes.Buffer, key, value interface{}) {
if _, ok := value.(string); ok {
fmt.Fprintf(b, "%v=%s ", key, value)
} else {
fmt.Fprintf(b, "%v=%v ", key, value)
}
}
type jsonFormatter struct {
}
// ex. {"_level":"INFO","_msg":"This is a message from test in json format","_time":"2016-02-10T09:12:59.518-08:00"}
func (*jsonFormatter) format(newEntry *logEntry) string {
if newEntry.Data == nil {
newEntry.Data = make(Map, 5)
}
newEntry.Data[_TIME] = newEntry.Time
newEntry.Data[_LEVEL] = newEntry.Level.String()
if newEntry.Rlevel != NONE {
newEntry.Data[_RLEVEL] = newEntry.Rlevel.String()
}
newEntry.Data[_MSG] = newEntry.Message
serialized, _ := json.Marshal(newEntry.Data) // a marshal error is silently ignored here
return string(append(serialized, '\n'))
}