
add vendor

This commit is contained in:
astaxie
2018-07-30 12:05:51 +08:00
parent d55f54a8ab
commit 48acfa08be
496 changed files with 327583 additions and 0 deletions

vendor/github.com/siddontang/ledisdb/ledis/batch.go generated vendored Normal file

@ -0,0 +1,139 @@
package ledis
import (
"sync"
"github.com/siddontang/go/log"
"github.com/siddontang/ledisdb/rpl"
"github.com/siddontang/ledisdb/store"
)
type batch struct {
l *Ledis
*store.WriteBatch
sync.Locker
// tx *Tx
}
func (b *batch) Commit() error {
if b.l.cfg.GetReadonly() {
return ErrWriteInROnly
}
return b.l.handleCommit(b.WriteBatch, b.WriteBatch)
// if b.tx == nil {
// return b.l.handleCommit(b.WriteBatch, b.WriteBatch)
// } else {
// if b.l.r != nil {
// if err := b.tx.data.Append(b.WriteBatch.BatchData()); err != nil {
// return err
// }
// }
// return b.WriteBatch.Commit()
// }
}
func (b *batch) Lock() {
b.Locker.Lock()
}
func (b *batch) Unlock() {
b.WriteBatch.Rollback()
b.Locker.Unlock()
}
func (b *batch) Put(key []byte, value []byte) {
b.WriteBatch.Put(key, value)
}
func (b *batch) Delete(key []byte) {
b.WriteBatch.Delete(key)
}
type dbBatchLocker struct {
l *sync.Mutex
wrLock *sync.RWMutex
}
func (l *dbBatchLocker) Lock() {
l.wrLock.RLock()
l.l.Lock()
}
func (l *dbBatchLocker) Unlock() {
l.l.Unlock()
l.wrLock.RUnlock()
}
// type txBatchLocker struct {
// }
// func (l *txBatchLocker) Lock() {}
// func (l *txBatchLocker) Unlock() {}
// type multiBatchLocker struct {
// }
// func (l *multiBatchLocker) Lock() {}
// func (l *multiBatchLocker) Unlock() {}
func (l *Ledis) newBatch(wb *store.WriteBatch, locker sync.Locker) *batch {
b := new(batch)
b.l = l
b.WriteBatch = wb
b.Locker = locker
return b
}
type committer interface {
Commit() error
}
type commitDataGetter interface {
Data() []byte
}
func (l *Ledis) handleCommit(g commitDataGetter, c committer) error {
l.commitLock.Lock()
var err error
if l.r != nil {
var rl *rpl.Log
if rl, err = l.r.Log(g.Data()); err != nil {
l.commitLock.Unlock()
log.Fatalf("write wal error %s", err.Error())
return err
}
l.propagate(rl)
if err = c.Commit(); err != nil {
l.commitLock.Unlock()
log.Fatalf("commit error %s", err.Error())
l.noticeReplication()
return err
}
if err = l.r.UpdateCommitID(rl.ID); err != nil {
l.commitLock.Unlock()
log.Fatalf("update commit id error %s", err.Error())
l.noticeReplication()
return err
}
} else {
err = c.Commit()
}
l.commitLock.Unlock()
return err
}
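
For orientation, this is the write protocol the rest of the package follows when driving a batch (a minimal in-package sketch mirroring the Set path in t_kv.go below; key and value are assumed to be []byte):

	// Lock serializes writers through dbBatchLocker, Commit routes through
	// handleCommit (replication log first, then storage), and the deferred
	// Unlock rolls back anything left uncommitted before releasing the lock.
	t := db.kvBatch
	t.Lock()
	defer t.Unlock()
	t.Put(key, value)
	err := t.Commit()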

vendor/github.com/siddontang/ledisdb/ledis/const.go generated vendored Normal file

@ -0,0 +1,150 @@
package ledis
import (
"errors"
)
// Version is the ledis version.
const Version = "0.5"
// DataType defines the external data types.
type DataType byte
// for external use
const (
KV DataType = iota
LIST
HASH
SET
ZSET
)
func (d DataType) String() string {
switch d {
case KV:
return KVName
case LIST:
return ListName
case HASH:
return HashName
case SET:
return SetName
case ZSET:
return ZSetName
default:
return "unknown"
}
}
// Names for the different data types
const (
KVName = "KV"
ListName = "LIST"
HashName = "HASH"
SetName = "SET"
ZSetName = "ZSET"
)
// for backend store
const (
NoneType byte = 0
KVType byte = 1
HashType byte = 2
HSizeType byte = 3
ListType byte = 4
LMetaType byte = 5
ZSetType byte = 6
ZSizeType byte = 7
ZScoreType byte = 8
// BitType byte = 9
// BitMetaType byte = 10
SetType byte = 11
SSizeType byte = 12
maxDataType byte = 100
/*
I made a big mistake with the TTL time key format and had to switch to a new one (changing 101 to 103).
You must run the ledis-upgrade-ttl tool to upgrade the db.
*/
ObsoleteExpTimeType byte = 101
ExpMetaType byte = 102
ExpTimeType byte = 103
MetaType byte = 201
)
// TypeName is the map of type -> name
var TypeName = map[byte]string{
KVType: "kv",
HashType: "hash",
HSizeType: "hsize",
ListType: "list",
LMetaType: "lmeta",
ZSetType: "zset",
ZSizeType: "zsize",
ZScoreType: "zscore",
// BitType: "bit",
// BitMetaType: "bitmeta",
SetType: "set",
SSizeType: "ssize",
ExpTimeType: "exptime",
ExpMetaType: "expmeta",
}
const (
defaultScanCount int = 10
)
var (
errKeySize = errors.New("invalid key size")
errValueSize = errors.New("invalid value size")
errHashFieldSize = errors.New("invalid hash field size")
errSetMemberSize = errors.New("invalid set member size")
errZSetMemberSize = errors.New("invalid zset member size")
errExpireValue = errors.New("invalid expire value")
errListIndex = errors.New("invalid list index")
)
// Size limit constants
const (
// max allowed databases
MaxDatabases int = 10240
// max key size
MaxKeySize int = 1024
// max hash field size
MaxHashFieldSize int = 1024
// max zset member size
MaxZSetMemberSize int = 1024
// max set member size
MaxSetMemberSize int = 1024
// max value size
MaxValueSize int = 1024 * 1024 * 1024
)
// Common errors
var (
ErrScoreMiss = errors.New("zset score miss")
ErrWriteInROnly = errors.New("write not support in readonly mode")
ErrRplInRDWR = errors.New("replication not support in read write mode")
ErrRplNotSupport = errors.New("replication not support")
)
// const (
// DBAutoCommit uint8 = 0x0
// DBInTransaction uint8 = 0x1
// DBInMulti uint8 = 0x2
// )
// For bit operation
const (
BitAND = "and"
BitOR = "or"
BitXOR = "xor"
BitNot = "not"
)
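
The two naming schemes above are easy to confuse: the DataType names (KVName etc.) are the external, user-facing names, while TypeName maps the backend store type bytes. A small runnable illustration, assuming the public ledisdb import path:

	package main

	import (
		"fmt"

		"github.com/siddontang/ledisdb/ledis"
	)

	func main() {
		fmt.Println(ledis.KV.String())            // "KV"  (external DataType name)
		fmt.Println(ledis.TypeName[ledis.KVType]) // "kv"  (backend store type name)
	}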

vendor/github.com/siddontang/ledisdb/ledis/doc.go generated vendored Normal file

@ -0,0 +1,58 @@
// Package ledis is a high performance embedded NoSQL database.
//
// Ledis supports various data structures such as kv, list, hash, set and zset, like redis.
//
// Other features include replication and data with a limited time-to-live.
//
// Usage
//
// First create a ledis instance before use:
//
// l, err := ledis.Open(cfg)
//
// cfg is a Config instance which contains the configuration for ledis,
// such as DataDir (the root directory where ledis stores its data).
//
// After you create a ledis instance, you can select a DB to store your data:
//
// db, _ := l.Select(0)
//
// A DB must be selected by an index; by default ledis supports 16 databases, so the index range is [0, 15].
//
// KV
//
// KV is the most basic ledis type, as in any other key-value database.
//
// err := db.Set(key, value)
// value, err := db.Get(key)
//
// List
//
// A List is simply a list of values, sorted by insertion order.
// You can push or pop value on the list head (left) or tail (right).
//
// err := db.LPush(key, value1)
// err := db.RPush(key, value2)
// value1, err := db.LPop(key)
// value2, err := db.RPop(key)
//
// Hash
//
// Hash is a map between fields and values.
//
// n, err := db.HSet(key, field1, value1)
// n, err := db.HSet(key, field2, value2)
// value1, err := db.HGet(key, field1)
// value2, err := db.HGet(key, field2)
//
// ZSet
//
// ZSet is a sorted collection of values.
// Every member of a zset is associated with a score, an int64 value used to sort members from smallest to greatest score.
// Members are unique, but scores may repeat.
//
// n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
// ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
//
//
package ledis
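
Putting the snippets above together, a minimal runnable sketch (the DataDir value is hypothetical, and config.NewConfigDefault is assumed from the ledisdb config package):

	package main

	import (
		"fmt"

		"github.com/siddontang/ledisdb/config"
		"github.com/siddontang/ledisdb/ledis"
	)

	func main() {
		cfg := config.NewConfigDefault()       // assumed default-config constructor
		cfg.DataDir = "/tmp/ledis-doc-example" // hypothetical working directory

		l, err := ledis.Open(cfg)
		if err != nil {
			panic(err)
		}
		defer l.Close()

		db, _ := l.Select(0)

		if err := db.Set([]byte("key"), []byte("value")); err != nil {
			panic(err)
		}
		v, _ := db.Get([]byte("key"))
		fmt.Printf("key = %s\n", v)
	}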

vendor/github.com/siddontang/ledisdb/ledis/dump.go generated vendored Normal file

@ -0,0 +1,220 @@
package ledis
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"os"
"github.com/siddontang/go/snappy"
"github.com/siddontang/ledisdb/store"
)
// DumpHead is the head of a dump.
type DumpHead struct {
CommitID uint64
}
// Read reads meta from the Reader.
func (h *DumpHead) Read(r io.Reader) error {
return binary.Read(r, binary.BigEndian, &h.CommitID)
}
// Write writes meta to the Writer
func (h *DumpHead) Write(w io.Writer) error {
return binary.Write(w, binary.BigEndian, h.CommitID)
}
// DumpFile dumps data to the file
func (l *Ledis) DumpFile(path string) error {
f, err := os.Create(path)
if err != nil {
return err
}
defer f.Close()
return l.Dump(f)
}
// Dump dumps data to the Writer.
func (l *Ledis) Dump(w io.Writer) error {
var err error
var commitID uint64
var snap *store.Snapshot
l.wLock.Lock()
if l.r != nil {
if commitID, err = l.r.LastCommitID(); err != nil {
l.wLock.Unlock()
return err
}
}
if snap, err = l.ldb.NewSnapshot(); err != nil {
l.wLock.Unlock()
return err
}
defer snap.Close()
l.wLock.Unlock()
wb := bufio.NewWriterSize(w, 4096)
h := &DumpHead{commitID}
if err = h.Write(wb); err != nil {
return err
}
it := snap.NewIterator()
defer it.Close()
it.SeekToFirst()
compressBuf := make([]byte, 4096)
var key []byte
var value []byte
for ; it.Valid(); it.Next() {
key = it.RawKey()
value = it.RawValue()
if key, err = snappy.Encode(compressBuf, key); err != nil {
return err
}
if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil {
return err
}
if _, err = wb.Write(key); err != nil {
return err
}
if value, err = snappy.Encode(compressBuf, value); err != nil {
return err
}
if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil {
return err
}
if _, err = wb.Write(value); err != nil {
return err
}
}
if err = wb.Flush(); err != nil {
return err
}
compressBuf = nil
return nil
}
// LoadDumpFile clears all data and loads dump file to db
func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return l.LoadDump(f)
}
// LoadDump clears all data and loads dump file to db
func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) {
l.wLock.Lock()
defer l.wLock.Unlock()
var err error
if err = l.flushAll(); err != nil {
return nil, err
}
rb := bufio.NewReaderSize(r, 4096)
h := new(DumpHead)
if err = h.Read(rb); err != nil {
return nil, err
}
var keyLen uint16
var valueLen uint32
var keyBuf bytes.Buffer
var valueBuf bytes.Buffer
deKeyBuf := make([]byte, 4096)
deValueBuf := make([]byte, 4096)
var key, value []byte
wb := l.ldb.NewWriteBatch()
defer wb.Close()
n := 0
for {
if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
return nil, err
} else if err == io.EOF {
break
}
if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
return nil, err
}
if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil {
return nil, err
}
if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
return nil, err
}
if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
return nil, err
}
if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil {
return nil, err
}
wb.Put(key, value)
n++
if n%1024 == 0 {
if err = wb.Commit(); err != nil {
return nil, err
}
}
// if err = l.ldb.Put(key, value); err != nil {
// return nil, err
// }
keyBuf.Reset()
valueBuf.Reset()
}
if err = wb.Commit(); err != nil {
return nil, err
}
deKeyBuf = nil
deValueBuf = nil
if l.r != nil {
if err := l.r.UpdateCommitID(h.CommitID); err != nil {
return nil, err
}
}
return h, nil
}
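
The on-disk layout implied by Dump is an 8-byte big-endian CommitID header followed by repeated records of [uint16 BE compressed-key length][snappy key][uint32 BE compressed-value length][snappy value]. A small sketch that reads back just the header (the dump path is hypothetical):

	package main

	import (
		"fmt"
		"os"

		"github.com/siddontang/ledisdb/ledis"
	)

	func main() {
		f, err := os.Open("/tmp/ledis.dump") // hypothetical dump file
		if err != nil {
			panic(err)
		}
		defer f.Close()

		// DumpHead.Read decodes the big-endian uint64 commit id written by Dump.
		h := new(ledis.DumpHead)
		if err := h.Read(f); err != nil {
			panic(err)
		}
		fmt.Println("commit id:", h.CommitID)
	}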

vendor/github.com/siddontang/ledisdb/ledis/event.go generated vendored Normal file

@ -0,0 +1,136 @@
package ledis
import (
"errors"
"fmt"
"strconv"
"github.com/siddontang/go/hack"
)
var errInvalidEvent = errors.New("invalid event")
func formatEventKey(buf []byte, k []byte) ([]byte, error) {
if len(k) < 2 {
return nil, errInvalidEvent
}
buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...)
buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)
db := new(DB)
index, _, err := decodeDBIndex(k)
if err != nil {
return nil, err
}
db.setIndex(index)
// TODO: format at the respective place
switch k[1] {
case KVType:
key, err := db.decodeKVKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case HashType:
key, field, err := db.hDecodeHashKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(field))
case HSizeType:
key, err := db.hDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ListType:
key, seq, err := db.lDecodeListKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, int64(seq), 10)
case LMetaType:
key, err := db.lDecodeMetaKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ZSetType:
key, m, err := db.zDecodeSetKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(m))
case ZSizeType:
key, err := db.zDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ZScoreType:
key, m, score, err := db.zDecodeScoreKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(m))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, score, 10)
case SetType:
key, member, err := db.sDecodeSetKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(member))
case SSizeType:
key, err := db.sDecodeSizeKey(k)
if err != nil {
return nil, err
}
buf = strconv.AppendQuote(buf, hack.String(key))
case ExpTimeType:
tp, key, t, err := db.expDecodeTimeKey(k)
if err != nil {
return nil, err
}
buf = append(buf, TypeName[tp]...)
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(key))
buf = append(buf, ' ')
buf = strconv.AppendInt(buf, t, 10)
case ExpMetaType:
tp, key, err := db.expDecodeMetaKey(k)
if err != nil {
return nil, err
}
buf = append(buf, TypeName[tp]...)
buf = append(buf, ' ')
buf = strconv.AppendQuote(buf, hack.String(key))
default:
return nil, errInvalidEvent
}
return buf, nil
}

vendor/github.com/siddontang/ledisdb/ledis/ledis.go generated vendored Normal file

@ -0,0 +1,248 @@
package ledis
import (
"fmt"
"io"
"os"
"path"
"sync"
"time"
"github.com/siddontang/go/filelock"
"github.com/siddontang/go/log"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/rpl"
"github.com/siddontang/ledisdb/store"
)
// Ledis is the core structure to handle the database.
type Ledis struct {
cfg *config.Config
ldb *store.DB
dbLock sync.Mutex
dbs map[int]*DB
quit chan struct{}
wg sync.WaitGroup
//for replication
r *rpl.Replication
rc chan struct{}
rbatch *store.WriteBatch
rDoneCh chan struct{}
rhs []NewLogEventHandler
wLock sync.RWMutex // allow only one write at the same time
commitLock sync.Mutex // allow only one write commit at the same time
lock io.Closer
ttlCheckers []*ttlChecker
ttlCheckerCh chan *ttlChecker
}
// Open opens the Ledis with a config.
func Open(cfg *config.Config) (*Ledis, error) {
if len(cfg.DataDir) == 0 {
cfg.DataDir = config.DefaultDataDir
}
if cfg.Databases == 0 {
cfg.Databases = 16
} else if cfg.Databases > MaxDatabases {
cfg.Databases = MaxDatabases
}
os.MkdirAll(cfg.DataDir, 0755)
var err error
l := new(Ledis)
l.cfg = cfg
if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil {
return nil, err
}
l.quit = make(chan struct{})
if l.ldb, err = store.Open(cfg); err != nil {
return nil, err
}
if cfg.UseReplication {
if l.r, err = rpl.NewReplication(cfg); err != nil {
return nil, err
}
l.rc = make(chan struct{}, 1)
l.rbatch = l.ldb.NewWriteBatch()
l.rDoneCh = make(chan struct{}, 1)
l.wg.Add(1)
go l.onReplication()
// first we must wait for all replication to finish;
// some logs may not be committed yet
l.WaitReplication()
} else {
l.r = nil
}
l.dbs = make(map[int]*DB, 16)
l.checkTTL()
return l, nil
}
// Close closes the Ledis.
func (l *Ledis) Close() {
close(l.quit)
l.wg.Wait()
l.ldb.Close()
if l.r != nil {
l.r.Close()
//l.r = nil
}
if l.lock != nil {
l.lock.Close()
//l.lock = nil
}
}
// Select chooses a database.
func (l *Ledis) Select(index int) (*DB, error) {
if index < 0 || index >= l.cfg.Databases {
return nil, fmt.Errorf("invalid db index %d, must in [0, %d]", index, l.cfg.Databases-1)
}
l.dbLock.Lock()
defer l.dbLock.Unlock()
db, ok := l.dbs[index]
if ok {
return db, nil
}
db = l.newDB(index)
l.dbs[index] = db
go func(db *DB) {
l.ttlCheckerCh <- db.ttlChecker
}(db)
return db, nil
}
// FlushAll will clear all data and replication logs
func (l *Ledis) FlushAll() error {
l.wLock.Lock()
defer l.wLock.Unlock()
return l.flushAll()
}
func (l *Ledis) flushAll() error {
it := l.ldb.NewIterator()
defer it.Close()
it.SeekToFirst()
w := l.ldb.NewWriteBatch()
defer w.Rollback()
n := 0
for ; it.Valid(); it.Next() {
n++
if n == 10000 {
if err := w.Commit(); err != nil {
log.Fatalf("flush all commit error: %s", err.Error())
return err
}
n = 0
}
w.Delete(it.RawKey())
}
if err := w.Commit(); err != nil {
log.Fatalf("flush all commit error: %s", err.Error())
return err
}
if l.r != nil {
if err := l.r.Clear(); err != nil {
log.Fatalf("flush all replication clear error: %s", err.Error())
return err
}
}
return nil
}
// IsReadOnly returns whether Ledis is read only or not.
func (l *Ledis) IsReadOnly() bool {
if l.cfg.GetReadonly() {
return true
} else if l.r != nil {
if b, _ := l.r.CommitIDBehind(); b {
return true
}
}
return false
}
func (l *Ledis) checkTTL() {
l.ttlCheckers = make([]*ttlChecker, 0, 16)
l.ttlCheckerCh = make(chan *ttlChecker, 16)
if l.cfg.TTLCheckInterval == 0 {
l.cfg.TTLCheckInterval = 1
}
l.wg.Add(1)
go func() {
defer l.wg.Done()
tick := time.NewTicker(time.Duration(l.cfg.TTLCheckInterval) * time.Second)
defer tick.Stop()
for {
select {
case <-tick.C:
if l.IsReadOnly() {
break
}
for _, c := range l.ttlCheckers {
c.check()
}
case c := <-l.ttlCheckerCh:
l.ttlCheckers = append(l.ttlCheckers, c)
c.check()
case <-l.quit:
return
}
}
}()
}
// StoreStat returns the statistics.
func (l *Ledis) StoreStat() *store.Stat {
return l.ldb.Stat()
}
// CompactStore compacts the backend storage.
func (l *Ledis) CompactStore() error {
l.wLock.Lock()
defer l.wLock.Unlock()
return l.ldb.Compact()
}

vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go generated vendored Normal file

@ -0,0 +1,208 @@
package ledis
import (
"bytes"
"encoding/binary"
"fmt"
"sync"
"github.com/siddontang/ledisdb/store"
)
type ibucket interface {
Get(key []byte) ([]byte, error)
GetSlice(key []byte) (store.Slice, error)
Put(key []byte, value []byte) error
Delete(key []byte) error
NewIterator() *store.Iterator
NewWriteBatch() *store.WriteBatch
RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
}
// DB is the database.
type DB struct {
l *Ledis
sdb *store.DB
bucket ibucket
index int
// buffer to store index varint
indexVarBuf []byte
kvBatch *batch
listBatch *batch
hashBatch *batch
zsetBatch *batch
// binBatch *batch
setBatch *batch
// status uint8
ttlChecker *ttlChecker
lbkeys *lBlockKeys
}
func (l *Ledis) newDB(index int) *DB {
d := new(DB)
d.l = l
d.sdb = l.ldb
d.bucket = d.sdb
// d.status = DBAutoCommit
d.setIndex(index)
d.kvBatch = d.newBatch()
d.listBatch = d.newBatch()
d.hashBatch = d.newBatch()
d.zsetBatch = d.newBatch()
// d.binBatch = d.newBatch()
d.setBatch = d.newBatch()
d.lbkeys = newLBlockKeys()
d.ttlChecker = d.newTTLChecker()
return d
}
func decodeDBIndex(buf []byte) (int, int, error) {
index, n := binary.Uvarint(buf)
if n == 0 {
return 0, 0, fmt.Errorf("buf is too small to save index")
} else if n < 0 {
return 0, 0, fmt.Errorf("value larger than 64 bits")
} else if index > uint64(MaxDatabases) {
return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases)
}
return int(index), n, nil
}
func (db *DB) setIndex(index int) {
db.index = index
// a varint takes at most 10 bytes
buf := make([]byte, 10)
n := binary.PutUvarint(buf, uint64(index))
db.indexVarBuf = buf[0:n]
}
func (db *DB) checkKeyIndex(buf []byte) (int, error) {
if len(buf) < len(db.indexVarBuf) {
return 0, fmt.Errorf("key is too small")
} else if !bytes.Equal(db.indexVarBuf, buf[0:len(db.indexVarBuf)]) {
return 0, fmt.Errorf("invalid db index")
}
return len(db.indexVarBuf), nil
}
func (db *DB) newTTLChecker() *ttlChecker {
c := new(ttlChecker)
c.db = db
c.txs = make([]*batch, maxDataType)
c.cbs = make([]onExpired, maxDataType)
c.nc = 0
c.register(KVType, db.kvBatch, db.delete)
c.register(ListType, db.listBatch, db.lDelete)
c.register(HashType, db.hashBatch, db.hDelete)
c.register(ZSetType, db.zsetBatch, db.zDelete)
// c.register(BitType, db.binBatch, db.bDelete)
c.register(SetType, db.setBatch, db.sDelete)
return c
}
func (db *DB) newBatch() *batch {
return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock})
}
// Index gets the index of database.
func (db *DB) Index() int {
return int(db.index)
}
// func (db *DB) IsAutoCommit() bool {
// return db.status == DBAutoCommit
// }
// FlushAll flushes the data.
func (db *DB) FlushAll() (drop int64, err error) {
all := [...](func() (int64, error)){
db.flush,
db.lFlush,
db.hFlush,
db.zFlush,
db.sFlush}
for _, flush := range all {
n, e := flush()
if e != nil {
err = e
return
}
drop += n
}
return
}
func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
var deleteFunc func(t *batch, key []byte) int64
var metaDataType byte
switch dataType {
case KVType:
deleteFunc = db.delete
metaDataType = KVType
case ListType:
deleteFunc = db.lDelete
metaDataType = LMetaType
case HashType:
deleteFunc = db.hDelete
metaDataType = HSizeType
case ZSetType:
deleteFunc = db.zDelete
metaDataType = ZSizeType
// case BitType:
// deleteFunc = db.bDelete
// metaDataType = BitMetaType
case SetType:
deleteFunc = db.sDelete
metaDataType = SSizeType
default:
return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType])
}
var keys [][]byte
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
for len(keys) != 0 || err != nil {
for _, key := range keys {
deleteFunc(t, key)
db.rmExpire(t, dataType, key)
}
if err = t.Commit(); err != nil {
return
}
drop += int64(len(keys))
keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
}
return
}
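
The per-database key prefix handled by setIndex and decodeDBIndex is a plain unsigned varint. A standalone illustration of the round trip, using only the standard library:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		// encode a db index the way setIndex does (a varint needs at most 10 bytes)
		buf := make([]byte, 10)
		n := binary.PutUvarint(buf, 300) // indexes < 128 take 1 byte; 300 takes 2
		prefix := buf[:n]

		// decode it back the way decodeDBIndex does
		index, m := binary.Uvarint(prefix)
		fmt.Println(index, n, m) // 300 2 2
	}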

vendor/github.com/siddontang/ledisdb/ledis/migrate.go generated vendored Normal file

@ -0,0 +1,195 @@
package ledis
import (
"fmt"
"github.com/siddontang/rdb"
)
/*
To support redis <-> ledisdb migration, the dump value format is the same as redis.
We do not support bitmap, and may add bit operations for kv later.
Note that we use int64 for zset scores, not double.
Only rdb version 6 is supported.
*/
// Dump dumps the KV value of key
func (db *DB) Dump(key []byte) ([]byte, error) {
v, err := db.Get(key)
if err != nil {
return nil, err
} else if v == nil {
return nil, err
}
return rdb.Dump(rdb.String(v))
}
// LDump dumps the list value of key
func (db *DB) LDump(key []byte) ([]byte, error) {
v, err := db.LRange(key, 0, -1)
if err != nil {
return nil, err
} else if len(v) == 0 {
return nil, err
}
return rdb.Dump(rdb.List(v))
}
// HDump dumps the hash value of key
func (db *DB) HDump(key []byte) ([]byte, error) {
v, err := db.HGetAll(key)
if err != nil {
return nil, err
} else if len(v) == 0 {
return nil, err
}
o := make(rdb.Hash, len(v))
for i := 0; i < len(v); i++ {
o[i].Field = v[i].Field
o[i].Value = v[i].Value
}
return rdb.Dump(o)
}
// SDump dumps the set value of key
func (db *DB) SDump(key []byte) ([]byte, error) {
v, err := db.SMembers(key)
if err != nil {
return nil, err
} else if len(v) == 0 {
return nil, err
}
return rdb.Dump(rdb.Set(v))
}
// ZDump dumps the zset value of key
func (db *DB) ZDump(key []byte) ([]byte, error) {
v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1)
if err != nil {
return nil, err
} else if len(v) == 0 {
return nil, err
}
o := make(rdb.ZSet, len(v))
for i := 0; i < len(v); i++ {
o[i].Member = v[i].Member
o[i].Score = float64(v[i].Score)
}
return rdb.Dump(o)
}
// Restore restores a key into database.
func (db *DB) Restore(key []byte, ttl int64, data []byte) error {
d, err := rdb.DecodeDump(data)
if err != nil {
return err
}
// ttl is in milliseconds, but we only support seconds
// (we may support milliseconds later)
if ttl > 0 {
ttl = ttl / 1e3
if ttl == 0 {
ttl = 1
}
}
switch value := d.(type) {
case rdb.String:
if _, err = db.Del(key); err != nil {
return err
}
if err = db.Set(key, value); err != nil {
return err
}
if ttl > 0 {
if _, err = db.Expire(key, ttl); err != nil {
return err
}
}
case rdb.Hash:
//first clear old key
if _, err = db.HClear(key); err != nil {
return err
}
fv := make([]FVPair, len(value))
for i := 0; i < len(value); i++ {
fv[i] = FVPair{Field: value[i].Field, Value: value[i].Value}
}
if err = db.HMset(key, fv...); err != nil {
return err
}
if ttl > 0 {
if _, err = db.HExpire(key, ttl); err != nil {
return err
}
}
case rdb.List:
//first clear old key
if _, err = db.LClear(key); err != nil {
return err
}
if _, err = db.RPush(key, value...); err != nil {
return err
}
if ttl > 0 {
if _, err = db.LExpire(key, ttl); err != nil {
return err
}
}
case rdb.ZSet:
//first clear old key
if _, err = db.ZClear(key); err != nil {
return err
}
sp := make([]ScorePair, len(value))
for i := 0; i < len(value); i++ {
sp[i] = ScorePair{int64(value[i].Score), value[i].Member}
}
if _, err = db.ZAdd(key, sp...); err != nil {
return err
}
if ttl > 0 {
if _, err = db.ZExpire(key, ttl); err != nil {
return err
}
}
case rdb.Set:
//first clear old key
if _, err = db.SClear(key); err != nil {
return err
}
if _, err = db.SAdd(key, value...); err != nil {
return err
}
if ttl > 0 {
if _, err = db.SExpire(key, ttl); err != nil {
return err
}
}
default:
return fmt.Errorf("invalid data type %T", d)
}
return nil
}
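
A sketch of the dump/restore round trip these methods enable; db is assumed to be an open *ledis.DB (for example from l.Select(0)) and the helper name is hypothetical:

	package main

	import "github.com/siddontang/ledisdb/ledis"

	// roundTrip copies the KV value at src to dst via the redis-compatible
	// dump format; ttl 0 means no expiry is set on restore.
	func roundTrip(db *ledis.DB, src, dst []byte) error {
		data, err := db.Dump(src)
		if err != nil || data == nil {
			return err
		}
		return db.Restore(dst, 0, data)
	}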

vendor/github.com/siddontang/ledisdb/ledis/replication.go generated vendored Normal file

@ -0,0 +1,259 @@
package ledis
import (
"bytes"
"errors"
"io"
"time"
"github.com/siddontang/go/log"
"github.com/siddontang/go/snappy"
"github.com/siddontang/ledisdb/rpl"
"github.com/siddontang/ledisdb/store"
)
const (
maxReplLogSize = 1 * 1024 * 1024
)
// For replication error.
var (
ErrLogMissed = errors.New("log is purged in server")
)
// ReplicationUsed returns whether replication is used or not.
func (l *Ledis) ReplicationUsed() bool {
return l.r != nil
}
func (l *Ledis) handleReplication() error {
l.wLock.Lock()
defer l.wLock.Unlock()
defer AsyncNotify(l.rDoneCh)
rl := &rpl.Log{}
var err error
for {
if err = l.r.NextNeedCommitLog(rl); err != nil {
if err != rpl.ErrNoBehindLog {
log.Errorf("get next commit log err, %s", err.Error)
return err
}
return nil
}
l.rbatch.Rollback()
if rl.Compression == 1 {
//todo optimize
if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil {
log.Errorf("decode log error %s", err.Error())
return err
}
}
if bd, err := store.NewBatchData(rl.Data); err != nil {
log.Errorf("decode batch log error %s", err.Error())
return err
} else if err = bd.Replay(l.rbatch); err != nil {
log.Errorf("replay batch log error %s", err.Error())
}
l.commitLock.Lock()
if err = l.rbatch.Commit(); err != nil {
log.Errorf("commit log error %s", err.Error())
} else if err = l.r.UpdateCommitID(rl.ID); err != nil {
log.Errorf("update commit id error %s", err.Error())
}
l.commitLock.Unlock()
if err != nil {
return err
}
}
}
func (l *Ledis) onReplication() {
defer l.wg.Done()
l.noticeReplication()
for {
select {
case <-l.rc:
l.handleReplication()
case <-l.quit:
return
}
}
}
// WaitReplication waits until replication is done
func (l *Ledis) WaitReplication() error {
if !l.ReplicationUsed() {
return ErrRplNotSupport
}
for i := 0; i < 100; i++ {
l.noticeReplication()
select {
case <-l.rDoneCh:
case <-l.quit:
return nil
}
time.Sleep(100 * time.Millisecond)
b, err := l.r.CommitIDBehind()
if err != nil {
return err
} else if !b {
return nil
}
}
return errors.New("wait replication too many times")
}
// StoreLogsFromReader stores logs from the Reader
func (l *Ledis) StoreLogsFromReader(rb io.Reader) error {
if !l.ReplicationUsed() {
return ErrRplNotSupport
} else if !l.cfg.Readonly {
return ErrRplInRDWR
}
log := &rpl.Log{}
for {
if err := log.Decode(rb); err != nil {
if err == io.EOF {
break
} else {
return err
}
}
if err := l.r.StoreLog(log); err != nil {
return err
}
}
l.noticeReplication()
return nil
}
func (l *Ledis) noticeReplication() {
AsyncNotify(l.rc)
}
// StoreLogsFromData stores logs from data.
func (l *Ledis) StoreLogsFromData(data []byte) error {
rb := bytes.NewReader(data)
return l.StoreLogsFromReader(rb)
}
// ReadLogsTo reads logs and write to the Writer.
func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) {
if !l.ReplicationUsed() {
// no replication log
nextLogID = 0
err = ErrRplNotSupport
return
}
var firstID, lastID uint64
firstID, err = l.r.FirstLogID()
if err != nil {
return
}
if startLogID < firstID {
err = ErrLogMissed
return
}
lastID, err = l.r.LastLogID()
if err != nil {
return
}
nextLogID = startLogID
log := &rpl.Log{}
for i := startLogID; i <= lastID; i++ {
if err = l.r.GetLog(i, log); err != nil {
return
}
if err = log.Encode(w); err != nil {
return
}
nextLogID = i + 1
n += log.Size()
if n > maxReplLogSize {
break
}
}
return
}
// ReadLogsToTimeout tries to read events; if no events are read,
// it waits for a new event signal until timeout seconds elapse
func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) {
n, nextLogID, err = l.ReadLogsTo(startLogID, w)
if err != nil {
return
} else if n != 0 {
return
}
//no events read
select {
case <-l.r.WaitLog():
case <-time.After(time.Duration(timeout) * time.Second):
case <-quitCh:
return
}
return l.ReadLogsTo(startLogID, w)
}
func (l *Ledis) propagate(rl *rpl.Log) {
for _, h := range l.rhs {
h(rl)
}
}
// NewLogEventHandler is the handler to handle new log event.
type NewLogEventHandler func(rl *rpl.Log)
// AddNewLogEventHandler adds the handler for the new log event
func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error {
if !l.ReplicationUsed() {
return ErrRplNotSupport
}
l.rhs = append(l.rhs, h)
return nil
}
// ReplicationStat returns the statistics of replication.
func (l *Ledis) ReplicationStat() (*rpl.Stat, error) {
if !l.ReplicationUsed() {
return nil, ErrRplNotSupport
}
return l.r.Stat()
}
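
A sketch of hooking the new-log event; l must have been opened with cfg.UseReplication = true, otherwise AddNewLogEventHandler returns ErrRplNotSupport (the helper name is hypothetical):

	package main

	import (
		"log"

		"github.com/siddontang/ledisdb/ledis"
		"github.com/siddontang/ledisdb/rpl"
	)

	// watchLogs registers a handler that fires for every replication log
	// written locally (propagate is called from handleCommit).
	func watchLogs(l *ledis.Ledis) error {
		return l.AddNewLogEventHandler(func(rl *rpl.Log) {
			log.Printf("new replication log id=%d", rl.ID)
		})
	}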

vendor/github.com/siddontang/ledisdb/ledis/scan.go generated vendored Normal file

@ -0,0 +1,402 @@
package ledis
import (
"errors"
"regexp"
"github.com/siddontang/ledisdb/store"
)
var errDataType = errors.New("error data type")
var errMetaKey = errors.New("error meta key")
// Scan scans the data. If inclusive is true, the scan range is [cursor, inf), else (cursor, inf).
func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false)
}
// RevScan scans the data in reverse. If inclusive is true, the scan range is (-inf, cursor], else (-inf, cursor).
func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
storeDataType, err := getDataStoreType(dataType)
if err != nil {
return nil, err
}
return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true)
}
func getDataStoreType(dataType DataType) (byte, error) {
var storeDataType byte
switch dataType {
case KV:
storeDataType = KVType
case LIST:
storeDataType = LMetaType
case HASH:
storeDataType = HSizeType
case SET:
storeDataType = SSizeType
case ZSET:
storeDataType = ZSizeType
default:
return 0, errDataType
}
return storeDataType, nil
}
func buildMatchRegexp(match string) (*regexp.Regexp, error) {
var err error
var r *regexp.Regexp
if len(match) > 0 {
if r, err = regexp.Compile(match); err != nil {
return nil, err
}
}
return r, nil
}
func (db *DB) buildScanIterator(minKey []byte, maxKey []byte, inclusive bool, reverse bool) *store.RangeLimitIterator {
tp := store.RangeOpen
if !reverse {
if inclusive {
tp = store.RangeROpen
}
} else {
if inclusive {
tp = store.RangeLOpen
}
}
var it *store.RangeLimitIterator
if !reverse {
it = db.bucket.RangeIterator(minKey, maxKey, tp)
} else {
it = db.bucket.RevRangeIterator(minKey, maxKey, tp)
}
return it
}
func (db *DB) buildScanKeyRange(storeDataType byte, key []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
if !reverse {
if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil {
return
}
if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil {
return
}
} else {
if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil {
return
}
if maxKey, err = db.encodeScanMaxKey(storeDataType, key); err != nil {
return
}
}
return
}
func checkScanCount(count int) int {
if count <= 0 {
count = defaultScanCount
}
return count
}
func (db *DB) scanGeneric(storeDataType byte, key []byte, count int,
inclusive bool, match string, reverse bool) ([][]byte, error) {
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
minKey, maxKey, err := db.buildScanKeyRange(storeDataType, key, reverse)
if err != nil {
return nil, err
}
count = checkScanCount(count)
it := db.buildScanIterator(minKey, maxKey, inclusive, reverse)
v := make([][]byte, 0, count)
for i := 0; it.Valid() && i < count; it.Next() {
if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil {
continue
} else if r != nil && !r.Match(k) {
continue
} else {
v = append(v, k)
i++
}
}
it.Close()
return v, nil
}
func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) {
return db.encodeScanKey(storeDataType, key)
}
func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) {
if len(key) > 0 {
return db.encodeScanKey(storeDataType, key)
}
k, err := db.encodeScanKey(storeDataType, nil)
if err != nil {
return nil, err
}
k[len(k)-1] = storeDataType + 1
return k, nil
}
func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) {
switch storeDataType {
case KVType:
return db.encodeKVKey(key), nil
case LMetaType:
return db.lEncodeMetaKey(key), nil
case HSizeType:
return db.hEncodeSizeKey(key), nil
case ZSizeType:
return db.zEncodeSizeKey(key), nil
case SSizeType:
return db.sEncodeSizeKey(key), nil
default:
return nil, errDataType
}
}
func (db *DB) decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) {
switch storeDataType {
case KVType:
key, err = db.decodeKVKey(ek)
case LMetaType:
key, err = db.lDecodeMetaKey(ek)
case HSizeType:
key, err = db.hDecodeSizeKey(ek)
case ZSizeType:
key, err = db.zDecodeSizeKey(ek)
case SSizeType:
key, err = db.sDecodeSizeKey(ek)
default:
err = errDataType
}
return
}
// for special data scan
func (db *DB) buildDataScanKeyRange(storeDataType byte, key []byte, cursor []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
if !reverse {
if minKey, err = db.encodeDataScanMinKey(storeDataType, key, cursor); err != nil {
return
}
if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, nil); err != nil {
return
}
} else {
if minKey, err = db.encodeDataScanMinKey(storeDataType, key, nil); err != nil {
return
}
if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, cursor); err != nil {
return
}
}
return
}
func (db *DB) encodeDataScanMinKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
return db.encodeDataScanKey(storeDataType, key, cursor)
}
func (db *DB) encodeDataScanMaxKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
if len(cursor) > 0 {
return db.encodeDataScanKey(storeDataType, key, cursor)
}
k, err := db.encodeDataScanKey(storeDataType, key, nil)
if err != nil {
return nil, err
}
// here, the last byte is the start separator, set it to the stop separator
k[len(k)-1] = k[len(k)-1] + 1
return k, nil
}
func (db *DB) encodeDataScanKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
switch storeDataType {
case HashType:
return db.hEncodeHashKey(key, cursor), nil
case ZSetType:
return db.zEncodeSetKey(key, cursor), nil
case SetType:
return db.sEncodeSetKey(key, cursor), nil
default:
return nil, errDataType
}
}
func (db *DB) buildDataScanIterator(storeDataType byte, key []byte, cursor []byte, count int,
inclusive bool, reverse bool) (*store.RangeLimitIterator, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
minKey, maxKey, err := db.buildDataScanKeyRange(storeDataType, key, cursor, reverse)
if err != nil {
return nil, err
}
it := db.buildScanIterator(minKey, maxKey, inclusive, reverse)
return it, nil
}
func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]FVPair, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
v := make([]FVPair, 0, count)
it, err := db.buildDataScanIterator(HashType, key, cursor, count, inclusive, reverse)
if err != nil {
return nil, err
}
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(f) {
continue
}
v = append(v, FVPair{Field: f, Value: it.Value()})
i++
}
return v, nil
}
// HScan scans data for hash.
func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
return db.hScanGeneric(key, cursor, count, inclusive, match, false)
}
// HRevScan reversed scans data for hash.
func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
return db.hScanGeneric(key, cursor, count, inclusive, match, true)
}
func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([][]byte, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
v := make([][]byte, 0, count)
it, err := db.buildDataScanIterator(SetType, key, cursor, count, inclusive, reverse)
if err != nil {
return nil, err
}
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, m, err := db.sDecodeSetKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(m) {
continue
}
v = append(v, m)
i++
}
return v, nil
}
// SScan scans data for set.
func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.sScanGeneric(key, cursor, count, inclusive, match, false)
}
// SRevScan scans data reversed for set.
func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
return db.sScanGeneric(key, cursor, count, inclusive, match, true)
}
func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]ScorePair, error) {
count = checkScanCount(count)
r, err := buildMatchRegexp(match)
if err != nil {
return nil, err
}
v := make([]ScorePair, 0, count)
it, err := db.buildDataScanIterator(ZSetType, key, cursor, count, inclusive, reverse)
if err != nil {
return nil, err
}
defer it.Close()
for i := 0; it.Valid() && i < count; it.Next() {
_, m, err := db.zDecodeSetKey(it.Key())
if err != nil {
return nil, err
} else if r != nil && !r.Match(m) {
continue
}
score, err := Int64(it.Value(), nil)
if err != nil {
return nil, err
}
v = append(v, ScorePair{Score: score, Member: m})
i++
}
return v, nil
}
// ZScan scans data for zset.
func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
return db.zScanGeneric(key, cursor, count, inclusive, match, false)
}
// ZRevScan scans data reversed for zset.
func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
return db.zScanGeneric(key, cursor, count, inclusive, match, true)
}
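
The inclusive flag exists for cursor-style paging: pass the last key of the previous page with inclusive=false so it is not returned twice. A sketch under that reading (helper name and page size are hypothetical):

	package main

	import "github.com/siddontang/ledisdb/ledis"

	// allKVKeys pages through every KV key using Scan's cursor semantics.
	func allKVKeys(db *ledis.DB) ([][]byte, error) {
		var out [][]byte
		var cursor []byte
		for {
			// a nil cursor scans from the start; on later pages inclusive=false
			// gives the half-open range (cursor, inf), skipping the cursor key
			keys, err := db.Scan(ledis.KV, cursor, 100, false, "")
			if err != nil {
				return nil, err
			}
			if len(keys) == 0 {
				return out, nil
			}
			out = append(out, keys...)
			cursor = keys[len(keys)-1]
		}
	}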

vendor/github.com/siddontang/ledisdb/ledis/sort.go generated vendored Normal file

@ -0,0 +1,235 @@
package ledis
import (
"bytes"
"fmt"
"sort"
"strconv"
"github.com/siddontang/ledisdb/store"
)
// Limit is for sort.
type Limit struct {
Offset int
Size int
}
func getSortRange(values [][]byte, offset int, size int) (int, int) {
var start = 0
if offset > 0 {
start = offset
}
valueLen := len(values)
var end = valueLen - 1
if size > 0 {
end = start + size - 1
}
if start >= valueLen {
start = valueLen - 1
end = valueLen - 2
}
if end >= valueLen {
end = valueLen - 1
}
return start, end
}
var hashPattern = []byte("*->")
func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte {
// If the pattern is #, return the substitution key itself
if bytes.Equal(pattern, []byte{'#'}) {
return subKey
}
// If we can't find '*' in the pattern, return nil
if !bytes.Contains(pattern, []byte{'*'}) {
return nil
}
key := pattern
var field []byte
// Find out if we're dealing with a hash dereference
if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) {
key = pattern[0 : n+1]
field = pattern[n+3:]
}
// Perform the '*' substitution
key = bytes.Replace(key, []byte{'*'}, subKey, 1)
var value []byte
if field == nil {
value, _ = db.Get(key)
} else {
value, _ = db.HGet(key, field)
}
return value
}
type sortItem struct {
value []byte
cmpValue []byte
score float64
}
type sortItemSlice struct {
alpha bool
sortByPattern bool
items []sortItem
}
func (s *sortItemSlice) Len() int {
return len(s.items)
}
func (s *sortItemSlice) Swap(i, j int) {
s.items[i], s.items[j] = s.items[j], s.items[i]
}
func (s *sortItemSlice) Less(i, j int) bool {
s1 := s.items[i]
s2 := s.items[j]
if !s.alpha {
if s1.score < s2.score {
return true
} else if s1.score > s2.score {
return false
} else {
return bytes.Compare(s1.value, s2.value) < 0
}
} else {
if s.sortByPattern {
if s1.cmpValue == nil || s2.cmpValue == nil {
if s1.cmpValue == nil {
return true
}
return false
}
// Unlike redis, we only use bytes compare
return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0
}
// Unlike redis, we only use bytes compare
return bytes.Compare(s1.value, s2.value) < 0
}
}
func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
if len(values) == 0 {
return [][]byte{}, nil
}
start, end := getSortRange(values, offset, size)
dontsort := 0
if sortBy != nil {
if !bytes.Contains(sortBy, []byte{'*'}) {
dontsort = 1
}
}
items := &sortItemSlice{
alpha: alpha,
sortByPattern: sortBy != nil,
items: make([]sortItem, len(values)),
}
for i, value := range values {
items.items[i].value = value
items.items[i].score = 0
items.items[i].cmpValue = nil
if dontsort == 0 {
var cmpValue []byte
if sortBy != nil {
cmpValue = db.lookupKeyByPattern(sortBy, value)
} else {
// use the value itself to sort by
cmpValue = value
}
if cmpValue == nil {
continue
}
if alpha {
if sortBy != nil {
items.items[i].cmpValue = cmpValue
}
} else {
score, err := strconv.ParseFloat(string(cmpValue), 64)
if err != nil {
return nil, fmt.Errorf("%s scores can't be converted into double", cmpValue)
}
items.items[i].score = score
}
}
}
if dontsort == 0 {
if !desc {
sort.Sort(items)
} else {
sort.Sort(sort.Reverse(items))
}
}
resLen := end - start + 1
if len(sortGet) > 0 {
resLen = len(sortGet) * (end - start + 1)
}
res := make([][]byte, 0, resLen)
for i := start; i <= end; i++ {
if len(sortGet) == 0 {
res = append(res, items.items[i].value)
} else {
for _, getPattern := range sortGet {
v := db.lookupKeyByPattern(getPattern, items.items[i].value)
res = append(res, v)
}
}
}
return res, nil
}
// XLSort sorts list.
func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
values, err := db.LRange(key, 0, -1)
if err != nil {
return nil, err
}
return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}
// XSSort sorts set.
func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
values, err := db.SMembers(key)
if err != nil {
return nil, err
}
return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}
// XZSort sorts zset.
func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1)
if err != nil {
return nil, err
}
return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}
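
The pattern rules in lookupKeyByPattern mirror redis SORT: "#" substitutes the element itself, "*" is replaced by the element, and "key*->field" dereferences a hash field. A hypothetical example (the key and weight naming are illustrative only):

	package main

	import "github.com/siddontang/ledisdb/ledis"

	// sortByWeight orders the elements of a list by the numeric values stored
	// under external keys "weight_<element>", returning the elements themselves
	// (GET pattern "#"). offset 0 with size 0 selects the whole list.
	func sortByWeight(db *ledis.DB, key []byte) ([][]byte, error) {
		return db.XLSort(key, 0, 0, false, false,
			[]byte("weight_*"), [][]byte{[]byte("#")})
	}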

vendor/github.com/siddontang/ledisdb/ledis/t_hash.go generated vendored Normal file

@ -0,0 +1,556 @@
package ledis
import (
"encoding/binary"
"errors"
"time"
"github.com/siddontang/go/num"
"github.com/siddontang/ledisdb/store"
)
// FVPair is the pair of field and value.
type FVPair struct {
Field []byte
Value []byte
}
var errHashKey = errors.New("invalid hash key")
var errHSizeKey = errors.New("invalid hsize key")
const (
hashStartSep byte = ':'
hashStopSep byte = hashStartSep + 1
)
func checkHashKFSize(key []byte, field []byte) error {
if len(key) > MaxKeySize || len(key) == 0 {
return errKeySize
} else if len(field) > MaxHashFieldSize || len(field) == 0 {
return errHashFieldSize
}
return nil
}
func (db *DB) hEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(db.indexVarBuf))
pos := 0
n := copy(buf, db.indexVarBuf)
pos += n
buf[pos] = HSizeType
pos++
copy(buf[pos:], key)
return buf
}
func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != HSizeType {
return nil, errHSizeKey
}
pos++
return ek[pos:], nil
}
func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
buf := make([]byte, len(key)+len(field)+1+1+2+len(db.indexVarBuf))
pos := 0
n := copy(buf, db.indexVarBuf)
pos += n
buf[pos] = HashType
pos++
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
pos += 2
copy(buf[pos:], key)
pos += len(key)
buf[pos] = hashStartSep
pos++
copy(buf[pos:], field)
return buf
}
func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, nil, err
}
if pos+1 > len(ek) || ek[pos] != HashType {
return nil, nil, errHashKey
}
pos++
if pos+2 > len(ek) {
return nil, nil, errHashKey
}
keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2
if keyLen+pos > len(ek) {
return nil, nil, errHashKey
}
key := ek[pos : pos+keyLen]
pos += keyLen
if ek[pos] != hashStartSep {
return nil, nil, errHashKey
}
pos++
field := ek[pos:]
return key, field, nil
}
func (db *DB) hEncodeStartKey(key []byte) []byte {
return db.hEncodeHashKey(key, nil)
}
func (db *DB) hEncodeStopKey(key []byte) []byte {
k := db.hEncodeHashKey(key, nil)
k[len(k)-1] = hashStopSep
return k
}
func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) {
t := db.hashBatch
ek := db.hEncodeHashKey(key, field)
var n int64 = 1
if v, _ := db.bucket.Get(ek); v != nil {
n = 0
} else {
if _, err := db.hIncrSize(key, 1); err != nil {
return 0, err
}
}
t.Put(ek, value)
return n, nil
}
// P.S.: here we just focus on deleting the hash data;
// anything else, like expiry, is ignored.
func (db *DB) hDelete(t *batch, key []byte) int64 {
sk := db.hEncodeSizeKey(key)
start := db.hEncodeStartKey(key)
stop := db.hEncodeStopKey(key)
var num int64
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
for ; it.Valid(); it.Next() {
t.Delete(it.Key())
num++
}
it.Close()
t.Delete(sk)
return num
}
func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
t := db.hashBatch
t.Lock()
defer t.Unlock()
if hlen, err := db.HLen(key); err != nil || hlen == 0 {
return 0, err
}
db.expireAt(t, HashType, key, when)
if err := t.Commit(); err != nil {
return 0, err
}
return 1, nil
}
// HLen returns the length of the hash.
func (db *DB) HLen(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
}
// HSet sets the field with value of key.
func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
if err := checkHashKFSize(key, field); err != nil {
return 0, err
} else if err := checkValueSize(value); err != nil {
return 0, err
}
t := db.hashBatch
t.Lock()
defer t.Unlock()
n, err := db.hSetItem(key, field, value)
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}
// HGet gets the value of the field.
func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
if err := checkHashKFSize(key, field); err != nil {
return nil, err
}
return db.bucket.Get(db.hEncodeHashKey(key, field))
}
// HMset sets multi field-values.
func (db *DB) HMset(key []byte, args ...FVPair) error {
t := db.hashBatch
t.Lock()
defer t.Unlock()
var err error
var ek []byte
var num int64
for i := 0; i < len(args); i++ {
if err := checkHashKFSize(key, args[i].Field); err != nil {
return err
} else if err := checkValueSize(args[i].Value); err != nil {
return err
}
ek = db.hEncodeHashKey(key, args[i].Field)
if v, err := db.bucket.Get(ek); err != nil {
return err
} else if v == nil {
num++
}
t.Put(ek, args[i].Value)
}
if _, err = db.hIncrSize(key, num); err != nil {
return err
}
// TODO: add binlog
err = t.Commit()
return err
}
// HMget gets multi values of fields
func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
var ek []byte
it := db.bucket.NewIterator()
defer it.Close()
r := make([][]byte, len(args))
for i := 0; i < len(args); i++ {
if err := checkHashKFSize(key, args[i]); err != nil {
return nil, err
}
ek = db.hEncodeHashKey(key, args[i])
r[i] = it.Find(ek)
}
return r, nil
}
// HDel deletes the fields.
func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
t := db.hashBatch
var ek []byte
var v []byte
var err error
t.Lock()
defer t.Unlock()
it := db.bucket.NewIterator()
defer it.Close()
var num int64
for i := 0; i < len(args); i++ {
if err := checkHashKFSize(key, args[i]); err != nil {
return 0, err
}
ek = db.hEncodeHashKey(key, args[i])
v = it.RawFind(ek)
if v == nil {
continue
} else {
num++
t.Delete(ek)
}
}
if _, err = db.hIncrSize(key, -num); err != nil {
return 0, err
}
err = t.Commit()
return num, err
}
func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
t := db.hashBatch
sk := db.hEncodeSizeKey(key)
var err error
var size int64
if size, err = Int64(db.bucket.Get(sk)); err != nil {
return 0, err
}
size += delta
if size <= 0 {
size = 0
t.Delete(sk)
db.rmExpire(t, HashType, key)
} else {
t.Put(sk, PutInt64(size))
}
return size, nil
}
// HIncrBy increases the value of field by delta.
func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
if err := checkHashKFSize(key, field); err != nil {
return 0, err
}
t := db.hashBatch
var ek []byte
var err error
t.Lock()
defer t.Unlock()
ek = db.hEncodeHashKey(key, field)
var n int64
if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
return 0, err
}
n += delta
_, err = db.hSetItem(key, field, num.FormatInt64ToSlice(n))
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}
// HGetAll returns all field-values.
func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.hEncodeStartKey(key)
stop := db.hEncodeStopKey(key)
v := make([]FVPair, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
return nil, err
}
v = append(v, FVPair{Field: f, Value: it.Value()})
}
return v, nil
}
// HKeys returns all the fields.
func (db *DB) HKeys(key []byte) ([][]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.hEncodeStartKey(key)
stop := db.hEncodeStopKey(key)
v := make([][]byte, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, f, err := db.hDecodeHashKey(it.Key())
if err != nil {
return nil, err
}
v = append(v, f)
}
return v, nil
}
// HValues returns all values
func (db *DB) HValues(key []byte) ([][]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.hEncodeStartKey(key)
stop := db.hEncodeStopKey(key)
v := make([][]byte, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, _, err := db.hDecodeHashKey(it.Key())
if err != nil {
return nil, err
}
v = append(v, it.Value())
}
return v, nil
}
// HClear clears the data.
func (db *DB) HClear(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.hashBatch
t.Lock()
defer t.Unlock()
num := db.hDelete(t, key)
db.rmExpire(t, HashType, key)
err := t.Commit()
return num, err
}
// HMclear cleans multi data.
func (db *DB) HMclear(keys ...[]byte) (int64, error) {
t := db.hashBatch
t.Lock()
defer t.Unlock()
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return 0, err
}
db.hDelete(t, key)
db.rmExpire(t, HashType, key)
}
err := t.Commit()
return int64(len(keys)), err
}
func (db *DB) hFlush() (drop int64, err error) {
t := db.hashBatch
t.Lock()
defer t.Unlock()
return db.flushType(t, HashType)
}
// HExpire expires the data with duration.
func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue
}
return db.hExpireAt(key, time.Now().Unix()+duration)
}
// HExpireAt expires the data at time when.
func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() {
return 0, errExpireValue
}
return db.hExpireAt(key, when)
}
// HTTL gets the TTL of data.
func (db *DB) HTTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return -1, err
}
return db.ttl(HashType, key)
}
// HPersist removes the TTL of data.
func (db *DB) HPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.hashBatch
t.Lock()
defer t.Unlock()
n, err := db.rmExpire(t, HashType, key)
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}
// HKeyExists checks whether data exists or not.
func (db *DB) HKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
sk := db.hEncodeSizeKey(key)
v, err := db.bucket.Get(sk)
if v != nil && err == nil {
return 1, nil
}
return 0, err
}
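
For reference, the byte layout produced by hEncodeHashKey is [db-index varint][HashType][uint16 BE key length][key][':' separator][field]. A standalone sketch of the same framing for a single-byte index, using only the standard library:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// encodeHashKey mirrors hEncodeHashKey for a db index below 128, whose
	// varint encoding is the index byte itself.
	func encodeHashKey(index byte, key, field []byte) []byte {
		buf := make([]byte, 1+1+2+len(key)+1+len(field))
		buf[0] = index
		buf[1] = 2 // HashType
		binary.BigEndian.PutUint16(buf[2:], uint16(len(key)))
		copy(buf[4:], key)
		buf[4+len(key)] = ':' // hashStartSep
		copy(buf[5+len(key):], field)
		return buf
	}

	func main() {
		fmt.Printf("% x\n", encodeHashKey(0, []byte("k"), []byte("f")))
		// prints: 00 02 00 01 6b 3a 66
	}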

vendor/github.com/siddontang/ledisdb/ledis/t_kv.go generated vendored Normal file

@ -0,0 +1,794 @@
package ledis
import (
"encoding/binary"
"errors"
"fmt"
"strings"
"time"
"github.com/siddontang/go/num"
"github.com/siddontang/ledisdb/store"
)
// KVPair is the pair of key-value.
type KVPair struct {
Key []byte
Value []byte
}
var errKVKey = errors.New("invalid encode kv key")
func checkKeySize(key []byte) error {
if len(key) > MaxKeySize || len(key) == 0 {
return errKeySize
}
return nil
}
func checkValueSize(value []byte) error {
if len(value) > MaxValueSize {
return errValueSize
}
return nil
}
func (db *DB) encodeKVKey(key []byte) []byte {
ek := make([]byte, len(key)+1+len(db.indexVarBuf))
pos := copy(ek, db.indexVarBuf)
ek[pos] = KVType
pos++
copy(ek[pos:], key)
return ek
}
func (db *DB) decodeKVKey(ek []byte) ([]byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != KVType {
return nil, errKVKey
}
pos++
return ek[pos:], nil
}
func (db *DB) encodeKVMinKey() []byte {
ek := db.encodeKVKey(nil)
return ek
}
func (db *DB) encodeKVMaxKey() []byte {
ek := db.encodeKVKey(nil)
ek[len(ek)-1] = KVType + 1
return ek
}
func (db *DB) incr(key []byte, delta int64) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
var err error
key = db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
var n int64
n, err = StrInt64(db.bucket.Get(key))
if err != nil {
return 0, err
}
n += delta
t.Put(key, num.FormatInt64ToSlice(n))
err = t.Commit()
return n, err
}
// P.S.: here we just focus on deleting the key-value data;
// anything else, like expiry, is ignored.
func (db *DB) delete(t *batch, key []byte) int64 {
key = db.encodeKVKey(key)
t.Delete(key)
return 1
}
func (db *DB) setExpireAt(key []byte, when int64) (int64, error) {
t := db.kvBatch
t.Lock()
defer t.Unlock()
if exist, err := db.Exists(key); err != nil || exist == 0 {
return 0, err
}
db.expireAt(t, KVType, key, when)
if err := t.Commit(); err != nil {
return 0, err
}
return 1, nil
}
// Decr decreases the data.
func (db *DB) Decr(key []byte) (int64, error) {
return db.incr(key, -1)
}
// DecrBy decreases the data by decrement.
func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) {
return db.incr(key, -decrement)
}
// Del deletes the data.
func (db *DB) Del(keys ...[]byte) (int64, error) {
if len(keys) == 0 {
return 0, nil
}
codedKeys := make([][]byte, len(keys))
for i, k := range keys {
codedKeys[i] = db.encodeKVKey(k)
}
t := db.kvBatch
t.Lock()
defer t.Unlock()
for i, k := range keys {
t.Delete(codedKeys[i])
db.rmExpire(t, KVType, k)
}
err := t.Commit()
return int64(len(keys)), err
}
// Exists checks whether the data exists or not.
func (db *DB) Exists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
var err error
key = db.encodeKVKey(key)
var v []byte
v, err = db.bucket.Get(key)
if v != nil && err == nil {
return 1, nil
}
return 0, err
}
// Get gets the value.
func (db *DB) Get(key []byte) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
key = db.encodeKVKey(key)
return db.bucket.Get(key)
}
// GetSlice gets the value as a store.Slice.
func (db *DB) GetSlice(key []byte) (store.Slice, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
key = db.encodeKVKey(key)
return db.bucket.GetSlice(key)
}
// GetSet sets the new value and returns the old value.
func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
} else if err := checkValueSize(value); err != nil {
return nil, err
}
key = db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
oldValue, err := db.bucket.Get(key)
if err != nil {
return nil, err
}
t.Put(key, value)
err = t.Commit()
return oldValue, err
}
// Incr increases the data.
func (db *DB) Incr(key []byte) (int64, error) {
return db.incr(key, 1)
}
// IncrBy increases the data by increment.
func (db *DB) IncrBy(key []byte, increment int64) (int64, error) {
return db.incr(key, increment)
}
// MGet gets the values of multiple keys.
func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
values := make([][]byte, len(keys))
it := db.bucket.NewIterator()
defer it.Close()
for i := range keys {
if err := checkKeySize(keys[i]); err != nil {
return nil, err
}
values[i] = it.Find(db.encodeKVKey(keys[i]))
}
return values, nil
}
// MSet sets multiple key-value pairs.
func (db *DB) MSet(args ...KVPair) error {
if len(args) == 0 {
return nil
}
t := db.kvBatch
var err error
var key []byte
var value []byte
t.Lock()
defer t.Unlock()
for i := 0; i < len(args); i++ {
if err := checkKeySize(args[i].Key); err != nil {
return err
} else if err := checkValueSize(args[i].Value); err != nil {
return err
}
key = db.encodeKVKey(args[i].Key)
value = args[i].Value
t.Put(key, value)
}
err = t.Commit()
return err
}
// Set sets the data.
func (db *DB) Set(key []byte, value []byte) error {
if err := checkKeySize(key); err != nil {
return err
} else if err := checkValueSize(value); err != nil {
return err
}
var err error
key = db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
t.Put(key, value)
err = t.Commit()
return err
}
// SetNX sets the data only if the key does not already exist.
func (db *DB) SetNX(key []byte, value []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
} else if err := checkValueSize(value); err != nil {
return 0, err
}
var err error
key = db.encodeKVKey(key)
var n int64 = 1
t := db.kvBatch
t.Lock()
defer t.Unlock()
if v, err := db.bucket.Get(key); err != nil {
return 0, err
} else if v != nil {
n = 0
} else {
t.Put(key, value)
err = t.Commit()
}
return n, err
}
// SetEX sets the data with a TTL.
func (db *DB) SetEX(key []byte, duration int64, value []byte) error {
if err := checkKeySize(key); err != nil {
return err
} else if err := checkValueSize(value); err != nil {
return err
} else if duration <= 0 {
return errExpireValue
}
ek := db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
t.Put(ek, value)
db.expireAt(t, KVType, key, time.Now().Unix()+duration)
return t.Commit()
}
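// A minimal usage sketch (hypothetical, not part of the package API): it
// assumes a *DB selected from an opened Ledis instance. SetEX writes the
// value and its expiration entry in one batch, so the two are committed
// atomically.
//
//	func exampleSetEX(db *DB) error {
//		if err := db.SetEX([]byte("session:1"), 30, []byte("payload")); err != nil {
//			return err
//		}
//		ttl, err := db.TTL([]byte("session:1")) // remaining seconds, -1 if none
//		_ = ttl
//		return err
//	}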
func (db *DB) flush() (drop int64, err error) {
t := db.kvBatch
t.Lock()
defer t.Unlock()
return db.flushType(t, KVType)
}
// Expire expires the data.
func (db *DB) Expire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue
}
return db.setExpireAt(key, time.Now().Unix()+duration)
}
// ExpireAt expires the data at when.
func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() {
return 0, errExpireValue
}
return db.setExpireAt(key, when)
}
// TTL returns the TTL of the data.
func (db *DB) TTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return -1, err
}
return db.ttl(KVType, key)
}
// Persist removes the TTL of the data.
func (db *DB) Persist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.kvBatch
t.Lock()
defer t.Unlock()
n, err := db.rmExpire(t, KVType, key)
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}
// SetRange overwrites part of the value stored at key, starting at offset.
func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) {
if len(value) == 0 {
return 0, nil
}
if err := checkKeySize(key); err != nil {
return 0, err
} else if len(value)+offset > MaxValueSize {
return 0, errValueSize
}
key = db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
oldValue, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
extra := offset + len(value) - len(oldValue)
if extra > 0 {
oldValue = append(oldValue, make([]byte, extra)...)
}
copy(oldValue[offset:], value)
t.Put(key, oldValue)
if err := t.Commit(); err != nil {
return 0, err
}
return int64(len(oldValue)), nil
}
func getRange(start int, end int, valLen int) (int, int) {
if start < 0 {
start = valLen + start
}
if end < 0 {
end = valLen + end
}
if start < 0 {
start = 0
}
if end < 0 {
end = 0
}
if end >= valLen {
end = valLen - 1
}
return start, end
}
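// Worked example of getRange: with valLen = 10, (start, end) = (-3, -1)
// maps to (7, 9), selecting the last three bytes, and (0, 100) clamps to
// (0, 9). For an empty value it yields (0, -1), so callers must still
// guard against start > end, as GetRange below does.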
// GetRange gets the substring of the value in [start, end]; negative offsets count from the end.
func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
key = db.encodeKVKey(key)
value, err := db.bucket.Get(key)
if err != nil {
return nil, err
}
valLen := len(value)
start, end = getRange(start, end, valLen)
if start > end {
return nil, nil
}
return value[start : end+1], nil
}
// StrLen returns the length of the data.
func (db *DB) StrLen(key []byte) (int64, error) {
s, err := db.GetSlice(key)
if err != nil {
return 0, err
}
n := s.Size()
s.Free()
return int64(n), nil
}
// Append appends value to the existing value stored at key.
func (db *DB) Append(key []byte, value []byte) (int64, error) {
if len(value) == 0 {
return 0, nil
}
if err := checkKeySize(key); err != nil {
return 0, err
}
key = db.encodeKVKey(key)
t := db.kvBatch
t.Lock()
defer t.Unlock()
oldValue, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
if len(oldValue)+len(value) > MaxValueSize {
return 0, errValueSize
}
oldValue = append(oldValue, value...)
t.Put(key, oldValue)
if err := t.Commit(); err != nil {
return 0, err
}
return int64(len(oldValue)), nil
}
// BitOP performs the bitwise operation op (and/or/xor/not) over the source keys and stores the result at destKey.
func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) {
if err := checkKeySize(destKey); err != nil {
return 0, err
}
op = strings.ToLower(op)
if len(srcKeys) == 0 {
return 0, nil
} else if op == BitNot && len(srcKeys) > 1 {
return 0, fmt.Errorf("BITOP NOT has only one srckey")
} else if op != BitNot && len(srcKeys) < 2 {
// NOT works with a single source key; the other ops need at least two.
return 0, nil
}
key := db.encodeKVKey(srcKeys[0])
value, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
if op == BitNot {
for i := 0; i < len(value); i++ {
value[i] = ^value[i]
}
} else {
for j := 1; j < len(srcKeys); j++ {
if err := checkKeySize(srcKeys[j]); err != nil {
return 0, err
}
key = db.encodeKVKey(srcKeys[j])
ovalue, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
if len(value) < len(ovalue) {
value, ovalue = ovalue, value
}
for i := 0; i < len(ovalue); i++ {
switch op {
case BitAND:
value[i] &= ovalue[i]
case BitOR:
value[i] |= ovalue[i]
case BitXOR:
value[i] ^= ovalue[i]
default:
return 0, fmt.Errorf("invalid op type: %s", op)
}
}
for i := len(ovalue); i < len(value); i++ {
switch op {
case BitAND:
value[i] &= 0
case BitOR:
value[i] |= 0
case BitXOR:
value[i] ^= 0
}
}
}
}
key = db.encodeKVKey(destKey)
t := db.kvBatch
t.Lock()
defer t.Unlock()
t.Put(key, value)
if err := t.Commit(); err != nil {
return 0, err
}
return int64(len(value)), nil
}
var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3,
4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3,
3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4,
5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2,
2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3,
4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4,
5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}
func numberBitCount(i uint32) uint32 {
i = i - ((i >> 1) & 0x55555555)
i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
}
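// numberBitCount is the classic SWAR population count: the first line folds
// each bit pair into a 2-bit sum, the second folds pairs into 4-bit nibble
// sums, and the multiply by 0x01010101 accumulates the four byte sums into
// the top byte. For example, numberBitCount(0xF0F0F0F0) == 16.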
// BitCount returns the number of set bits in the value within [start, end].
func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
key = db.encodeKVKey(key)
value, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
start, end = getRange(start, end, len(value))
value = value[start : end+1]
var n int64
pos := 0
for ; pos+4 <= len(value); pos = pos + 4 {
n += int64(numberBitCount(binary.BigEndian.Uint32(value[pos : pos+4])))
}
for ; pos < len(value); pos++ {
n += int64(bitsInByte[value[pos]])
}
return n, nil
}
// BitPos returns the position of the first bit set to on (0 or 1) within [start, end].
func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
if (on & ^1) != 0 {
return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
}
var skipValue uint8
if on == 0 {
skipValue = 0xFF
}
key = db.encodeKVKey(key)
value, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
start, end = getRange(start, end, len(value))
value = value[start : end+1]
for i, v := range value {
if uint8(v) != skipValue {
for j := 0; j < 8; j++ {
isNull := uint8(v)&(1<<uint8(7-j)) == 0
if (on == 1 && !isNull) || (on == 0 && isNull) {
return int64((start+i)*8 + j), nil
}
}
}
}
return -1, nil
}
// SetBit sets or clears the bit at offset and returns the previous bit value.
func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
if (on & ^1) != 0 {
return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
}
t := db.kvBatch
t.Lock()
defer t.Unlock()
key = db.encodeKVKey(key)
value, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
byteOffset := int(uint32(offset) >> 3)
extra := byteOffset + 1 - len(value)
if extra > 0 {
value = append(value, make([]byte, extra)...)
}
byteVal := value[byteOffset]
bit := 7 - uint8(uint32(offset)&0x7)
bitVal := byteVal & (1 << bit)
byteVal &= ^(1 << bit)
byteVal |= (uint8(on&0x1) << bit)
value[byteOffset] = byteVal
t.Put(key, value)
if err := t.Commit(); err != nil {
return 0, err
}
if bitVal > 0 {
return 1, nil
}
return 0, nil
}
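// A small sketch of the bit layout (hypothetical helper, not part of the
// package): bit offset 0 is the most significant bit of byte 0, matching
// Redis semantics, so setting offset 0 turns the first byte into 0x80.
//
//	func exampleSetBit(db *DB) (int64, error) {
//		if _, err := db.SetBit([]byte("flags"), 0, 1); err != nil {
//			return 0, err
//		}
//		return db.GetBit([]byte("flags"), 0) // 1
//	}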
// GetBit returns the bit at offset in the value.
func (db *DB) GetBit(key []byte, offset int) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
key = db.encodeKVKey(key)
value, err := db.bucket.Get(key)
if err != nil {
return 0, err
}
byteOffset := uint32(offset) >> 3
bit := 7 - uint8(uint32(offset)&0x7)
if byteOffset >= uint32(len(value)) {
return 0, nil
}
bitVal := value[byteOffset] & (1 << bit)
if bitVal > 0 {
return 1, nil
}
return 0, nil
}

808
vendor/github.com/siddontang/ledisdb/ledis/t_list.go generated vendored Normal file
View File

@ -0,0 +1,808 @@
package ledis
import (
"container/list"
"encoding/binary"
"errors"
"sync"
"time"
"github.com/siddontang/go/hack"
"github.com/siddontang/go/log"
"github.com/siddontang/go/num"
"github.com/siddontang/ledisdb/store"
"golang.org/x/net/context"
)
const (
listHeadSeq int32 = 1
listTailSeq int32 = 2
listMinSeq int32 = 1000
listMaxSeq int32 = 1<<31 - 1000
listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2
)
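// These sequences implement a list as a contiguous window of int32 sequence
// numbers: a new list starts at listInitialSeq, the midpoint of
// [listMinSeq, listMaxSeq]; LPush grows the window downwards from headSeq
// and RPush grows it upwards from tailSeq, leaving room for roughly 2^30
// pushes on either side before errListSeq is returned.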
var errLMetaKey = errors.New("invalid lmeta key")
var errListKey = errors.New("invalid list key")
var errListSeq = errors.New("invalid list sequence, overflow")
func (db *DB) lEncodeMetaKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = LMetaType
pos++
copy(buf[pos:], key)
return buf
}
func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != LMetaType {
return nil, errLMetaKey
}
pos++
return ek[pos:], nil
}
func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
buf := make([]byte, len(key)+7+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = ListType
pos++
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
pos += 2
copy(buf[pos:], key)
pos += len(key)
binary.BigEndian.PutUint32(buf[pos:], uint32(seq))
return buf
}
func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) {
pos := 0
pos, err = db.checkKeyIndex(ek)
if err != nil {
return
}
if pos+1 > len(ek) || ek[pos] != ListType {
err = errListKey
return
}
pos++
if pos+2 > len(ek) {
err = errListKey
return
}
keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2
if keyLen+pos+4 != len(ek) {
err = errListKey
return
}
key = ek[pos : pos+keyLen]
seq = int32(binary.BigEndian.Uint32(ek[pos+keyLen:]))
return
}
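// The encoded list item key produced by lEncodeListKey is laid out as
//
//	[index prefix][ListType][key length, uint16 BE][key][seq, uint32 BE]
//
// Keeping the sequence last and big-endian makes the items of one list sort
// in sequence order in the underlying store, which the range iteration in
// lDelete and LRange relies on.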
func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
var headSeq int32
var tailSeq int32
var size int32
var err error
t := db.listBatch
t.Lock()
defer t.Unlock()
metaKey := db.lEncodeMetaKey(key)
headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
if err != nil {
return 0, err
}
pushCnt := len(args)
if pushCnt == 0 {
return int64(size), nil
}
seq := headSeq
var delta int32 = -1
if whereSeq == listTailSeq {
seq = tailSeq
delta = 1
}
// append elements
if size > 0 {
seq += delta
}
for i := 0; i < pushCnt; i++ {
ek := db.lEncodeListKey(key, seq+int32(i)*delta)
t.Put(ek, args[i])
}
seq += int32(pushCnt-1) * delta
if seq <= listMinSeq || seq >= listMaxSeq {
return 0, errListSeq
}
// set meta info
if whereSeq == listHeadSeq {
headSeq = seq
} else {
tailSeq = seq
}
db.lSetMeta(metaKey, headSeq, tailSeq)
err = t.Commit()
if err == nil {
db.lSignalAsReady(key)
}
return int64(size) + int64(pushCnt), err
}
func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
t := db.listBatch
t.Lock()
defer t.Unlock()
var headSeq int32
var tailSeq int32
var size int32
var err error
metaKey := db.lEncodeMetaKey(key)
headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
if err != nil {
return nil, err
} else if size == 0 {
return nil, nil
}
var value []byte
seq := headSeq
if whereSeq == listTailSeq {
seq = tailSeq
}
itemKey := db.lEncodeListKey(key, seq)
value, err = db.bucket.Get(itemKey)
if err != nil {
return nil, err
}
if whereSeq == listHeadSeq {
headSeq++
} else {
tailSeq--
}
t.Delete(itemKey)
size = db.lSetMeta(metaKey, headSeq, tailSeq)
if size == 0 {
db.rmExpire(t, ListType, key)
}
err = t.Commit()
return value, err
}
func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) {
if err := checkKeySize(key); err != nil {
return err
}
t := db.listBatch
t.Lock()
defer t.Unlock()
var headSeq int32
var llen int32
start := int32(startP)
stop := int32(stopP)
ek := db.lEncodeMetaKey(key)
if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil {
return err
}
if start < 0 {
start = llen + start
}
if stop < 0 {
stop = llen + stop
}
if start >= llen || start > stop {
db.lDelete(t, key)
db.rmExpire(t, ListType, key)
return t.Commit()
}
if start < 0 {
start = 0
}
if stop >= llen {
stop = llen - 1
}
if start > 0 {
for i := int32(0); i < start; i++ {
t.Delete(db.lEncodeListKey(key, headSeq+i))
}
}
if stop < int32(llen-1) {
for i := int32(stop + 1); i < llen; i++ {
t.Delete(db.lEncodeListKey(key, headSeq+i))
}
}
db.lSetMeta(ek, headSeq+start, headSeq+stop)
return t.Commit()
}
func (db *DB) ltrim(key []byte, trimSize, whereSeq int32) (int32, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
if trimSize == 0 {
return 0, nil
}
t := db.listBatch
t.Lock()
defer t.Unlock()
var headSeq int32
var tailSeq int32
var size int32
var err error
metaKey := db.lEncodeMetaKey(key)
headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
if err != nil {
return 0, err
} else if size == 0 {
return 0, nil
}
var (
trimStartSeq int32
trimEndSeq int32
)
if whereSeq == listHeadSeq {
trimStartSeq = headSeq
trimEndSeq = num.MinInt32(trimStartSeq+trimSize-1, tailSeq)
headSeq = trimEndSeq + 1
} else {
trimEndSeq = tailSeq
trimStartSeq = num.MaxInt32(trimEndSeq-trimSize+1, headSeq)
tailSeq = trimStartSeq - 1
}
for trimSeq := trimStartSeq; trimSeq <= trimEndSeq; trimSeq++ {
itemKey := db.lEncodeListKey(key, trimSeq)
t.Delete(itemKey)
}
size = db.lSetMeta(metaKey, headSeq, tailSeq)
if size == 0 {
db.rmExpire(t, ListType, key)
}
err = t.Commit()
return trimEndSeq - trimStartSeq + 1, err
}
// Note: lDelete only removes the list data itself;
// related state such as the expire entry is left alone.
func (db *DB) lDelete(t *batch, key []byte) int64 {
mk := db.lEncodeMetaKey(key)
var headSeq int32
var tailSeq int32
var err error
it := db.bucket.NewIterator()
defer it.Close()
headSeq, tailSeq, _, err = db.lGetMeta(it, mk)
if err != nil {
return 0
}
var num int64
startKey := db.lEncodeListKey(key, headSeq)
stopKey := db.lEncodeListKey(key, tailSeq)
rit := store.NewRangeIterator(it, &store.Range{
Min: startKey,
Max: stopKey,
Type: store.RangeClose})
for ; rit.Valid(); rit.Next() {
t.Delete(rit.RawKey())
num++
}
t.Delete(mk)
return num
}
func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) {
var v []byte
if it != nil {
v = it.Find(ek)
} else {
v, err = db.bucket.Get(ek)
}
if err != nil {
return
} else if v == nil {
headSeq = listInitialSeq
tailSeq = listInitialSeq
size = 0
return
} else {
headSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
tailSeq = int32(binary.LittleEndian.Uint32(v[4:8]))
size = tailSeq - headSeq + 1
}
return
}
func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
t := db.listBatch
size := tailSeq - headSeq + 1
if size < 0 {
// todo : log error + panic
log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq)
} else if size == 0 {
t.Delete(ek)
} else {
buf := make([]byte, 8)
binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq))
binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq))
t.Put(ek, buf)
}
return size
}
func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
t := db.listBatch
t.Lock()
defer t.Unlock()
if llen, err := db.LLen(key); err != nil || llen == 0 {
return 0, err
}
db.expireAt(t, ListType, key, when)
if err := t.Commit(); err != nil {
return 0, err
}
return 1, nil
}
// LIndex returns the value at index.
func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
var seq int32
var headSeq int32
var tailSeq int32
var err error
metaKey := db.lEncodeMetaKey(key)
it := db.bucket.NewIterator()
defer it.Close()
headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey)
if err != nil {
return nil, err
}
if index >= 0 {
seq = headSeq + index
} else {
seq = tailSeq + index + 1
}
sk := db.lEncodeListKey(key, seq)
v := it.Find(sk)
return v, nil
}
// LLen gets the length of the list.
func (db *DB) LLen(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
ek := db.lEncodeMetaKey(key)
_, _, size, err := db.lGetMeta(nil, ek)
return int64(size), err
}
// LPop pops a value from the head of the list.
func (db *DB) LPop(key []byte) ([]byte, error) {
return db.lpop(key, listHeadSeq)
}
// LTrim trims the list so that only the elements in [start, stop] remain.
func (db *DB) LTrim(key []byte, start, stop int64) error {
return db.ltrim2(key, start, stop)
}
// LTrimFront removes up to trimSize elements from the head of the list.
func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) {
return db.ltrim(key, trimSize, listHeadSeq)
}
// LTrimBack removes up to trimSize elements from the tail of the list.
func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) {
return db.ltrim(key, trimSize, listTailSeq)
}
// LPush pushes the values onto the head of the list.
func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) {
return db.lpush(key, listHeadSeq, args...)
}
// LSet sets the value at index.
func (db *DB) LSet(key []byte, index int32, value []byte) error {
if err := checkKeySize(key); err != nil {
return err
}
var seq int32
var headSeq int32
var tailSeq int32
//var size int32
var err error
t := db.listBatch
t.Lock()
defer t.Unlock()
metaKey := db.lEncodeMetaKey(key)
headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey)
if err != nil {
return err
}
if index >= 0 {
seq = headSeq + index
} else {
seq = tailSeq + index + 1
}
if seq < headSeq || seq > tailSeq {
return errListIndex
}
sk := db.lEncodeListKey(key, seq)
t.Put(sk, value)
err = t.Commit()
return err
}
// LRange returns the elements of the list in [start, stop].
func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
var headSeq int32
var llen int32
var err error
metaKey := db.lEncodeMetaKey(key)
it := db.bucket.NewIterator()
defer it.Close()
if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil {
return nil, err
}
if start < 0 {
start = llen + start
}
if stop < 0 {
stop = llen + stop
}
if start < 0 {
start = 0
}
if start > stop || start >= llen {
return [][]byte{}, nil
}
if stop >= llen {
stop = llen - 1
}
limit := (stop - start) + 1
headSeq += start
v := make([][]byte, 0, limit)
startKey := db.lEncodeListKey(key, headSeq)
rit := store.NewRangeLimitIterator(it,
&store.Range{
Min: startKey,
Max: nil,
Type: store.RangeClose},
&store.Limit{
Offset: 0,
Count: int(limit)})
for ; rit.Valid(); rit.Next() {
v = append(v, rit.Value())
}
return v, nil
}
// RPop pops a value from the tail of the list.
func (db *DB) RPop(key []byte) ([]byte, error) {
return db.lpop(key, listTailSeq)
}
// RPush pushes the values onto the tail of the list.
func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) {
return db.lpush(key, listTailSeq, args...)
}
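// A minimal FIFO queue sketch (hypothetical, not part of the package):
// RPush appends at the tail and LPop consumes from the head.
//
//	func exampleQueue(db *DB) ([]byte, error) {
//		if _, err := db.RPush([]byte("q"), []byte("a"), []byte("b")); err != nil {
//			return nil, err
//		}
//		return db.LPop([]byte("q")) // "a"
//	}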
// LClear clears the list.
func (db *DB) LClear(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.listBatch
t.Lock()
defer t.Unlock()
num := db.lDelete(t, key)
db.rmExpire(t, ListType, key)
err := t.Commit()
return num, err
}
// LMclear clears multiple lists.
func (db *DB) LMclear(keys ...[]byte) (int64, error) {
t := db.listBatch
t.Lock()
defer t.Unlock()
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return 0, err
}
db.lDelete(t, key)
db.rmExpire(t, ListType, key)
}
err := t.Commit()
return int64(len(keys)), err
}
func (db *DB) lFlush() (drop int64, err error) {
t := db.listBatch
t.Lock()
defer t.Unlock()
return db.flushType(t, ListType)
}
// LExpire expires the list.
func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue
}
return db.lExpireAt(key, time.Now().Unix()+duration)
}
// LExpireAt expires the list at when.
func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() {
return 0, errExpireValue
}
return db.lExpireAt(key, when)
}
// LTTL gets the TTL of list.
func (db *DB) LTTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return -1, err
}
return db.ttl(ListType, key)
}
// LPersist removes the TTL of list.
func (db *DB) LPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.listBatch
t.Lock()
defer t.Unlock()
n, err := db.rmExpire(t, ListType, key)
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}
func (db *DB) lEncodeMinKey() []byte {
return db.lEncodeMetaKey(nil)
}
func (db *DB) lEncodeMaxKey() []byte {
ek := db.lEncodeMetaKey(nil)
ek[len(ek)-1] = LMetaType + 1
return ek
}
// BLPop pops from the head of the first non-empty list, blocking for up to timeout.
func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
return db.lblockPop(keys, listHeadSeq, timeout)
}
// BRPop pops from the tail of the first non-empty list, blocking for up to timeout.
func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
return db.lblockPop(keys, listTailSeq, timeout)
}
// LKeyExists checks whether the list exists.
func (db *DB) LKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
sk := db.lEncodeMetaKey(key)
v, err := db.bucket.Get(sk)
if v != nil && err == nil {
return 1, nil
}
return 0, err
}
func (db *DB) lblockPop(keys [][]byte, whereSeq int32, timeout time.Duration) ([]interface{}, error) {
for {
var ctx context.Context
var cancel context.CancelFunc
if timeout > 0 {
ctx, cancel = context.WithTimeout(context.Background(), timeout)
} else {
ctx, cancel = context.WithCancel(context.Background())
}
for _, key := range keys {
v, err := db.lbkeys.popOrWait(db, key, whereSeq, cancel)
if err != nil {
cancel()
return nil, err
} else if v != nil {
cancel()
return []interface{}{key, v}, nil
}
}
//blocking wait
<-ctx.Done()
cancel()
//if ctx.Err() is a deadline exceeded (timeout) we return
//otherwise we try to pop one of the keys again.
if ctx.Err() == context.DeadlineExceeded {
return nil, nil
}
}
}
func (db *DB) lSignalAsReady(key []byte) {
db.lbkeys.signal(key)
}
type lBlockKeys struct {
sync.Mutex
keys map[string]*list.List
}
func newLBlockKeys() *lBlockKeys {
l := new(lBlockKeys)
l.keys = make(map[string]*list.List)
return l
}
func (l *lBlockKeys) signal(key []byte) {
l.Lock()
defer l.Unlock()
s := hack.String(key)
fns, ok := l.keys[s]
if !ok {
return
}
for e := fns.Front(); e != nil; e = e.Next() {
fn := e.Value.(context.CancelFunc)
fn()
}
delete(l.keys, s)
}
func (l *lBlockKeys) popOrWait(db *DB, key []byte, whereSeq int32, fn context.CancelFunc) ([]interface{}, error) {
v, err := db.lpop(key, whereSeq)
if err != nil {
return nil, err
} else if v != nil {
return []interface{}{key, v}, nil
}
l.Lock()
s := hack.String(key)
chs, ok := l.keys[s]
if !ok {
chs = list.New()
l.keys[s] = chs
}
chs.PushBack(fn)
l.Unlock()
return nil, nil
}
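// How the blocking pop works: each blocked BLPop/BRPop caller registers its
// context cancel function under every key it waits on (popOrWait above); a
// successful push calls lSignalAsReady, which cancels those contexts and
// wakes the waiters, who then race to pop again. Losers simply re-register,
// and context.DeadlineExceeded distinguishes a timeout from a wake-up.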

644
vendor/github.com/siddontang/ledisdb/ledis/t_set.go generated vendored Normal file
View File

@ -0,0 +1,644 @@
package ledis
import (
"encoding/binary"
"errors"
"time"
"github.com/siddontang/go/hack"
"github.com/siddontang/ledisdb/store"
)
var errSetKey = errors.New("invalid set key")
var errSSizeKey = errors.New("invalid ssize key")
// Set operation types and member separators.
const (
setStartSep byte = ':'
setStopSep byte = setStartSep + 1
UnionType byte = 51
DiffType byte = 52
InterType byte = 53
)
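// setStartSep and setStopSep bound the member range of one set: every member
// key is key + ':' + member, so iterating from sEncodeStartKey(key), ending
// in ':', up to but excluding sEncodeStopKey(key), ending in ';', visits
// exactly the members of that set.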
func checkSetKMSize(key []byte, member []byte) error {
if len(key) > MaxKeySize || len(key) == 0 {
return errKeySize
} else if len(member) > MaxSetMemberSize || len(member) == 0 {
return errSetMemberSize
}
return nil
}
func (db *DB) sEncodeSizeKey(key []byte) []byte {
buf := make([]byte, len(key)+1+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = SSizeType
pos++
copy(buf[pos:], key)
return buf
}
func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, err
}
if pos+1 > len(ek) || ek[pos] != SSizeType {
return nil, errSSizeKey
}
pos++
return ek[pos:], nil
}
func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
buf := make([]byte, len(key)+len(member)+1+1+2+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = SetType
pos++
binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
pos += 2
copy(buf[pos:], key)
pos += len(key)
buf[pos] = setStartSep
pos++
copy(buf[pos:], member)
return buf
}
func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) {
pos, err := db.checkKeyIndex(ek)
if err != nil {
return nil, nil, err
}
if pos+1 > len(ek) || ek[pos] != SetType {
return nil, nil, errSetKey
}
pos++
if pos+2 > len(ek) {
return nil, nil, errSetKey
}
keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
pos += 2
if keyLen+pos > len(ek) {
return nil, nil, errSetKey
}
key := ek[pos : pos+keyLen]
pos += keyLen
if ek[pos] != setStartSep {
return nil, nil, errSetKey
}
pos++
member := ek[pos:]
return key, member, nil
}
func (db *DB) sEncodeStartKey(key []byte) []byte {
return db.sEncodeSetKey(key, nil)
}
func (db *DB) sEncodeStopKey(key []byte) []byte {
k := db.sEncodeSetKey(key, nil)
k[len(k)-1] = setStopSep
return k
}
func (db *DB) sFlush() (drop int64, err error) {
t := db.setBatch
t.Lock()
defer t.Unlock()
return db.flushType(t, SetType)
}
func (db *DB) sDelete(t *batch, key []byte) int64 {
sk := db.sEncodeSizeKey(key)
start := db.sEncodeStartKey(key)
stop := db.sEncodeStopKey(key)
var num int64
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
for ; it.Valid(); it.Next() {
t.Delete(it.RawKey())
num++
}
it.Close()
t.Delete(sk)
return num
}
func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) {
t := db.setBatch
sk := db.sEncodeSizeKey(key)
var err error
var size int64
if size, err = Int64(db.bucket.Get(sk)); err != nil {
return 0, err
}
size += delta
if size <= 0 {
size = 0
t.Delete(sk)
db.rmExpire(t, SetType, key)
} else {
t.Put(sk, PutInt64(size))
}
return size, nil
}
func (db *DB) sExpireAt(key []byte, when int64) (int64, error) {
t := db.setBatch
t.Lock()
defer t.Unlock()
if scnt, err := db.SCard(key); err != nil || scnt == 0 {
return 0, err
}
db.expireAt(t, SetType, key, when)
if err := t.Commit(); err != nil {
return 0, err
}
return 1, nil
}
func (db *DB) sSetItem(key []byte, member []byte) (int64, error) {
t := db.setBatch
ek := db.sEncodeSetKey(key, member)
var n int64 = 1
if v, _ := db.bucket.Get(ek); v != nil {
n = 0
} else {
if _, err := db.sIncrSize(key, 1); err != nil {
return 0, err
}
}
t.Put(ek, nil)
return n, nil
}
// SAdd adds the members to the set.
func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
t := db.setBatch
t.Lock()
defer t.Unlock()
var err error
var ek []byte
var num int64
for i := 0; i < len(args); i++ {
if err := checkSetKMSize(key, args[i]); err != nil {
return 0, err
}
ek = db.sEncodeSetKey(key, args[i])
if v, err := db.bucket.Get(ek); err != nil {
return 0, err
} else if v == nil {
num++
}
t.Put(ek, nil)
}
if _, err = db.sIncrSize(key, num); err != nil {
return 0, err
}
err = t.Commit()
return num, err
}
// SCard returns the number of members in the set.
func (db *DB) SCard(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
sk := db.sEncodeSizeKey(key)
return Int64(db.bucket.Get(sk))
}
func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) {
destMap := make(map[string]bool)
members, err := db.SMembers(keys[0])
if err != nil {
return nil, err
}
for _, m := range members {
destMap[hack.String(m)] = true
}
for _, k := range keys[1:] {
members, err := db.SMembers(k)
if err != nil {
return nil, err
}
for _, m := range members {
if _, ok := destMap[hack.String(m)]; ok {
delete(destMap, hack.String(m))
}
}
// O - A = O, where O is the empty set: once destMap is empty, the difference stays empty.
if len(destMap) == 0 {
return nil, nil
}
}
slice := make([][]byte, len(destMap))
idx := 0
for k, v := range destMap {
if !v {
continue
}
slice[idx] = []byte(k)
idx++
}
return slice, nil
}
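// Worked example of the difference: with keys[0] = {a, b, c} and
// keys[1] = {b, c, d}, the members b and c are deleted from destMap while
// scanning keys[1], so SDiff returns only "a"; "d" never enters the map.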
// SDiff returns the difference between the first set and the rest.
func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) {
v, err := db.sDiffGeneric(keys...)
return v, err
}
// SDiffStore computes the set difference and stores the result at dstKey.
func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, DiffType, keys...)
return n, err
}
// SKeyExists checks whether the set exists.
func (db *DB) SKeyExists(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
sk := db.sEncodeSizeKey(key)
v, err := db.bucket.Get(sk)
if v != nil && err == nil {
return 1, nil
}
return 0, err
}
func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) {
destMap := make(map[string]bool)
members, err := db.SMembers(keys[0])
if err != nil {
return nil, err
}
for _, m := range members {
destMap[hack.String(m)] = true
}
for _, key := range keys[1:] {
if err := checkKeySize(key); err != nil {
return nil, err
}
members, err := db.SMembers(key)
if err != nil {
return nil, err
} else if len(members) == 0 {
// an empty operand makes the intersection empty; err is nil here
return nil, nil
}
tempMap := make(map[string]bool)
for _, member := range members {
if err := checkKeySize(member); err != nil {
return nil, err
}
if _, ok := destMap[hack.String(member)]; ok {
tempMap[hack.String(member)] = true //mark this item as selected
}
}
destMap = tempMap //reduce the size of the result set
if len(destMap) == 0 {
return nil, nil
}
}
slice := make([][]byte, len(destMap))
idx := 0
for k, v := range destMap {
if !v {
continue
}
slice[idx] = []byte(k)
idx++
}
return slice, nil
}
// SInter intersects the sets.
func (db *DB) SInter(keys ...[]byte) ([][]byte, error) {
v, err := db.sInterGeneric(keys...)
return v, err
}
// SInterStore intersects the sets and stores the result at dstKey.
func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, InterType, keys...)
return n, err
}
// SIsMember checks whether member is in the set.
func (db *DB) SIsMember(key []byte, member []byte) (int64, error) {
ek := db.sEncodeSetKey(key, member)
var n int64 = 1
if v, err := db.bucket.Get(ek); err != nil {
return 0, err
} else if v == nil {
n = 0
}
return n, nil
}
// SMembers returns all members of the set.
func (db *DB) SMembers(key []byte) ([][]byte, error) {
if err := checkKeySize(key); err != nil {
return nil, err
}
start := db.sEncodeStartKey(key)
stop := db.sEncodeStopKey(key)
v := make([][]byte, 0, 16)
it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
defer it.Close()
for ; it.Valid(); it.Next() {
_, m, err := db.sDecodeSetKey(it.Key())
if err != nil {
return nil, err
}
v = append(v, m)
}
return v, nil
}
// SRem removes the given members from the set.
func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) {
t := db.setBatch
t.Lock()
defer t.Unlock()
var ek []byte
var v []byte
var err error
it := db.bucket.NewIterator()
defer it.Close()
var num int64
for i := 0; i < len(args); i++ {
if err := checkSetKMSize(key, args[i]); err != nil {
return 0, err
}
ek = db.sEncodeSetKey(key, args[i])
v = it.RawFind(ek)
if v == nil {
continue
} else {
num++
t.Delete(ek)
}
}
if _, err = db.sIncrSize(key, -num); err != nil {
return 0, err
}
err = t.Commit()
return num, err
}
func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) {
dstMap := make(map[string]bool)
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return nil, err
}
members, err := db.SMembers(key)
if err != nil {
return nil, err
}
for _, member := range members {
dstMap[hack.String(member)] = true
}
}
slice := make([][]byte, len(dstMap))
idx := 0
for k, v := range dstMap {
if !v {
continue
}
slice[idx] = []byte(k)
idx++
}
return slice, nil
}
// SUnion unions the sets.
func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) {
v, err := db.sUnionGeneric(keys...)
return v, err
}
// SUnionStore unions the sets and stores the result at dstKey.
func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) {
n, err := db.sStoreGeneric(dstKey, UnionType, keys...)
return n, err
}
func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) {
if err := checkKeySize(dstKey); err != nil {
return 0, err
}
t := db.setBatch
t.Lock()
defer t.Unlock()
db.sDelete(t, dstKey)
var err error
var ek []byte
var v [][]byte
switch optType {
case UnionType:
v, err = db.sUnionGeneric(keys...)
case DiffType:
v, err = db.sDiffGeneric(keys...)
case InterType:
v, err = db.sInterGeneric(keys...)
}
if err != nil {
return 0, err
}
for _, m := range v {
if err := checkSetKMSize(dstKey, m); err != nil {
return 0, err
}
ek = db.sEncodeSetKey(dstKey, m)
if _, err := db.bucket.Get(ek); err != nil {
return 0, err
}
t.Put(ek, nil)
}
var n = int64(len(v))
sk := db.sEncodeSizeKey(dstKey)
t.Put(sk, PutInt64(n))
if err = t.Commit(); err != nil {
return 0, err
}
return n, nil
}
// SClear clears the set.
func (db *DB) SClear(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.setBatch
t.Lock()
defer t.Unlock()
num := db.sDelete(t, key)
db.rmExpire(t, SetType, key)
err := t.Commit()
return num, err
}
// SMclear clears multiple sets.
func (db *DB) SMclear(keys ...[]byte) (int64, error) {
t := db.setBatch
t.Lock()
defer t.Unlock()
for _, key := range keys {
if err := checkKeySize(key); err != nil {
return 0, err
}
db.sDelete(t, key)
db.rmExpire(t, SetType, key)
}
err := t.Commit()
return int64(len(keys)), err
}
// SExpire expires the set.
func (db *DB) SExpire(key []byte, duration int64) (int64, error) {
if duration <= 0 {
return 0, errExpireValue
}
return db.sExpireAt(key, time.Now().Unix()+duration)
}
// SExpireAt expires the set at when.
func (db *DB) SExpireAt(key []byte, when int64) (int64, error) {
if when <= time.Now().Unix() {
return 0, errExpireValue
}
return db.sExpireAt(key, when)
}
// STTL gets the TTL of set.
func (db *DB) STTL(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return -1, err
}
return db.ttl(SetType, key)
}
// SPersist removes the TTL of set.
func (db *DB) SPersist(key []byte) (int64, error) {
if err := checkKeySize(key); err != nil {
return 0, err
}
t := db.setBatch
t.Lock()
defer t.Unlock()
n, err := db.rmExpire(t, SetType, key)
if err != nil {
return 0, err
}
err = t.Commit()
return n, err
}

217
vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go generated vendored Normal file
View File

@ -0,0 +1,217 @@
package ledis
import (
"encoding/binary"
"errors"
"sync"
"time"
"github.com/siddontang/ledisdb/store"
)
var (
errExpMetaKey = errors.New("invalid expire meta key")
errExpTimeKey = errors.New("invalid expire time key")
)
type onExpired func(*batch, []byte) int64
type ttlChecker struct {
sync.Mutex
db *DB
txs []*batch
cbs []onExpired
//next check time
nc int64
}
var errExpType = errors.New("invalid expire type")
func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
buf := make([]byte, len(key)+10+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = ExpTimeType
pos++
binary.BigEndian.PutUint64(buf[pos:], uint64(when))
pos += 8
buf[pos] = dataType
pos++
copy(buf[pos:], key)
return buf
}
func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte {
buf := make([]byte, len(key)+2+len(db.indexVarBuf))
pos := copy(buf, db.indexVarBuf)
buf[pos] = ExpMetaType
pos++
buf[pos] = dataType
pos++
copy(buf[pos:], key)
return buf
}
func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) {
pos, err := db.checkKeyIndex(mk)
if err != nil {
return 0, nil, err
}
if pos+2 > len(mk) || mk[pos] != ExpMetaType {
return 0, nil, errExpMetaKey
}
return mk[pos+1], mk[pos+2:], nil
}
func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {
pos, err := db.checkKeyIndex(tk)
if err != nil {
return 0, nil, 0, err
}
if pos+10 > len(tk) || tk[pos] != ExpTimeType {
return 0, nil, 0, errExpTimeKey
}
return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil
}
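// Each expiring key gets two mirrored records:
//
//	time key: [index prefix][ExpTimeType][when, uint64 BE][dataType][key] -> meta key
//	meta key: [index prefix][ExpMetaType][dataType][key] -> when
//
// Because when is big-endian and comes first in the time key, a plain range
// scan over time keys visits entries in expiration order, which is exactly
// what ttlChecker.check below depends on.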
func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) {
db.expireAt(t, dataType, key, time.Now().Unix()+duration)
}
func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) {
mk := db.expEncodeMetaKey(dataType, key)
tk := db.expEncodeTimeKey(dataType, key, when)
t.Put(tk, mk)
t.Put(mk, PutInt64(when))
db.ttlChecker.setNextCheckTime(when, false)
}
func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) {
mk := db.expEncodeMetaKey(dataType, key)
if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 {
t = -1
} else {
t -= time.Now().Unix()
if t <= 0 {
t = -1
}
// if t == -1 : to remove ????
}
return t, err
}
func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) {
mk := db.expEncodeMetaKey(dataType, key)
v, err := db.bucket.Get(mk)
if err != nil {
return 0, err
} else if v == nil {
return 0, nil
}
when, err2 := Int64(v, nil)
if err2 != nil {
return 0, err2
}
tk := db.expEncodeTimeKey(dataType, key, when)
t.Delete(mk)
t.Delete(tk)
return 1, nil
}
func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) {
c.txs[dataType] = t
c.cbs[dataType] = f
}
func (c *ttlChecker) setNextCheckTime(when int64, force bool) {
c.Lock()
if force {
c.nc = when
} else if c.nc > when {
c.nc = when
}
c.Unlock()
}
func (c *ttlChecker) check() {
now := time.Now().Unix()
c.Lock()
nc := c.nc
c.Unlock()
if now < nc {
return
}
nc = now + 3600
db := c.db
dbGet := db.bucket.Get
minKey := db.expEncodeTimeKey(NoneType, nil, 0)
maxKey := db.expEncodeTimeKey(maxDataType, nil, nc)
it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1)
for ; it.Valid(); it.Next() {
tk := it.RawKey()
mk := it.RawValue()
dt, k, nt, err := db.expDecodeTimeKey(tk)
if err != nil {
continue
}
if nt > now {
//the next ttl check time is nt!
nc = nt
break
}
t := c.txs[dt]
cb := c.cbs[dt]
if t == nil || cb == nil {
continue
}
t.Lock()
if exp, err := Int64(dbGet(mk)); err == nil {
// check expire again
if exp <= now {
cb(t, k)
t.Delete(tk)
t.Delete(mk)
t.Commit()
}
}
t.Unlock()
}
it.Close()
c.setNextCheckTime(nc, true)
return
}

1093
vendor/github.com/siddontang/ledisdb/ledis/t_zset.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

103
vendor/github.com/siddontang/ledisdb/ledis/util.go generated vendored Normal file
View File

@ -0,0 +1,103 @@
package ledis
import (
"encoding/binary"
"errors"
"strconv"
"github.com/siddontang/go/hack"
)
var errIntNumber = errors.New("invalid integer")
/*
Below, I forget why I chose little endian to store ints.
Maybe I was foolish at that time.
*/
// Int64 decodes a 64-bit integer stored in little-endian format.
func Int64(v []byte, err error) (int64, error) {
if err != nil {
return 0, err
} else if len(v) == 0 {
return 0, nil
} else if len(v) != 8 {
return 0, errIntNumber
}
return int64(binary.LittleEndian.Uint64(v)), nil
}
// Uint64 decodes an unsigned 64-bit integer stored in little-endian format.
func Uint64(v []byte, err error) (uint64, error) {
if err != nil {
return 0, err
} else if len(v) == 0 {
return 0, nil
} else if len(v) != 8 {
return 0, errIntNumber
}
return binary.LittleEndian.Uint64(v), nil
}
// PutInt64 encodes a 64-bit integer in little-endian format.
func PutInt64(v int64) []byte {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(v))
return b
}
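// Round-trip sketch: PutInt64 and Int64 are inverses, e.g.
//
//	v, err := Int64(PutInt64(-42), nil) // v == -42, err == nil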
// StrInt64 parses a 64-bit integer from its decimal string form.
func StrInt64(v []byte, err error) (int64, error) {
if err != nil {
return 0, err
} else if v == nil {
return 0, nil
} else {
return strconv.ParseInt(hack.String(v), 10, 64)
}
}
// StrUint64 parses an unsigned 64-bit integer from its decimal string form.
func StrUint64(v []byte, err error) (uint64, error) {
if err != nil {
return 0, err
} else if v == nil {
return 0, nil
} else {
return strconv.ParseUint(hack.String(v), 10, 64)
}
}
// StrInt32 parses a 32-bit integer from its decimal string form.
func StrInt32(v []byte, err error) (int32, error) {
if err != nil {
return 0, err
} else if v == nil {
return 0, nil
} else {
res, err := strconv.ParseInt(hack.String(v), 10, 32)
return int32(res), err
}
}
// StrInt8 parses an 8-bit integer from its decimal string form.
func StrInt8(v []byte, err error) (int8, error) {
if err != nil {
return 0, err
} else if v == nil {
return 0, nil
} else {
res, err := strconv.ParseInt(hack.String(v), 10, 8)
return int8(res), err
}
}
// AsyncNotify sends a notification on ch without blocking; the signal is dropped if no receiver is ready.
func AsyncNotify(ch chan struct{}) {
select {
case ch <- struct{}{}:
default:
}
}