mirror of https://github.com/astaxie/beego.git synced 2025-07-02 03:10:20 +00:00

add vendor

This commit is contained in:
astaxie
2018-07-30 12:05:51 +08:00
parent d55f54a8ab
commit 48acfa08be
496 changed files with 327583 additions and 0 deletions

vendor/github.com/siddontang/ledisdb/store/db.go generated vendored Normal file
@@ -0,0 +1,169 @@
package store
import (
"sync"
"time"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
)
type DB struct {
db driver.IDB
name string
st *Stat
cfg *config.Config
lastCommit time.Time
m sync.Mutex
}
func (db *DB) Close() error {
return db.db.Close()
}
func (db *DB) String() string {
return db.name
}
func (db *DB) NewIterator() *Iterator {
db.st.IterNum.Add(1)
it := new(Iterator)
it.it = db.db.NewIterator()
it.st = db.st
return it
}
func (db *DB) Get(key []byte) ([]byte, error) {
t := time.Now()
v, err := db.db.Get(key)
db.st.statGet(v, err)
db.st.GetTotalTime.Add(time.Since(t))
return v, err
}
func (db *DB) Put(key []byte, value []byte) error {
db.st.PutNum.Add(1)
if db.needSyncCommit() {
return db.db.SyncPut(key, value)
} else {
return db.db.Put(key, value)
}
}
func (db *DB) Delete(key []byte) error {
db.st.DeleteNum.Add(1)
if db.needSyncCommit() {
return db.db.SyncDelete(key)
} else {
return db.db.Delete(key)
}
}
func (db *DB) NewWriteBatch() *WriteBatch {
db.st.BatchNum.Add(1)
wb := new(WriteBatch)
wb.wb = db.db.NewWriteBatch()
wb.st = db.st
wb.db = db
return wb
}
func (db *DB) NewSnapshot() (*Snapshot, error) {
db.st.SnapshotNum.Add(1)
var err error
s := &Snapshot{}
if s.ISnapshot, err = db.db.NewSnapshot(); err != nil {
return nil, err
}
s.st = db.st
return s, nil
}
func (db *DB) Compact() error {
db.st.CompactNum.Add(1)
t := time.Now()
err := db.db.Compact()
db.st.CompactTotalTime.Add(time.Since(t))
return err
}
func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}
func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}
// If count < 0, the number of returned results is unlimited.
//
// offset must be >= 0; a negative offset returns nothing.
func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}
// If count < 0, the number of returned results is unlimited.
//
// offset must be >= 0; a negative offset returns nothing.
func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}
func (db *DB) Stat() *Stat {
return db.st
}
func (db *DB) needSyncCommit() bool {
if db.cfg.DBSyncCommit == 0 {
return false
} else if db.cfg.DBSyncCommit == 2 {
return true
} else {
n := time.Now()
need := false
db.m.Lock()
if n.Sub(db.lastCommit) > time.Second {
need = true
}
db.lastCommit = n
db.m.Unlock()
return need
}
}
func (db *DB) GetSlice(key []byte) (Slice, error) {
if d, ok := db.db.(driver.ISliceGeter); ok {
t := time.Now()
v, err := d.GetSlice(key)
db.st.statGet(v, err)
db.st.GetTotalTime.Add(time.Since(t))
return v, err
} else {
v, err := db.Get(key)
if err != nil {
return nil, err
} else if v == nil {
return nil, nil
} else {
return driver.GoSlice(v), nil
}
}
}
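The DBSyncCommit policy read by needSyncCommit above decides whether Put and Delete go through the driver's synchronous calls. A minimal usage sketch, not part of this commit, assuming config.NewConfigDefault from the config package and the store.Open wrapper that appears later in this vendor tree:

// Sketch only: selecting the sync-commit policy. Paths and keys are invented.
package main

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis-sync-example" // hypothetical path

	// DBSyncCommit: 0 = never fsync (driver Put), 2 = always fsync (driver
	// SyncPut), anything else = fsync at most once per second (the
	// time-based branch of needSyncCommit).
	cfg.DBSyncCommit = 1

	db, err := store.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		panic(err)
	}
}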

@@ -0,0 +1,57 @@
package driver
type IDB interface {
Close() error
Get(key []byte) ([]byte, error)
Put(key []byte, value []byte) error
Delete(key []byte) error
SyncPut(key []byte, value []byte) error
SyncDelete(key []byte) error
NewIterator() IIterator
NewWriteBatch() IWriteBatch
NewSnapshot() (ISnapshot, error)
Compact() error
}
type ISnapshot interface {
Get(key []byte) ([]byte, error)
NewIterator() IIterator
Close()
}
type IIterator interface {
Close() error
First()
Last()
Seek(key []byte)
Next()
Prev()
Valid() bool
Key() []byte
Value() []byte
}
type IWriteBatch interface {
Put(key []byte, value []byte)
Delete(key []byte)
Commit() error
SyncCommit() error
Rollback() error
Data() []byte
Close()
}
type ISliceGeter interface {
GetSlice(key []byte) (ISlice, error)
}

@@ -0,0 +1,21 @@
package driver
type ISlice interface {
Data() []byte
Size() int
Free()
}
type GoSlice []byte
func (s GoSlice) Data() []byte {
return []byte(s)
}
func (s GoSlice) Size() int {
return len(s)
}
func (s GoSlice) Free() {
}

@@ -0,0 +1,46 @@
package driver
import (
"fmt"
"github.com/siddontang/ledisdb/config"
)
type Store interface {
String() string
Open(path string, cfg *config.Config) (IDB, error)
Repair(path string, cfg *config.Config) error
}
var dbs = map[string]Store{}
func Register(s Store) {
name := s.String()
if _, ok := dbs[name]; ok {
panic(fmt.Errorf("store %s is already registered", name))
}
dbs[name] = s
}
func ListStores() []string {
s := []string{}
for k := range dbs {
s = append(s, k)
}
return s
}
func GetStore(cfg *config.Config) (Store, error) {
if len(cfg.DBName) == 0 {
cfg.DBName = config.DefaultDBName
}
s, ok := dbs[cfg.DBName]
if !ok {
return nil, fmt.Errorf("store %s is not registered", cfg.DBName)
}
return s, nil
}
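Register and GetStore form a simple name-keyed plugin registry. A hedged sketch of how a hypothetical backend would hook in; the mystore package and its stubs are invented for illustration:

// Hypothetical third-party backend, not part of this commit.
package mystore

import (
	"errors"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store/driver"
)

type Store struct{}

func (s Store) String() string { return "mystore" }

// Open would construct a driver.IDB rooted at path; stubbed here.
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
	return nil, errors.New("mystore: not implemented")
}

func (s Store) Repair(path string, cfg *config.Config) error { return nil }

// Registering in init makes the backend selectable via cfg.DBName = "mystore".
func init() {
	driver.Register(Store{})
}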

@@ -0,0 +1,39 @@
package goleveldb
import (
"github.com/syndtr/goleveldb/leveldb"
)
type WriteBatch struct {
db *DB
wbatch *leveldb.Batch
}
func (w *WriteBatch) Put(key, value []byte) {
w.wbatch.Put(key, value)
}
func (w *WriteBatch) Delete(key []byte) {
w.wbatch.Delete(key)
}
func (w *WriteBatch) Commit() error {
return w.db.db.Write(w.wbatch, nil)
}
func (w *WriteBatch) SyncCommit() error {
return w.db.db.Write(w.wbatch, w.db.syncOpts)
}
func (w *WriteBatch) Rollback() error {
w.wbatch.Reset()
return nil
}
func (w *WriteBatch) Close() {
w.wbatch.Reset()
}
func (w *WriteBatch) Data() []byte {
return w.wbatch.Dump()
}

@@ -0,0 +1,4 @@
package goleveldb
const DBName = "goleveldb"
const MemDBName = "memory"

@@ -0,0 +1,204 @@
package goleveldb
import (
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/cache"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/storage"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
"os"
)
const defaultFilterBits int = 10
type Store struct {
}
func (s Store) String() string {
return DBName
}
type MemStore struct {
}
func (s MemStore) String() string {
return MemDBName
}
type DB struct {
path string
cfg *config.LevelDBConfig
db *leveldb.DB
opts *opt.Options
iteratorOpts *opt.ReadOptions
syncOpts *opt.WriteOptions
cache cache.Cache
filter filter.Filter
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
db := new(DB)
db.path = path
db.cfg = &cfg.LevelDB
db.initOpts()
var err error
db.db, err = leveldb.OpenFile(db.path, db.opts)
if err != nil {
return nil, err
}
return db, nil
}
func (s Store) Repair(path string, cfg *config.Config) error {
db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB))
if err != nil {
return err
}
db.Close()
return nil
}
func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) {
db := new(DB)
db.path = path
db.cfg = &cfg.LevelDB
db.initOpts()
var err error
db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts)
if err != nil {
return nil, err
}
return db, nil
}
func (s MemStore) Repair(path string, cfg *config.Config) error {
return nil
}
func (db *DB) initOpts() {
db.opts = newOptions(db.cfg)
db.iteratorOpts = &opt.ReadOptions{}
db.iteratorOpts.DontFillCache = true
db.syncOpts = &opt.WriteOptions{}
db.syncOpts.Sync = true
}
func newOptions(cfg *config.LevelDBConfig) *opt.Options {
opts := &opt.Options{}
opts.ErrorIfMissing = false
opts.BlockCacheCapacity = cfg.CacheSize
// we must use a bloom filter
opts.Filter = filter.NewBloomFilter(defaultFilterBits)
if !cfg.Compression {
opts.Compression = opt.NoCompression
} else {
opts.Compression = opt.SnappyCompression
}
opts.BlockSize = cfg.BlockSize
opts.WriteBuffer = cfg.WriteBufferSize
opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles
// default values are used here; config support may be added later
opts.CompactionTableSize = 32 * 1024 * 1024
opts.WriteL0SlowdownTrigger = 16
opts.WriteL0PauseTrigger = 64
return opts
}
func (db *DB) Close() error {
return db.db.Close()
}
func (db *DB) Put(key, value []byte) error {
return db.db.Put(key, value, nil)
}
func (db *DB) Get(key []byte) ([]byte, error) {
v, err := db.db.Get(key, nil)
if err == leveldb.ErrNotFound {
return nil, nil
}
// propagate errors other than ErrNotFound instead of silently dropping them
return v, err
}
func (db *DB) Delete(key []byte) error {
return db.db.Delete(key, nil)
}
func (db *DB) SyncPut(key []byte, value []byte) error {
return db.db.Put(key, value, db.syncOpts)
}
func (db *DB) SyncDelete(key []byte) error {
return db.db.Delete(key, db.syncOpts)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch {
wb := &WriteBatch{
db: db,
wbatch: new(leveldb.Batch),
}
return wb
}
func (db *DB) NewIterator() driver.IIterator {
it := &Iterator{
db.db.NewIterator(nil, db.iteratorOpts),
}
return it
}
func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
snapshot, err := db.db.GetSnapshot()
if err != nil {
return nil, err
}
s := &Snapshot{
db: db,
snp: snapshot,
}
return s, nil
}
func (db *DB) Compact() error {
return db.db.CompactRange(util.Range{})
}
func init() {
driver.Register(Store{})
driver.Register(MemStore{})
}
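Both backends register themselves in init, so either can be selected through the name constants above. A short sketch, assuming config.NewConfigDefault and the store.Open wrapper from elsewhere in this tree:

// Sketch only, not part of this commit.
package example

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

// openMemDB opens the non-persistent "memory" backend (MemDBName above),
// backed by goleveldb's in-memory storage; convenient for tests.
func openMemDB() (*store.DB, error) {
	cfg := config.NewConfigDefault()
	cfg.DBName = "memory"
	return store.Open(cfg)
}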

@@ -0,0 +1,49 @@
package goleveldb
import (
"github.com/syndtr/goleveldb/leveldb/iterator"
)
type Iterator struct {
it iterator.Iterator
}
func (it *Iterator) Key() []byte {
return it.it.Key()
}
func (it *Iterator) Value() []byte {
return it.it.Value()
}
func (it *Iterator) Close() error {
if it.it != nil {
it.it.Release()
it.it = nil
}
return nil
}
func (it *Iterator) Valid() bool {
return it.it.Valid()
}
func (it *Iterator) Next() {
it.it.Next()
}
func (it *Iterator) Prev() {
it.it.Prev()
}
func (it *Iterator) First() {
it.it.First()
}
func (it *Iterator) Last() {
it.it.Last()
}
func (it *Iterator) Seek(key []byte) {
it.it.Seek(key)
}

@@ -0,0 +1,26 @@
package goleveldb
import (
"github.com/siddontang/ledisdb/store/driver"
"github.com/syndtr/goleveldb/leveldb"
)
type Snapshot struct {
db *DB
snp *leveldb.Snapshot
}
func (s *Snapshot) Get(key []byte) ([]byte, error) {
return s.snp.Get(key, s.db.iteratorOpts)
}
func (s *Snapshot) NewIterator() driver.IIterator {
it := &Iterator{
s.snp.NewIterator(nil, s.db.iteratorOpts),
}
return it
}
func (s *Snapshot) Close() {
s.snp.Release()
}

vendor/github.com/siddontang/ledisdb/store/iterator.go generated vendored Normal file
@@ -0,0 +1,334 @@
package store
import (
"bytes"
"github.com/siddontang/ledisdb/store/driver"
)
const (
IteratorForward uint8 = 0
IteratorBackward uint8 = 1
)
const (
RangeClose uint8 = 0x00
RangeLOpen uint8 = 0x01
RangeROpen uint8 = 0x10
RangeOpen uint8 = 0x11
)
// Min must be less than or equal to Max.
//
// range type:
//
// close: [min, max]
// open: (min, max)
// lopen: (min, max]
// ropen: [min, max)
//
type Range struct {
Min []byte
Max []byte
Type uint8
}
type Limit struct {
Offset int
Count int
}
type Iterator struct {
it driver.IIterator
st *Stat
}
// Returns a copy of key.
func (it *Iterator) Key() []byte {
k := it.it.Key()
if k == nil {
return nil
}
return append([]byte{}, k...)
}
// Returns a copy of value.
func (it *Iterator) Value() []byte {
v := it.it.Value()
if v == nil {
return nil
}
return append([]byte{}, v...)
}
// Returns a reference to the key.
// Be careful: the data becomes invalid after the next iteration step.
func (it *Iterator) RawKey() []byte {
return it.it.Key()
}
// Returns a reference to the value.
// Be careful: the data becomes invalid after the next iteration step.
func (it *Iterator) RawValue() []byte {
return it.it.Value()
}
// Copies the key into b; if b is nil or too small, a new slice is allocated and returned.
func (it *Iterator) BufKey(b []byte) []byte {
k := it.RawKey()
if k == nil {
return nil
}
if b == nil {
b = []byte{}
}
b = b[0:0]
return append(b, k...)
}
// Copies the value into b; if b is nil or too small, a new slice is allocated and returned.
func (it *Iterator) BufValue(b []byte) []byte {
v := it.RawValue()
if v == nil {
return nil
}
if b == nil {
b = []byte{}
}
b = b[0:0]
return append(b, v...)
}
func (it *Iterator) Close() {
if it.it != nil {
it.st.IterCloseNum.Add(1)
it.it.Close()
it.it = nil
}
}
func (it *Iterator) Valid() bool {
return it.it.Valid()
}
func (it *Iterator) Next() {
it.st.IterSeekNum.Add(1)
it.it.Next()
}
func (it *Iterator) Prev() {
it.st.IterSeekNum.Add(1)
it.it.Prev()
}
func (it *Iterator) SeekToFirst() {
it.st.IterSeekNum.Add(1)
it.it.First()
}
func (it *Iterator) SeekToLast() {
it.st.IterSeekNum.Add(1)
it.it.Last()
}
func (it *Iterator) Seek(key []byte) {
it.st.IterSeekNum.Add(1)
it.it.Seek(key)
}
// Looks up key and returns a copy of its value, or nil if the key is not found.
func (it *Iterator) Find(key []byte) []byte {
it.Seek(key)
if it.Valid() {
k := it.RawKey()
if k == nil {
return nil
} else if bytes.Equal(k, key) {
return it.Value()
}
}
return nil
}
// Looks up key like Find but returns a reference to the value, or nil if not found.
// Be careful: the data becomes invalid after the next iteration step.
func (it *Iterator) RawFind(key []byte) []byte {
it.Seek(key)
if it.Valid() {
k := it.RawKey()
if k == nil {
return nil
} else if bytes.Equal(k, key) {
return it.RawValue()
}
}
return nil
}
type RangeLimitIterator struct {
it *Iterator
r *Range
l *Limit
step int
//0 for IteratorForward, 1 for IteratorBackward
direction uint8
}
func (it *RangeLimitIterator) Key() []byte {
return it.it.Key()
}
func (it *RangeLimitIterator) Value() []byte {
return it.it.Value()
}
func (it *RangeLimitIterator) RawKey() []byte {
return it.it.RawKey()
}
func (it *RangeLimitIterator) RawValue() []byte {
return it.it.RawValue()
}
func (it *RangeLimitIterator) BufKey(b []byte) []byte {
return it.it.BufKey(b)
}
func (it *RangeLimitIterator) BufValue(b []byte) []byte {
return it.it.BufValue(b)
}
func (it *RangeLimitIterator) Valid() bool {
if it.l.Offset < 0 {
return false
} else if !it.it.Valid() {
return false
} else if it.l.Count >= 0 && it.step >= it.l.Count {
return false
}
if it.direction == IteratorForward {
if it.r.Max != nil {
r := bytes.Compare(it.it.RawKey(), it.r.Max)
if it.r.Type&RangeROpen > 0 {
return !(r >= 0)
} else {
return !(r > 0)
}
}
} else {
if it.r.Min != nil {
r := bytes.Compare(it.it.RawKey(), it.r.Min)
if it.r.Type&RangeLOpen > 0 {
return !(r <= 0)
} else {
return !(r < 0)
}
}
}
return true
}
func (it *RangeLimitIterator) Next() {
it.step++
if it.direction == IteratorForward {
it.it.Next()
} else {
it.it.Prev()
}
}
func (it *RangeLimitIterator) Close() {
it.it.Close()
}
func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
return rangeLimitIterator(i, r, l, IteratorForward)
}
func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
return rangeLimitIterator(i, r, l, IteratorBackward)
}
func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward)
}
func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward)
}
func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator {
it := new(RangeLimitIterator)
it.it = i
it.r = r
it.l = l
it.direction = direction
it.step = 0
if l.Offset < 0 {
return it
}
if direction == IteratorForward {
if r.Min == nil {
it.it.SeekToFirst()
} else {
it.it.Seek(r.Min)
if r.Type&RangeLOpen > 0 {
if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) {
it.it.Next()
}
}
}
} else {
if r.Max == nil {
it.it.SeekToLast()
} else {
it.it.Seek(r.Max)
if !it.it.Valid() {
it.it.SeekToLast()
} else {
if !bytes.Equal(it.it.RawKey(), r.Max) {
it.it.Prev()
}
}
if r.Type&RangeROpen > 0 {
if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) {
it.it.Prev()
}
}
}
}
for i := 0; i < l.Offset; i++ {
if it.it.Valid() {
if it.direction == IteratorForward {
it.it.Next()
} else {
it.it.Prev()
}
}
}
return it
}
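A usage sketch for the range-limit iterator above, as seen from outside the store package; the keys, path, and config.NewConfigDefault are assumptions for illustration:

// Sketch only, not part of this commit.
package main

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

func main() {
	cfg := config.NewConfigDefault()
	cfg.DataDir = "/tmp/ledis-range-example" // hypothetical path
	db, err := store.Open(cfg)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Iterate keys in ["a", "m") (RangeROpen excludes Max), skipping the
	// first 2 matches and yielding at most 10.
	it := db.RangeLimitIterator([]byte("a"), []byte("m"), store.RangeROpen, 2, 10)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s => %s\n", it.Key(), it.Value())
	}
}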

@@ -0,0 +1,99 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
// #include "leveldb_ext.h"
import "C"
import (
"unsafe"
"github.com/syndtr/goleveldb/leveldb"
)
type WriteBatch struct {
db *DB
wbatch *C.leveldb_writebatch_t
}
func newWriteBatch(db *DB) *WriteBatch {
w := new(WriteBatch)
w.db = db
w.wbatch = C.leveldb_writebatch_create()
return w
}
func (w *WriteBatch) Close() {
if w.wbatch != nil {
C.leveldb_writebatch_destroy(w.wbatch)
w.wbatch = nil
}
}
func (w *WriteBatch) Put(key, value []byte) {
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.leveldb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}
func (w *WriteBatch) Delete(key []byte) {
C.leveldb_writebatch_delete(w.wbatch,
(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
func (w *WriteBatch) Commit() error {
return w.commit(w.db.writeOpts)
}
func (w *WriteBatch) SyncCommit() error {
return w.commit(w.db.syncOpts)
}
func (w *WriteBatch) Rollback() error {
C.leveldb_writebatch_clear(w.wbatch)
return nil
}
func (w *WriteBatch) commit(wb *WriteOptions) error {
var errStr *C.char
C.leveldb_write(w.db.db, wb.Opt, w.wbatch, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
//export leveldb_writebatch_iterate_put
func leveldb_writebatch_iterate_put(p unsafe.Pointer, k *C.char, klen C.size_t, v *C.char, vlen C.size_t) {
b := (*leveldb.Batch)(p)
key := slice(unsafe.Pointer(k), int(klen))
value := slice(unsafe.Pointer(v), int(vlen))
b.Put(key, value)
}
//export leveldb_writebatch_iterate_delete
func leveldb_writebatch_iterate_delete(p unsafe.Pointer, k *C.char, klen C.size_t) {
b := (*leveldb.Batch)(p)
key := slice(unsafe.Pointer(k), int(klen))
b.Delete(key)
}
func (w *WriteBatch) Data() []byte {
gbatch := leveldb.Batch{}
C.leveldb_writebatch_iterate_ext(w.wbatch,
unsafe.Pointer(&gbatch))
return gbatch.Dump()
}

@@ -0,0 +1,20 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include <stdint.h>
// #include "leveldb/c.h"
import "C"
type Cache struct {
Cache *C.leveldb_cache_t
}
func NewLRUCache(capacity int) *Cache {
return &Cache{C.leveldb_cache_create_lru(C.size_t(capacity))}
}
func (c *Cache) Close() {
C.leveldb_cache_destroy(c.Cache)
}

@@ -0,0 +1,3 @@
package leveldb
const DBName = "leveldb"

@@ -0,0 +1,314 @@
// +build leveldb
// Package leveldb is a wrapper for c++ leveldb
package leveldb
/*
#cgo LDFLAGS: -lleveldb
#include <leveldb/c.h>
#include "leveldb_ext.h"
*/
import "C"
import (
"os"
"runtime"
"unsafe"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
)
const defaultFilterBits int = 10
type Store struct {
}
func (s Store) String() string {
return DBName
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
db := new(DB)
db.path = path
db.cfg = &cfg.LevelDB
if err := db.open(); err != nil {
return nil, err
}
return db, nil
}
func (s Store) Repair(path string, cfg *config.Config) error {
db := new(DB)
db.cfg = &cfg.LevelDB
db.path = path
err := db.open()
defer db.Close()
// opened successfully, so no repair is needed
if err == nil {
return nil
}
var errStr *C.char
ldbname := C.CString(path)
defer C.leveldb_free(unsafe.Pointer(ldbname))
C.leveldb_repair_db(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
type DB struct {
path string
cfg *config.LevelDBConfig
db *C.leveldb_t
opts *Options
//for default read and write options
readOpts *ReadOptions
writeOpts *WriteOptions
iteratorOpts *ReadOptions
syncOpts *WriteOptions
cache *Cache
filter *FilterPolicy
}
func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.path)
defer C.leveldb_free(unsafe.Pointer(ldbname))
db.db = C.leveldb_open(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
db.db = nil
return saveError(errStr)
}
return nil
}
func (db *DB) initOptions(cfg *config.LevelDBConfig) {
opts := NewOptions()
opts.SetCreateIfMissing(true)
db.cache = NewLRUCache(cfg.CacheSize)
opts.SetCache(db.cache)
// we must use a bloom filter
db.filter = NewBloomFilter(defaultFilterBits)
opts.SetFilterPolicy(db.filter)
if !cfg.Compression {
opts.SetCompression(NoCompression)
} else {
opts.SetCompression(SnappyCompression)
}
opts.SetBlockSize(cfg.BlockSize)
opts.SetWriteBufferSize(cfg.WriteBufferSize)
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
opts.SetMaxFileSize(cfg.MaxFileSize)
db.opts = opts
db.readOpts = NewReadOptions()
db.writeOpts = NewWriteOptions()
db.syncOpts = NewWriteOptions()
db.syncOpts.SetSync(true)
db.iteratorOpts = NewReadOptions()
db.iteratorOpts.SetFillCache(false)
}
func (db *DB) Close() error {
if db.db != nil {
C.leveldb_close(db.db)
db.db = nil
}
db.opts.Close()
if db.cache != nil {
db.cache.Close()
}
if db.filter != nil {
db.filter.Close()
}
db.readOpts.Close()
db.writeOpts.Close()
db.iteratorOpts.Close()
return nil
}
func (db *DB) Put(key, value []byte) error {
return db.put(db.writeOpts, key, value)
}
func (db *DB) Get(key []byte) ([]byte, error) {
return db.get(db.readOpts, key)
}
func (db *DB) Delete(key []byte) error {
return db.delete(db.writeOpts, key)
}
func (db *DB) SyncPut(key []byte, value []byte) error {
return db.put(db.syncOpts, key, value)
}
func (db *DB) SyncDelete(key []byte) error {
return db.delete(db.syncOpts, key)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch {
wb := newWriteBatch(db)
runtime.SetFinalizer(wb, func(w *WriteBatch) {
w.Close()
})
return wb
}
func (db *DB) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.leveldb_create_iterator(db.db, db.iteratorOpts.Opt)
return it
}
func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
snap := &Snapshot{
db: db,
snap: C.leveldb_create_snapshot(db.db),
readOpts: NewReadOptions(),
iteratorOpts: NewReadOptions(),
}
snap.readOpts.SetSnapshot(snap)
snap.iteratorOpts.SetSnapshot(snap)
snap.iteratorOpts.SetFillCache(false)
return snap, nil
}
func (db *DB) put(wo *WriteOptions, key, value []byte) error {
var errStr *C.char
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.leveldb_put(
db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.leveldb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
defer C.leveldb_free(unsafe.Pointer(value))
return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.leveldb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
return NewCSlice(unsafe.Pointer(value), int(vallen)), nil
}
func (db *DB) delete(wo *WriteOptions, key []byte) error {
var errStr *C.char
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
C.leveldb_delete(
db.db, wo.Opt, k, C.size_t(len(key)), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) Compact() error {
C.leveldb_compact_range(db.db, nil, 0, nil, 0)
return nil
}
func (db *DB) GetSlice(key []byte) (driver.ISlice, error) {
return db.getSlice(db.readOpts, key)
}
func init() {
driver.Register(Store{})
}
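This backend compiles only under the leveldb build tag and links against the system libleveldb (see the cgo LDFLAGS line). A hedged sketch of selecting it; the rocksdb backend later in this commit works the same way under the rocksdb tag:

// Sketch only, not part of this commit.
// Build with: go build -tags leveldb   (libleveldb must be installed)
package example

import (
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

func openCLevelDB(dataDir string) (*store.DB, error) {
	cfg := config.NewConfigDefault()
	cfg.DBName = "leveldb" // the DBName constant registered above
	cfg.DataDir = dataDir
	return store.Open(cfg)
}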

@@ -0,0 +1,21 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "leveldb/c.h"
import "C"
type FilterPolicy struct {
Policy *C.leveldb_filterpolicy_t
}
func NewBloomFilter(bitsPerKey int) *FilterPolicy {
policy := C.leveldb_filterpolicy_create_bloom(C.int(bitsPerKey))
return &FilterPolicy{policy}
}
func (fp *FilterPolicy) Close() {
C.leveldb_filterpolicy_destroy(fp.Policy)
}

@@ -0,0 +1,70 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "leveldb/c.h"
// #include "leveldb_ext.h"
import "C"
import (
"unsafe"
)
type Iterator struct {
it *C.leveldb_iterator_t
isValid C.uchar
}
func (it *Iterator) Key() []byte {
var klen C.size_t
kdata := C.leveldb_iter_key(it.it, &klen)
if kdata == nil {
return nil
}
return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}
func (it *Iterator) Value() []byte {
var vlen C.size_t
vdata := C.leveldb_iter_value(it.it, &vlen)
if vdata == nil {
return nil
}
return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}
func (it *Iterator) Close() error {
if it.it != nil {
C.leveldb_iter_destroy(it.it)
it.it = nil
}
return nil
}
func (it *Iterator) Valid() bool {
return ucharToBool(it.isValid)
}
func (it *Iterator) Next() {
it.isValid = C.leveldb_iter_next_ext(it.it)
}
func (it *Iterator) Prev() {
it.isValid = C.leveldb_iter_prev_ext(it.it)
}
func (it *Iterator) First() {
it.isValid = C.leveldb_iter_seek_to_first_ext(it.it)
}
func (it *Iterator) Last() {
it.isValid = C.leveldb_iter_seek_to_last_ext(it.it)
}
func (it *Iterator) Seek(key []byte) {
it.isValid = C.leveldb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}

@@ -0,0 +1,95 @@
// +build leveldb
#include "leveldb_ext.h"
#include <stdlib.h>
//#include <string>
//#include "leveldb/db.h"
//using namespace leveldb;
extern "C" {
// static bool SaveError(char** errptr, const Status& s) {
// assert(errptr != NULL);
// if (s.ok()) {
// return false;
// } else if (*errptr == NULL) {
// *errptr = strdup(s.ToString().c_str());
// } else {
// free(*errptr);
// *errptr = strdup(s.ToString().c_str());
// }
// return true;
// }
// void* leveldb_get_ext(
// leveldb_t* db,
// const leveldb_readoptions_t* options,
// const char* key, size_t keylen,
// char** valptr,
// size_t* vallen,
// char** errptr) {
// std::string *tmp = new(std::string);
// //very tricky, maybe changed with c++ leveldb upgrade
// Status s = (*(DB**)db)->Get(*(ReadOptions*)options, Slice(key, keylen), tmp);
// if (s.ok()) {
// *valptr = (char*)tmp->data();
// *vallen = tmp->size();
// } else {
// delete(tmp);
// tmp = NULL;
// *valptr = NULL;
// *vallen = 0;
// if (!s.IsNotFound()) {
// SaveError(errptr, s);
// }
// }
// return tmp;
// }
// void leveldb_get_free_ext(void* context) {
// std::string* s = (std::string*)context;
// delete(s);
// }
unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t* iter) {
leveldb_iter_seek_to_first(iter);
return leveldb_iter_valid(iter);
}
unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t* iter) {
leveldb_iter_seek_to_last(iter);
return leveldb_iter_valid(iter);
}
unsigned char leveldb_iter_seek_ext(leveldb_iterator_t* iter, const char* k, size_t klen) {
leveldb_iter_seek(iter, k, klen);
return leveldb_iter_valid(iter);
}
unsigned char leveldb_iter_next_ext(leveldb_iterator_t* iter) {
leveldb_iter_next(iter);
return leveldb_iter_valid(iter);
}
unsigned char leveldb_iter_prev_ext(leveldb_iterator_t* iter) {
leveldb_iter_prev(iter);
return leveldb_iter_valid(iter);
}
extern void leveldb_writebatch_iterate_put(void*, const char* k, size_t klen, const char* v, size_t vlen);
extern void leveldb_writebatch_iterate_delete(void*, const char* k, size_t klen);
void leveldb_writebatch_iterate_ext(leveldb_writebatch_t* w, void *p) {
leveldb_writebatch_iterate(w, p,
leveldb_writebatch_iterate_put, leveldb_writebatch_iterate_delete);
}
}

@@ -0,0 +1,41 @@
// +build leveldb
#ifndef LEVELDB_EXT_H
#define LEVELDB_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "leveldb/c.h"
// /* Returns NULL if not found. Otherwise stores the value in **valptr.
// Stores the length of the value in *vallen.
// Returns a context that must be freed later. */
// extern void* leveldb_get_ext(
// leveldb_t* db,
// const leveldb_readoptions_t* options,
// const char* key, size_t keylen,
// char** valptr,
// size_t* vallen,
// char** errptr);
// // Free context returns by leveldb_get_ext
// extern void leveldb_get_free_ext(void* context);
// The iterator functions below behave like the leveldb iterator API but also return the iterator's validity status
extern unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_seek_ext(leveldb_iterator_t*, const char* k, size_t klen);
extern unsigned char leveldb_iter_next_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_prev_ext(leveldb_iterator_t*);
extern void leveldb_writebatch_iterate_ext(leveldb_writebatch_t*, void* p);
#ifdef __cplusplus
}
#endif
#endif

@@ -0,0 +1,7 @@
Copyright (c) 2012 Jeffrey M Hodges
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,126 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"
type CompressionOpt int
const (
NoCompression = CompressionOpt(0)
SnappyCompression = CompressionOpt(1)
)
type Options struct {
Opt *C.leveldb_options_t
}
type ReadOptions struct {
Opt *C.leveldb_readoptions_t
}
type WriteOptions struct {
Opt *C.leveldb_writeoptions_t
}
func NewOptions() *Options {
opt := C.leveldb_options_create()
return &Options{opt}
}
func NewReadOptions() *ReadOptions {
opt := C.leveldb_readoptions_create()
return &ReadOptions{opt}
}
func NewWriteOptions() *WriteOptions {
opt := C.leveldb_writeoptions_create()
return &WriteOptions{opt}
}
func (o *Options) Close() {
C.leveldb_options_destroy(o.Opt)
}
func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) {
C.leveldb_options_set_comparator(o.Opt, cmp)
}
func (o *Options) SetErrorIfExists(error_if_exists bool) {
eie := boolToUchar(error_if_exists)
C.leveldb_options_set_error_if_exists(o.Opt, eie)
}
func (o *Options) SetCache(cache *Cache) {
C.leveldb_options_set_cache(o.Opt, cache.Cache)
}
func (o *Options) SetWriteBufferSize(s int) {
C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}
func (o *Options) SetParanoidChecks(pc bool) {
C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}
func (o *Options) SetMaxOpenFiles(n int) {
C.leveldb_options_set_max_open_files(o.Opt, C.int(n))
}
func (o *Options) SetMaxFileSize(n int) {
C.leveldb_options_set_max_file_size(o.Opt, C.size_t(n))
}
func (o *Options) SetBlockSize(s int) {
C.leveldb_options_set_block_size(o.Opt, C.size_t(s))
}
func (o *Options) SetBlockRestartInterval(n int) {
C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n))
}
func (o *Options) SetCompression(t CompressionOpt) {
C.leveldb_options_set_compression(o.Opt, C.int(t))
}
func (o *Options) SetCreateIfMissing(b bool) {
C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}
func (o *Options) SetFilterPolicy(fp *FilterPolicy) {
var policy *C.leveldb_filterpolicy_t
if fp != nil {
policy = fp.Policy
}
C.leveldb_options_set_filter_policy(o.Opt, policy)
}
func (ro *ReadOptions) Close() {
C.leveldb_readoptions_destroy(ro.Opt)
}
func (ro *ReadOptions) SetVerifyChecksums(b bool) {
C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetFillCache(b bool) {
C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
var s *C.leveldb_snapshot_t
if snap != nil {
s = snap.snap
}
C.leveldb_readoptions_set_snapshot(ro.Opt, s)
}
func (wo *WriteOptions) Close() {
C.leveldb_writeoptions_destroy(wo.Opt)
}
func (wo *WriteOptions) SetSync(b bool) {
C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}

@@ -0,0 +1,40 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"
import (
"reflect"
"unsafe"
)
type CSlice struct {
data unsafe.Pointer
size int
}
func NewCSlice(p unsafe.Pointer, n int) *CSlice {
return &CSlice{p, n}
}
func (s *CSlice) Data() []byte {
var value []byte
sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
sH.Cap = int(s.size)
sH.Len = int(s.size)
sH.Data = uintptr(s.data)
return value
}
func (s *CSlice) Size() int {
return int(s.size)
}
func (s *CSlice) Free() {
C.leveldb_free(s.data)
}

@@ -0,0 +1,39 @@
// +build leveldb
package leveldb
// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"
import (
"github.com/siddontang/ledisdb/store/driver"
)
type Snapshot struct {
db *DB
snap *C.leveldb_snapshot_t
readOpts *ReadOptions
iteratorOpts *ReadOptions
}
func (s *Snapshot) Get(key []byte) ([]byte, error) {
return s.db.get(s.readOpts, key)
}
func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
return s.db.getSlice(s.readOpts, key)
}
func (s *Snapshot) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.leveldb_create_iterator(s.db.db, s.db.iteratorOpts.Opt)
return it
}
func (s *Snapshot) Close() {
C.leveldb_release_snapshot(s.db.db, s.snap)
s.iteratorOpts.Close()
s.readOpts.Close()
}

@@ -0,0 +1,45 @@
// +build leveldb
package leveldb
// #include "leveldb/c.h"
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
func boolToUchar(b bool) C.uchar {
uc := C.uchar(0)
if b {
uc = C.uchar(1)
}
return uc
}
func ucharToBool(uc C.uchar) bool {
if uc == C.uchar(0) {
return false
}
return true
}
func saveError(errStr *C.char) error {
if errStr != nil {
gs := C.GoString(errStr)
C.leveldb_free(unsafe.Pointer(errStr))
// format with %s so a '%' in the message cannot be misinterpreted
return fmt.Errorf("%s", gs)
}
return nil
}
func slice(p unsafe.Pointer, n int) []byte {
var b []byte
pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pbyte.Data = uintptr(p)
pbyte.Len = n
pbyte.Cap = n
return b
}

@@ -0,0 +1,83 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"
import (
"unsafe"
)
type WriteBatch struct {
db *DB
wbatch *C.rocksdb_writebatch_t
commitOk bool
}
func (w *WriteBatch) Close() {
if w.wbatch != nil {
C.rocksdb_writebatch_destroy(w.wbatch)
w.wbatch = nil
}
}
func (w *WriteBatch) Put(key, value []byte) {
w.commitOk = false
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}
func (w *WriteBatch) Delete(key []byte) {
w.commitOk = false
C.rocksdb_writebatch_delete(w.wbatch,
(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
func (w *WriteBatch) Commit() error {
return w.commit(w.db.writeOpts)
}
func (w *WriteBatch) SyncCommit() error {
return w.commit(w.db.syncOpts)
}
func (w *WriteBatch) Rollback() error {
if !w.commitOk {
C.rocksdb_writebatch_clear(w.wbatch)
}
return nil
}
func (w *WriteBatch) commit(wb *WriteOptions) error {
w.commitOk = true
var errStr *C.char
C.rocksdb_write_ext(w.db.db, wb.Opt, w.wbatch, &errStr)
if errStr != nil {
w.commitOk = false
return saveError(errStr)
}
return nil
}
func (w *WriteBatch) Data() []byte {
var vallen C.size_t
value := C.rocksdb_writebatch_data(w.wbatch, &vallen)
return slice(unsafe.Pointer(value), int(vallen))
}

@@ -0,0 +1,20 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdint.h>
// #include "rocksdb/c.h"
import "C"
type Cache struct {
Cache *C.rocksdb_cache_t
}
func NewLRUCache(capacity int) *Cache {
return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))}
}
func (c *Cache) Close() {
C.rocksdb_cache_destroy(c.Cache)
}

@@ -0,0 +1,3 @@
package rocksdb
const DBName = "rocksdb"

@@ -0,0 +1,342 @@
// +build rocksdb
// Package rocksdb is a wrapper for c++ rocksdb
package rocksdb
/*
#cgo LDFLAGS: -lrocksdb
#include <rocksdb/c.h>
#include <stdlib.h>
#include "rocksdb_ext.h"
*/
import "C"
import (
"os"
"runtime"
"unsafe"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
)
const defaultFilterBits int = 10
type Store struct {
}
func (s Store) String() string {
return DBName
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
db := new(DB)
db.path = path
db.cfg = &cfg.RocksDB
if err := db.open(); err != nil {
return nil, err
}
return db, nil
}
func (s Store) Repair(path string, cfg *config.Config) error {
db := new(DB)
db.path = path
db.cfg = &cfg.RocksDB
err := db.open()
defer db.Close()
// opened successfully, so no repair is needed
if err == nil {
return nil
}
var errStr *C.char
ldbname := C.CString(path)
defer C.free(unsafe.Pointer(ldbname))
C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
type DB struct {
path string
cfg *config.RocksDBConfig
db *C.rocksdb_t
env *Env
opts *Options
blockOpts *BlockBasedTableOptions
//for default read and write options
readOpts *ReadOptions
writeOpts *WriteOptions
iteratorOpts *ReadOptions
syncOpts *WriteOptions
cache *Cache
filter *FilterPolicy
}
func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.path)
defer C.free(unsafe.Pointer(ldbname))
db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
db.db = nil
return saveError(errStr)
}
return nil
}
func (db *DB) initOptions(cfg *config.RocksDBConfig) {
opts := NewOptions()
blockOpts := NewBlockBasedTableOptions()
opts.SetCreateIfMissing(true)
db.env = NewDefaultEnv()
db.env.SetBackgroundThreads(cfg.BackgroundThreads)
db.env.SetHighPriorityBackgroundThreads(cfg.HighPriorityBackgroundThreads)
opts.SetEnv(db.env)
db.cache = NewLRUCache(cfg.CacheSize)
blockOpts.SetCache(db.cache)
// we must use a bloom filter
db.filter = NewBloomFilter(defaultFilterBits)
blockOpts.SetFilterPolicy(db.filter)
blockOpts.SetBlockSize(cfg.BlockSize)
opts.SetBlockBasedTableFactory(blockOpts)
opts.SetCompression(CompressionOpt(cfg.Compression))
opts.SetWriteBufferSize(cfg.WriteBufferSize)
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions)
opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes)
opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger)
opts.SetLevel0SlowdownWritesTrigger(cfg.Level0SlowdownWritesTrigger)
opts.SetLevel0StopWritesTrigger(cfg.Level0StopWritesTrigger)
opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase)
opts.SetTargetFileSizeMultiplier(cfg.TargetFileSizeMultiplier)
opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase)
opts.SetMaxBytesForLevelMultiplier(cfg.MaxBytesForLevelMultiplier)
opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge)
opts.DisableAutoCompactions(cfg.DisableAutoCompactions)
opts.EnableStatistics(cfg.EnableStatistics)
opts.UseFsync(cfg.UseFsync)
opts.SetStatsDumpPeriodSec(cfg.StatsDumpPeriodSec)
opts.SetMaxManifestFileSize(cfg.MaxManifestFileSize)
db.opts = opts
db.blockOpts = blockOpts
db.readOpts = NewReadOptions()
db.writeOpts = NewWriteOptions()
db.writeOpts.DisableWAL(cfg.DisableWAL)
db.syncOpts = NewWriteOptions()
db.syncOpts.SetSync(true)
db.syncOpts.DisableWAL(cfg.DisableWAL)
db.iteratorOpts = NewReadOptions()
db.iteratorOpts.SetFillCache(false)
}
func (db *DB) Close() error {
if db.db != nil {
C.rocksdb_close(db.db)
db.db = nil
}
if db.filter != nil {
db.filter.Close()
}
if db.cache != nil {
db.cache.Close()
}
if db.env != nil {
db.env.Close()
}
//db.blockOpts.Close()
db.opts.Close()
db.readOpts.Close()
db.writeOpts.Close()
db.iteratorOpts.Close()
return nil
}
func (db *DB) Put(key, value []byte) error {
return db.put(db.writeOpts, key, value)
}
func (db *DB) Get(key []byte) ([]byte, error) {
return db.get(db.readOpts, key)
}
func (db *DB) Delete(key []byte) error {
return db.delete(db.writeOpts, key)
}
func (db *DB) SyncPut(key []byte, value []byte) error {
return db.put(db.syncOpts, key, value)
}
func (db *DB) SyncDelete(key []byte) error {
return db.delete(db.syncOpts, key)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch {
wb := &WriteBatch{
db: db,
wbatch: C.rocksdb_writebatch_create(),
}
runtime.SetFinalizer(wb, func(w *WriteBatch) {
w.Close()
})
return wb
}
func (db *DB) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt)
return it
}
func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
snap := &Snapshot{
db: db,
snap: C.rocksdb_create_snapshot(db.db),
readOpts: NewReadOptions(),
iteratorOpts: NewReadOptions(),
}
snap.readOpts.SetSnapshot(snap)
snap.iteratorOpts.SetSnapshot(snap)
snap.iteratorOpts.SetFillCache(false)
return snap, nil
}
func (db *DB) put(wo *WriteOptions, key, value []byte) error {
var errStr *C.char
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_put(
db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.rocksdb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
defer C.free(unsafe.Pointer(value))
return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.rocksdb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
return NewCSlice(unsafe.Pointer(value), int(vallen)), nil
}
func (db *DB) delete(wo *WriteOptions, key []byte) error {
var errStr *C.char
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
C.rocksdb_delete(
db.db, wo.Opt, k, C.size_t(len(key)), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) Compact() error {
C.rocksdb_compact_range(db.db, nil, 0, nil, 0)
return nil
}
func (db *DB) GetSlice(key []byte) (driver.ISlice, error) {
return db.getSlice(db.readOpts, key)
}
func init() {
driver.Register(Store{})
}

@@ -0,0 +1,27 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type Env struct {
Env *C.rocksdb_env_t
}
func NewDefaultEnv() *Env {
return &Env{C.rocksdb_create_default_env()}
}
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n))
}
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.Env, C.int(n))
}
func (env *Env) Close() {
C.rocksdb_env_destroy(env.Env)
}

@@ -0,0 +1,21 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
type FilterPolicy struct {
Policy *C.rocksdb_filterpolicy_t
}
func NewBloomFilter(bitsPerKey int) *FilterPolicy {
policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))
return &FilterPolicy{policy}
}
func (fp *FilterPolicy) Close() {
C.rocksdb_filterpolicy_destroy(fp.Policy)
}

@@ -0,0 +1,70 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"
import (
"unsafe"
)
type Iterator struct {
it *C.rocksdb_iterator_t
isValid C.uchar
}
func (it *Iterator) Key() []byte {
var klen C.size_t
kdata := C.rocksdb_iter_key(it.it, &klen)
if kdata == nil {
return nil
}
return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}
func (it *Iterator) Value() []byte {
var vlen C.size_t
vdata := C.rocksdb_iter_value(it.it, &vlen)
if vdata == nil {
return nil
}
return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}
func (it *Iterator) Close() error {
if it.it != nil {
C.rocksdb_iter_destroy(it.it)
it.it = nil
}
return nil
}
func (it *Iterator) Valid() bool {
return ucharToBool(it.isValid)
}
func (it *Iterator) Next() {
it.isValid = C.rocksdb_iter_next_ext(it.it)
}
func (it *Iterator) Prev() {
it.isValid = C.rocksdb_iter_prev_ext(it.it)
}
func (it *Iterator) First() {
it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it)
}
func (it *Iterator) Last() {
it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it)
}
func (it *Iterator) Seek(key []byte) {
it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}

@@ -0,0 +1,229 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type CompressionOpt int
const (
NoCompression = CompressionOpt(0)
SnappyCompression = CompressionOpt(1)
ZlibCompression = CompressionOpt(2)
Bz2Compression = CompressionOpt(3)
Lz4Compression = CompressionOpt(4)
Lz4hcCompression = CompressionOpt(5)
)
type Options struct {
Opt *C.rocksdb_options_t
}
type ReadOptions struct {
Opt *C.rocksdb_readoptions_t
}
type WriteOptions struct {
Opt *C.rocksdb_writeoptions_t
}
type BlockBasedTableOptions struct {
Opt *C.rocksdb_block_based_table_options_t
}
func NewOptions() *Options {
opt := C.rocksdb_options_create()
return &Options{opt}
}
func NewReadOptions() *ReadOptions {
opt := C.rocksdb_readoptions_create()
return &ReadOptions{opt}
}
func NewWriteOptions() *WriteOptions {
opt := C.rocksdb_writeoptions_create()
return &WriteOptions{opt}
}
func NewBlockBasedTableOptions() *BlockBasedTableOptions {
opt := C.rocksdb_block_based_options_create()
return &BlockBasedTableOptions{opt}
}
func (o *Options) Close() {
C.rocksdb_options_destroy(o.Opt)
}
func (o *Options) IncreaseParallelism(n int) {
C.rocksdb_options_increase_parallelism(o.Opt, C.int(n))
}
func (o *Options) OptimizeLevelStyleCompaction(n int) {
C.rocksdb_options_optimize_level_style_compaction(o.Opt, C.uint64_t(n))
}
func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) {
C.rocksdb_options_set_comparator(o.Opt, cmp)
}
func (o *Options) SetErrorIfExists(error_if_exists bool) {
eie := boolToUchar(error_if_exists)
C.rocksdb_options_set_error_if_exists(o.Opt, eie)
}
func (o *Options) SetEnv(env *Env) {
C.rocksdb_options_set_env(o.Opt, env.Env)
}
func (o *Options) SetWriteBufferSize(s int) {
C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}
func (o *Options) SetParanoidChecks(pc bool) {
C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}
func (o *Options) SetMaxOpenFiles(n int) {
C.rocksdb_options_set_max_open_files(o.Opt, C.int(n))
}
func (o *Options) SetCompression(t CompressionOpt) {
C.rocksdb_options_set_compression(o.Opt, C.int(t))
}
func (o *Options) SetCreateIfMissing(b bool) {
C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}
func (o *Options) SetMaxWriteBufferNumber(n int) {
C.rocksdb_options_set_max_write_buffer_number(o.Opt, C.int(n))
}
func (o *Options) SetMaxBackgroundCompactions(n int) {
C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n))
}
func (o *Options) SetMaxBackgroundFlushes(n int) {
C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n))
}
func (o *Options) SetNumLevels(n int) {
C.rocksdb_options_set_num_levels(o.Opt, C.int(n))
}
func (o *Options) SetLevel0FileNumCompactionTrigger(n int) {
C.rocksdb_options_set_level0_file_num_compaction_trigger(o.Opt, C.int(n))
}
func (o *Options) SetLevel0SlowdownWritesTrigger(n int) {
C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetLevel0StopWritesTrigger(n int) {
C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetTargetFileSizeBase(n int) {
C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n)))
}
func (o *Options) SetTargetFileSizeMultiplier(n int) {
C.rocksdb_options_set_target_file_size_multiplier(o.Opt, C.int(n))
}
func (o *Options) SetMaxBytesForLevelBase(n int) {
C.rocksdb_options_set_max_bytes_for_level_base(o.Opt, C.uint64_t(uint64(n)))
}
func (o *Options) SetMaxBytesForLevelMultiplier(n int) {
C.rocksdb_options_set_max_bytes_for_level_multiplier(o.Opt, C.double(n))
}
func (o *Options) SetBlockBasedTableFactory(opt *BlockBasedTableOptions) {
C.rocksdb_options_set_block_based_table_factory(o.Opt, opt.Opt)
}
func (o *Options) SetMinWriteBufferNumberToMerge(n int) {
C.rocksdb_options_set_min_write_buffer_number_to_merge(o.Opt, C.int(n))
}
func (o *Options) DisableAutoCompactions(b bool) {
C.rocksdb_options_set_disable_auto_compactions(o.Opt, boolToInt(b))
}
func (o *Options) UseFsync(b bool) {
C.rocksdb_options_set_use_fsync(o.Opt, boolToInt(b))
}
func (o *Options) EnableStatistics(b bool) {
if b {
C.rocksdb_options_enable_statistics(o.Opt)
}
}
func (o *Options) SetStatsDumpPeriodSec(n int) {
C.rocksdb_options_set_stats_dump_period_sec(o.Opt, C.uint(n))
}
func (o *Options) SetMaxManifestFileSize(n int) {
C.rocksdb_options_set_max_manifest_file_size(o.Opt, C.size_t(n))
}
func (o *BlockBasedTableOptions) Close() {
C.rocksdb_block_based_options_destroy(o.Opt)
}
func (o *BlockBasedTableOptions) SetFilterPolicy(fp *FilterPolicy) {
var policy *C.rocksdb_filterpolicy_t
if fp != nil {
policy = fp.Policy
}
C.rocksdb_block_based_options_set_filter_policy(o.Opt, policy)
}
func (o *BlockBasedTableOptions) SetBlockSize(s int) {
C.rocksdb_block_based_options_set_block_size(o.Opt, C.size_t(s))
}
func (o *BlockBasedTableOptions) SetBlockRestartInterval(n int) {
C.rocksdb_block_based_options_set_block_restart_interval(o.Opt, C.int(n))
}
func (o *BlockBasedTableOptions) SetCache(cache *Cache) {
C.rocksdb_block_based_options_set_block_cache(o.Opt, cache.Cache)
}
func (ro *ReadOptions) Close() {
C.rocksdb_readoptions_destroy(ro.Opt)
}
func (ro *ReadOptions) SetVerifyChecksums(b bool) {
C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetFillCache(b bool) {
C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
var s *C.rocksdb_snapshot_t
if snap != nil {
s = snap.snap
}
C.rocksdb_readoptions_set_snapshot(ro.Opt, s)
}
func (wo *WriteOptions) Close() {
C.rocksdb_writeoptions_destroy(wo.Opt)
}
func (wo *WriteOptions) SetSync(b bool) {
C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}
func (wo *WriteOptions) DisableWAL(b bool) {
C.rocksdb_writeoptions_disable_WAL(wo.Opt, boolToInt(b))
}

@@ -0,0 +1,44 @@
// +build rocksdb
#include "rocksdb_ext.h"
#include <stdlib.h>
#include <string>
extern "C" {
unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_first(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_last(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) {
rocksdb_iter_seek(iter, k, klen);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_next(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_prev(iter);
return rocksdb_iter_valid(iter);
}
void rocksdb_write_ext(rocksdb_t* db,
const rocksdb_writeoptions_t* options,
rocksdb_writebatch_t* batch, char** errptr) {
rocksdb_write(db, options, batch, errptr);
if(*errptr == NULL) {
rocksdb_writebatch_clear(batch);
}
}
}

@@ -0,0 +1,24 @@
// +build rocksdb
#ifndef ROCKSDB_EXT_H
#define ROCKSDB_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "rocksdb/c.h"
// The iterator functions below behave like the rocksdb iterator API but also return the iterator's validity status
extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen);
extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*);
extern void rocksdb_write_ext(rocksdb_t* db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, char** errptr);
#ifdef __cplusplus
}
#endif
#endif

@@ -0,0 +1,41 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include <rocksdb/c.h>
// #include <stdlib.h>
import "C"
import (
"reflect"
"unsafe"
)
type CSlice struct {
data unsafe.Pointer
size int
}
func NewCSlice(p unsafe.Pointer, n int) *CSlice {
return &CSlice{p, n}
}
func (s *CSlice) Data() []byte {
var value []byte
sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
sH.Cap = int(s.size)
sH.Len = int(s.size)
sH.Data = uintptr(s.data)
return value
}
func (s *CSlice) Size() int {
return int(s.size)
}
func (s *CSlice) Free() {
C.free(s.data)
}

@@ -0,0 +1,39 @@
// +build rocksdb
package rocksdb
// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
import (
"github.com/siddontang/ledisdb/store/driver"
)
type Snapshot struct {
db *DB
snap *C.rocksdb_snapshot_t
readOpts *ReadOptions
iteratorOpts *ReadOptions
}
func (s *Snapshot) Get(key []byte) ([]byte, error) {
return s.db.get(s.readOpts, key)
}
func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
return s.db.getSlice(s.readOpts, key)
}
func (s *Snapshot) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.rocksdb_create_iterator(s.db.db, s.db.iteratorOpts.Opt)
return it
}
func (s *Snapshot) Close() {
C.rocksdb_release_snapshot(s.db.db, s.snap)
s.iteratorOpts.Close()
s.readOpts.Close()
}

@@ -0,0 +1,54 @@
// +build rocksdb
package rocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
func boolToUchar(b bool) C.uchar {
uc := C.uchar(0)
if b {
uc = C.uchar(1)
}
return uc
}
func ucharToBool(uc C.uchar) bool {
if uc == C.uchar(0) {
return false
}
return true
}
func boolToInt(b bool) C.int {
uc := C.int(0)
if b {
uc = C.int(1)
}
return uc
}
func saveError(errStr *C.char) error {
if errStr != nil {
gs := C.GoString(errStr)
C.free(unsafe.Pointer(errStr))
// format with %s so a '%' in the message cannot be misinterpreted
return fmt.Errorf("%s", gs)
}
return nil
}
func slice(p unsafe.Pointer, n int) []byte {
var b []byte
pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pbyte.Data = uintptr(p)
pbyte.Len = n
pbyte.Cap = n
return b
}

vendor/github.com/siddontang/ledisdb/store/slice.go generated vendored Normal file
@@ -0,0 +1,9 @@
package store
import (
"github.com/siddontang/ledisdb/store/driver"
)
type Slice interface {
driver.ISlice
}

vendor/github.com/siddontang/ledisdb/store/snapshot.go generated vendored Normal file
@@ -0,0 +1,48 @@
package store
import (
"github.com/siddontang/ledisdb/store/driver"
)
type Snapshot struct {
driver.ISnapshot
st *Stat
}
func (s *Snapshot) NewIterator() *Iterator {
it := new(Iterator)
it.it = s.ISnapshot.NewIterator()
it.st = s.st
s.st.IterNum.Add(1)
return it
}
func (s *Snapshot) Get(key []byte) ([]byte, error) {
v, err := s.ISnapshot.Get(key)
s.st.statGet(v, err)
return v, err
}
func (s *Snapshot) GetSlice(key []byte) (Slice, error) {
if d, ok := s.ISnapshot.(driver.ISliceGeter); ok {
v, err := d.GetSlice(key)
s.st.statGet(v, err)
return v, err
} else {
v, err := s.Get(key)
if err != nil {
return nil, err
} else if v == nil {
return nil, nil
} else {
return driver.GoSlice(v), nil
}
}
}
func (s *Snapshot) Close() {
s.st.SnapshotCloseNum.Add(1)
s.ISnapshot.Close()
}

vendor/github.com/siddontang/ledisdb/store/stat.go generated vendored Normal file
@@ -0,0 +1,37 @@
package store
import (
"github.com/siddontang/go/sync2"
)
type Stat struct {
GetNum sync2.AtomicInt64
GetMissingNum sync2.AtomicInt64
GetTotalTime sync2.AtomicDuration
PutNum sync2.AtomicInt64
DeleteNum sync2.AtomicInt64
IterNum sync2.AtomicInt64
IterSeekNum sync2.AtomicInt64
IterCloseNum sync2.AtomicInt64
SnapshotNum sync2.AtomicInt64
SnapshotCloseNum sync2.AtomicInt64
BatchNum sync2.AtomicInt64
BatchCommitNum sync2.AtomicInt64
BatchCommitTotalTime sync2.AtomicDuration
TxNum sync2.AtomicInt64
TxCommitNum sync2.AtomicInt64
TxCloseNum sync2.AtomicInt64
CompactNum sync2.AtomicInt64
CompactTotalTime sync2.AtomicDuration
}
func (st *Stat) statGet(v interface{}, err error) {
st.GetNum.Add(1)
if v == nil && err == nil {
st.GetMissingNum.Add(1)
}
}
func (st *Stat) Reset() {
*st = Stat{}
}
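A small sketch of reading these counters from application code; the Get accessor on the sync2 atomic types is an assumption about that package:

// Sketch only, not part of this commit.
package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/store"
)

func printStats(db *store.DB) {
	st := db.Stat()
	fmt.Printf("gets=%d missing=%d puts=%d deletes=%d batches=%d\n",
		st.GetNum.Get(), st.GetMissingNum.Get(),
		st.PutNum.Get(), st.DeleteNum.Get(), st.BatchNum.Get())
}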

vendor/github.com/siddontang/ledisdb/store/store.go generated vendored Normal file
@@ -0,0 +1,62 @@
package store
import (
"fmt"
"os"
"path"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
_ "github.com/siddontang/ledisdb/store/goleveldb"
_ "github.com/siddontang/ledisdb/store/leveldb"
_ "github.com/siddontang/ledisdb/store/rocksdb"
)
func getStorePath(cfg *config.Config) string {
if len(cfg.DBPath) > 0 {
return cfg.DBPath
} else {
return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
}
}
func Open(cfg *config.Config) (*DB, error) {
s, err := driver.GetStore(cfg)
if err != nil {
return nil, err
}
path := getStorePath(cfg)
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
idb, err := s.Open(path, cfg)
if err != nil {
return nil, err
}
db := new(DB)
db.db = idb
db.name = s.String()
db.st = &Stat{}
db.cfg = cfg
return db, nil
}
func Repair(cfg *config.Config) error {
s, err := driver.GetStore(cfg)
if err != nil {
return err
}
path := getStorePath(cfg)
return s.Repair(path, cfg)
}
func init() {
}

@@ -0,0 +1,136 @@
package store
import (
"time"
"github.com/siddontang/ledisdb/store/driver"
"github.com/syndtr/goleveldb/leveldb"
)
type WriteBatch struct {
wb driver.IWriteBatch
st *Stat
putNum int64
deleteNum int64
db *DB
data *BatchData
}
func (wb *WriteBatch) Close() {
wb.wb.Close()
}
func (wb *WriteBatch) Put(key []byte, value []byte) {
wb.putNum++
wb.wb.Put(key, value)
}
func (wb *WriteBatch) Delete(key []byte) {
wb.deleteNum++
wb.wb.Delete(key)
}
func (wb *WriteBatch) Commit() error {
wb.st.BatchCommitNum.Add(1)
wb.st.PutNum.Add(wb.putNum)
wb.st.DeleteNum.Add(wb.deleteNum)
wb.putNum = 0
wb.deleteNum = 0
var err error
t := time.Now()
if wb.db == nil || !wb.db.needSyncCommit() {
err = wb.wb.Commit()
} else {
err = wb.wb.SyncCommit()
}
wb.st.BatchCommitTotalTime.Add(time.Since(t))
return err
}
func (wb *WriteBatch) Rollback() error {
wb.putNum = 0
wb.deleteNum = 0
return wb.wb.Rollback()
}
// The returned data is undefined after Commit or Rollback.
func (wb *WriteBatch) BatchData() *BatchData {
data := wb.wb.Data()
if wb.data == nil {
wb.data = new(BatchData)
}
wb.data.Load(data)
return wb.data
}
func (wb *WriteBatch) Data() []byte {
b := wb.BatchData()
return b.Data()
}
/*
see leveldb batch data format for more information
*/
type BatchData struct {
leveldb.Batch
}
func NewBatchData(data []byte) (*BatchData, error) {
b := new(BatchData)
if err := b.Load(data); err != nil {
return nil, err
}
return b, nil
}
func (d *BatchData) Data() []byte {
return d.Dump()
}
func (d *BatchData) Reset() {
d.Batch.Reset()
}
type BatchDataReplay interface {
Put(key, value []byte)
Delete(key []byte)
}
type BatchItem struct {
Key []byte
Value []byte
}
type batchItems []BatchItem
func (bs *batchItems) Put(key, value []byte) {
*bs = append(*bs, BatchItem{key, value})
}
func (bs *batchItems) Delete(key []byte) {
*bs = append(*bs, BatchItem{key, nil})
}
func (d *BatchData) Replay(r BatchDataReplay) error {
return d.Batch.Replay(r)
}
func (d *BatchData) Items() ([]BatchItem, error) {
is := make(batchItems, 0, d.Len())
if err := d.Replay(&is); err != nil {
return nil, err
}
return []BatchItem(is), nil
}
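To close out the file, a hedged sketch of round-tripping a batch through BatchData and Items; the keys are invented, and note that batchItems marks deletes with a nil Value:

// Sketch only, not part of this commit.
package example

import (
	"fmt"

	"github.com/siddontang/ledisdb/store"
)

func dumpBatch(db *store.DB) error {
	wb := db.NewWriteBatch()
	defer wb.Close()
	wb.Put([]byte("k1"), []byte("v1"))
	wb.Delete([]byte("k2"))

	// Serialize, reload, and replay the batch into []BatchItem.
	bd, err := store.NewBatchData(wb.Data())
	if err != nil {
		return err
	}
	items, err := bd.Items()
	if err != nil {
		return err
	}
	for _, item := range items {
		if item.Value == nil { // deletes carry a nil value
			fmt.Printf("delete %s\n", item.Key)
		} else {
			fmt.Printf("put %s = %s\n", item.Key, item.Value)
		}
	}
	return nil
}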