mirror of https://github.com/astaxie/beego.git synced 2025-07-03 02:20:19 +00:00

update mod

Author: astaxie
Date:   2018-11-09 12:37:28 +08:00
parent 9fdc1eaf3a
commit 5ea04bdfd3

548 changed files with 339257 additions and 46 deletions

View File

@@ -0,0 +1,83 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"
import (
"unsafe"
)
type WriteBatch struct {
db *DB
wbatch *C.rocksdb_writebatch_t
commitOk bool
}
func (w *WriteBatch) Close() {
if w.wbatch != nil {
C.rocksdb_writebatch_destroy(w.wbatch)
w.wbatch = nil
}
}
func (w *WriteBatch) Put(key, value []byte) {
w.commitOk = false
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}
func (w *WriteBatch) Delete(key []byte) {
w.commitOk = false
C.rocksdb_writebatch_delete(w.wbatch,
(*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
func (w *WriteBatch) Commit() error {
return w.commit(w.db.writeOpts)
}
func (w *WriteBatch) SyncCommit() error {
return w.commit(w.db.syncOpts)
}
func (w *WriteBatch) Rollback() error {
if !w.commitOk {
C.rocksdb_writebatch_clear(w.wbatch)
}
return nil
}
func (w *WriteBatch) commit(wb *WriteOptions) error {
w.commitOk = true
var errStr *C.char
C.rocksdb_write_ext(w.db.db, wb.Opt, w.wbatch, &errStr)
if errStr != nil {
w.commitOk = false
return saveError(errStr)
}
return nil
}
func (w *WriteBatch) Data() []byte {
var vallen C.size_t
value := C.rocksdb_writebatch_data(w.wbatch, &vallen)
return slice(unsafe.Pointer(value), int(vallen))
}
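A rough usage sketch for the write batch above (hypothetical, not part of this diff), assuming a *DB opened through this driver and that driver.IWriteBatch exposes the same Put/Delete/Commit/Rollback/Close methods implemented here:

// +build rocksdb

package rocksdb

// batchExample buffers a few writes and commits them atomically.
// Rollback only clears the batch when the commit did not succeed
// (commitOk is false), so calling it after a failed Commit is safe.
func batchExample(db *DB) error {
    wb := db.NewWriteBatch() // backed by a C rocksdb_writebatch_t
    defer wb.Close()

    wb.Put([]byte("k1"), []byte("v1"))
    wb.Put([]byte("k2"), []byte("v2"))
    wb.Delete([]byte("stale"))

    if err := wb.Commit(); err != nil {
        wb.Rollback() // drop the buffered writes instead of retrying
        return err
    }
    return nil
}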

View File

@@ -0,0 +1,20 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdint.h>
// #include "rocksdb/c.h"
import "C"
type Cache struct {
Cache *C.rocksdb_cache_t
}
func NewLRUCache(capacity int) *Cache {
return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))}
}
func (c *Cache) Close() {
C.rocksdb_cache_destroy(c.Cache)
}

View File

@@ -0,0 +1,3 @@
package rocksdb

const DBName = "rocksdb"

View File

@@ -0,0 +1,342 @@
// +build rocksdb

// Package rocksdb is a wrapper for c++ rocksdb
package rocksdb

/*
#cgo LDFLAGS: -lrocksdb
#include <rocksdb/c.h>
#include <stdlib.h>
#include "rocksdb_ext.h"
*/
import "C"
import (
"os"
"runtime"
"unsafe"
"github.com/siddontang/ledisdb/config"
"github.com/siddontang/ledisdb/store/driver"
)
const defaultFilterBits int = 10
type Store struct {
}
func (s Store) String() string {
return DBName
}
func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
db := new(DB)
db.path = path
db.cfg = &cfg.RocksDB
if err := db.open(); err != nil {
return nil, err
}
return db, nil
}
func (s Store) Repair(path string, cfg *config.Config) error {
db := new(DB)
db.path = path
db.cfg = &cfg.RocksDB
err := db.open()
defer db.Close()
// the database opened successfully, so no repair is needed
if err == nil {
return nil
}
var errStr *C.char
ldbname := C.CString(path)
defer C.free(unsafe.Pointer(ldbname))
C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
type DB struct {
path string
cfg *config.RocksDBConfig
db *C.rocksdb_t
env *Env
opts *Options
blockOpts *BlockBasedTableOptions
//for default read and write options
readOpts *ReadOptions
writeOpts *WriteOptions
iteratorOpts *ReadOptions
syncOpts *WriteOptions
cache *Cache
filter *FilterPolicy
}
func (db *DB) open() error {
db.initOptions(db.cfg)
var errStr *C.char
ldbname := C.CString(db.path)
defer C.free(unsafe.Pointer(ldbname))
db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr)
if errStr != nil {
db.db = nil
return saveError(errStr)
}
return nil
}
func (db *DB) initOptions(cfg *config.RocksDBConfig) {
opts := NewOptions()
blockOpts := NewBlockBasedTableOptions()
opts.SetCreateIfMissing(true)
db.env = NewDefaultEnv()
db.env.SetBackgroundThreads(cfg.BackgroundThreads)
db.env.SetHighPriorityBackgroundThreads(cfg.HighPriorityBackgroundThreads)
opts.SetEnv(db.env)
db.cache = NewLRUCache(cfg.CacheSize)
blockOpts.SetCache(db.cache)
// always use a bloom filter
db.filter = NewBloomFilter(defaultFilterBits)
blockOpts.SetFilterPolicy(db.filter)
blockOpts.SetBlockSize(cfg.BlockSize)
opts.SetBlockBasedTableFactory(blockOpts)
opts.SetCompression(CompressionOpt(cfg.Compression))
opts.SetWriteBufferSize(cfg.WriteBufferSize)
opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions)
opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes)
opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger)
opts.SetLevel0SlowdownWritesTrigger(cfg.Level0SlowdownWritesTrigger)
opts.SetLevel0StopWritesTrigger(cfg.Level0StopWritesTrigger)
opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase)
opts.SetTargetFileSizeMultiplier(cfg.TargetFileSizeMultiplier)
opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase)
opts.SetMaxBytesForLevelMultiplier(cfg.MaxBytesForLevelMultiplier)
opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge)
opts.DisableAutoCompactions(cfg.DisableAutoCompactions)
opts.EnableStatistics(cfg.EnableStatistics)
opts.UseFsync(cfg.UseFsync)
opts.SetStatsDumpPeriodSec(cfg.StatsDumpPeriodSec)
opts.SetMaxManifestFileSize(cfg.MaxManifestFileSize)
db.opts = opts
db.blockOpts = blockOpts
db.readOpts = NewReadOptions()
db.writeOpts = NewWriteOptions()
db.writeOpts.DisableWAL(cfg.DisableWAL)
db.syncOpts = NewWriteOptions()
db.syncOpts.SetSync(true)
db.syncOpts.DisableWAL(cfg.DisableWAL)
db.iteratorOpts = NewReadOptions()
db.iteratorOpts.SetFillCache(false)
}
func (db *DB) Close() error {
if db.db != nil {
C.rocksdb_close(db.db)
db.db = nil
}
if db.filter != nil {
db.filter.Close()
}
if db.cache != nil {
db.cache.Close()
}
if db.env != nil {
db.env.Close()
}
//db.blockOpts.Close()
db.opts.Close()
db.readOpts.Close()
db.writeOpts.Close()
db.iteratorOpts.Close()
return nil
}
func (db *DB) Put(key, value []byte) error {
return db.put(db.writeOpts, key, value)
}
func (db *DB) Get(key []byte) ([]byte, error) {
return db.get(db.readOpts, key)
}
func (db *DB) Delete(key []byte) error {
return db.delete(db.writeOpts, key)
}
func (db *DB) SyncPut(key []byte, value []byte) error {
return db.put(db.syncOpts, key, value)
}
func (db *DB) SyncDelete(key []byte) error {
return db.delete(db.syncOpts, key)
}
func (db *DB) NewWriteBatch() driver.IWriteBatch {
wb := &WriteBatch{
db: db,
wbatch: C.rocksdb_writebatch_create(),
}
runtime.SetFinalizer(wb, func(w *WriteBatch) {
w.Close()
})
return wb
}
func (db *DB) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt)
return it
}
func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
snap := &Snapshot{
db: db,
snap: C.rocksdb_create_snapshot(db.db),
readOpts: NewReadOptions(),
iteratorOpts: NewReadOptions(),
}
snap.readOpts.SetSnapshot(snap)
snap.iteratorOpts.SetSnapshot(snap)
snap.iteratorOpts.SetFillCache(false)
return snap, nil
}
func (db *DB) put(wo *WriteOptions, key, value []byte) error {
var errStr *C.char
var k, v *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
if len(value) != 0 {
v = (*C.char)(unsafe.Pointer(&value[0]))
}
lenk := len(key)
lenv := len(value)
C.rocksdb_put(
db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.rocksdb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
defer C.free(unsafe.Pointer(value))
return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) {
var errStr *C.char
var vallen C.size_t
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
value := C.rocksdb_get(
db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
if errStr != nil {
return nil, saveError(errStr)
}
if value == nil {
return nil, nil
}
return NewCSlice(unsafe.Pointer(value), int(vallen)), nil
}
func (db *DB) delete(wo *WriteOptions, key []byte) error {
var errStr *C.char
var k *C.char
if len(key) != 0 {
k = (*C.char)(unsafe.Pointer(&key[0]))
}
C.rocksdb_delete(
db.db, wo.Opt, k, C.size_t(len(key)), &errStr)
if errStr != nil {
return saveError(errStr)
}
return nil
}
func (db *DB) Compact() error {
C.rocksdb_compact_range(db.db, nil, 0, nil, 0)
return nil
}
func (db *DB) GetSlice(key []byte) (driver.ISlice, error) {
return db.getSlice(db.readOpts, key)
}
func init() {
driver.Register(Store{})
}
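A hypothetical sketch (not part of this diff) of how the driver above gets used: init() registers Store{} with ledisdb's driver registry, and a caller opens it with a path plus the RocksDB section of the config. config.NewConfigDefault is assumed to exist in the vendored config package:

// +build rocksdb

package rocksdb

import "github.com/siddontang/ledisdb/config"

// openExample opens the store, writes one key and reads it back.
func openExample(path string) ([]byte, error) {
    cfg := config.NewConfigDefault() // assumed default-config helper

    idb, err := Store{}.Open(path, cfg)
    if err != nil {
        return nil, err
    }
    db := idb.(*DB) // concrete type defined in this file
    defer db.Close()

    if err := db.Put([]byte("hello"), []byte("world")); err != nil {
        return nil, err
    }
    return db.Get([]byte("hello"))
}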

View File

@@ -0,0 +1,27 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type Env struct {
Env *C.rocksdb_env_t
}
func NewDefaultEnv() *Env {
return &Env{C.rocksdb_create_default_env()}
}
func (env *Env) SetHighPriorityBackgroundThreads(n int) {
C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n))
}
func (env *Env) SetBackgroundThreads(n int) {
C.rocksdb_env_set_background_threads(env.Env, C.int(n))
}
func (env *Env) Close() {
C.rocksdb_env_destroy(env.Env)
}

View File

@@ -0,0 +1,21 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
type FilterPolicy struct {
Policy *C.rocksdb_filterpolicy_t
}
func NewBloomFilter(bitsPerKey int) *FilterPolicy {
policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))
return &FilterPolicy{policy}
}
func (fp *FilterPolicy) Close() {
C.rocksdb_filterpolicy_destroy(fp.Policy)
}

View File

@@ -0,0 +1,70 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"
import (
"unsafe"
)
type Iterator struct {
it *C.rocksdb_iterator_t
isValid C.uchar
}
func (it *Iterator) Key() []byte {
var klen C.size_t
kdata := C.rocksdb_iter_key(it.it, &klen)
if kdata == nil {
return nil
}
return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}
func (it *Iterator) Value() []byte {
var vlen C.size_t
vdata := C.rocksdb_iter_value(it.it, &vlen)
if vdata == nil {
return nil
}
return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}
func (it *Iterator) Close() error {
if it.it != nil {
C.rocksdb_iter_destroy(it.it)
it.it = nil
}
return nil
}
func (it *Iterator) Valid() bool {
return ucharToBool(it.isValid)
}
func (it *Iterator) Next() {
it.isValid = C.rocksdb_iter_next_ext(it.it)
}
func (it *Iterator) Prev() {
it.isValid = C.rocksdb_iter_prev_ext(it.it)
}
func (it *Iterator) First() {
it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it)
}
func (it *Iterator) Last() {
it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it)
}
func (it *Iterator) Seek(key []byte) {
it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
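A hypothetical scan sketch (not part of this diff). Each movement call above caches the C-side validity in isValid, so Valid() costs no extra cgo call per step:

// +build rocksdb

package rocksdb

// allKeys walks the whole keyspace and copies every key it sees.
func allKeys(db *DB) [][]byte {
    it := db.NewIterator().(*Iterator) // concrete type defined in this file
    defer it.Close()

    var keys [][]byte
    for it.First(); it.Valid(); it.Next() {
        // Key() aliases C-owned memory, so copy before the iterator moves on.
        keys = append(keys, append([]byte(nil), it.Key()...))
    }
    return keys
}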

View File

@@ -0,0 +1,229 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
type CompressionOpt int
const (
NoCompression = CompressionOpt(0)
SnappyCompression = CompressionOpt(1)
ZlibCompression = CompressionOpt(2)
Bz2Compression = CompressionOpt(3)
Lz4Compression = CompressionOpt(4)
Lz4hcCompression = CompressionOpt(5)
)
type Options struct {
Opt *C.rocksdb_options_t
}
type ReadOptions struct {
Opt *C.rocksdb_readoptions_t
}
type WriteOptions struct {
Opt *C.rocksdb_writeoptions_t
}
type BlockBasedTableOptions struct {
Opt *C.rocksdb_block_based_table_options_t
}
func NewOptions() *Options {
opt := C.rocksdb_options_create()
return &Options{opt}
}
func NewReadOptions() *ReadOptions {
opt := C.rocksdb_readoptions_create()
return &ReadOptions{opt}
}
func NewWriteOptions() *WriteOptions {
opt := C.rocksdb_writeoptions_create()
return &WriteOptions{opt}
}
func NewBlockBasedTableOptions() *BlockBasedTableOptions {
opt := C.rocksdb_block_based_options_create()
return &BlockBasedTableOptions{opt}
}
func (o *Options) Close() {
C.rocksdb_options_destroy(o.Opt)
}
func (o *Options) IncreaseParallelism(n int) {
C.rocksdb_options_increase_parallelism(o.Opt, C.int(n))
}
func (o *Options) OptimizeLevelStyleCompaction(n int) {
C.rocksdb_options_optimize_level_style_compaction(o.Opt, C.uint64_t(n))
}
func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) {
C.rocksdb_options_set_comparator(o.Opt, cmp)
}
func (o *Options) SetErrorIfExists(error_if_exists bool) {
eie := boolToUchar(error_if_exists)
C.rocksdb_options_set_error_if_exists(o.Opt, eie)
}
func (o *Options) SetEnv(env *Env) {
C.rocksdb_options_set_env(o.Opt, env.Env)
}
func (o *Options) SetWriteBufferSize(s int) {
C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}
func (o *Options) SetParanoidChecks(pc bool) {
C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}
func (o *Options) SetMaxOpenFiles(n int) {
C.rocksdb_options_set_max_open_files(o.Opt, C.int(n))
}
func (o *Options) SetCompression(t CompressionOpt) {
C.rocksdb_options_set_compression(o.Opt, C.int(t))
}
func (o *Options) SetCreateIfMissing(b bool) {
C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}
func (o *Options) SetMaxWriteBufferNumber(n int) {
C.rocksdb_options_set_max_write_buffer_number(o.Opt, C.int(n))
}
func (o *Options) SetMaxBackgroundCompactions(n int) {
C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n))
}
func (o *Options) SetMaxBackgroundFlushes(n int) {
C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n))
}
func (o *Options) SetNumLevels(n int) {
C.rocksdb_options_set_num_levels(o.Opt, C.int(n))
}
func (o *Options) SetLevel0FileNumCompactionTrigger(n int) {
C.rocksdb_options_set_level0_file_num_compaction_trigger(o.Opt, C.int(n))
}
func (o *Options) SetLevel0SlowdownWritesTrigger(n int) {
C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetLevel0StopWritesTrigger(n int) {
C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n))
}
func (o *Options) SetTargetFileSizeBase(n int) {
C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n)))
}
func (o *Options) SetTargetFileSizeMultiplier(n int) {
C.rocksdb_options_set_target_file_size_multiplier(o.Opt, C.int(n))
}
func (o *Options) SetMaxBytesForLevelBase(n int) {
C.rocksdb_options_set_max_bytes_for_level_base(o.Opt, C.uint64_t(uint64(n)))
}
func (o *Options) SetMaxBytesForLevelMultiplier(n int) {
C.rocksdb_options_set_max_bytes_for_level_multiplier(o.Opt, C.double(n))
}
func (o *Options) SetBlockBasedTableFactory(opt *BlockBasedTableOptions) {
C.rocksdb_options_set_block_based_table_factory(o.Opt, opt.Opt)
}
func (o *Options) SetMinWriteBufferNumberToMerge(n int) {
C.rocksdb_options_set_min_write_buffer_number_to_merge(o.Opt, C.int(n))
}
func (o *Options) DisableAutoCompactions(b bool) {
C.rocksdb_options_set_disable_auto_compactions(o.Opt, boolToInt(b))
}
func (o *Options) UseFsync(b bool) {
C.rocksdb_options_set_use_fsync(o.Opt, boolToInt(b))
}
func (o *Options) EnableStatistics(b bool) {
if b {
C.rocksdb_options_enable_statistics(o.Opt)
}
}
func (o *Options) SetStatsDumpPeriodSec(n int) {
C.rocksdb_options_set_stats_dump_period_sec(o.Opt, C.uint(n))
}
func (o *Options) SetMaxManifestFileSize(n int) {
C.rocksdb_options_set_max_manifest_file_size(o.Opt, C.size_t(n))
}
func (o *BlockBasedTableOptions) Close() {
C.rocksdb_block_based_options_destroy(o.Opt)
}
func (o *BlockBasedTableOptions) SetFilterPolicy(fp *FilterPolicy) {
var policy *C.rocksdb_filterpolicy_t
if fp != nil {
policy = fp.Policy
}
C.rocksdb_block_based_options_set_filter_policy(o.Opt, policy)
}
func (o *BlockBasedTableOptions) SetBlockSize(s int) {
C.rocksdb_block_based_options_set_block_size(o.Opt, C.size_t(s))
}
func (o *BlockBasedTableOptions) SetBlockRestartInterval(n int) {
C.rocksdb_block_based_options_set_block_restart_interval(o.Opt, C.int(n))
}
func (o *BlockBasedTableOptions) SetCache(cache *Cache) {
C.rocksdb_block_based_options_set_block_cache(o.Opt, cache.Cache)
}
func (ro *ReadOptions) Close() {
C.rocksdb_readoptions_destroy(ro.Opt)
}
func (ro *ReadOptions) SetVerifyChecksums(b bool) {
C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetFillCache(b bool) {
C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}
func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
var s *C.rocksdb_snapshot_t
if snap != nil {
s = snap.snap
}
C.rocksdb_readoptions_set_snapshot(ro.Opt, s)
}
func (wo *WriteOptions) Close() {
C.rocksdb_writeoptions_destroy(wo.Opt)
}
func (wo *WriteOptions) SetSync(b bool) {
C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}
func (wo *WriteOptions) DisableWAL(b bool) {
C.rocksdb_writeoptions_disable_WAL(wo.Opt, boolToInt(b))
}

View File

@@ -0,0 +1,44 @@
// +build rocksdb

#include "rocksdb_ext.h"
#include <stdlib.h>
#include <string>
extern "C" {
unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_first(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_seek_to_last(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) {
rocksdb_iter_seek(iter, k, klen);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_next(iter);
return rocksdb_iter_valid(iter);
}
unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) {
rocksdb_iter_prev(iter);
return rocksdb_iter_valid(iter);
}
void rocksdb_write_ext(rocksdb_t* db,
const rocksdb_writeoptions_t* options,
rocksdb_writebatch_t* batch, char** errptr) {
rocksdb_write(db, options, batch, errptr);
if(*errptr == NULL) {
rocksdb_writebatch_clear(batch);
}
}
}

View File

@@ -0,0 +1,24 @@
// +build rocksdb

#ifndef ROCKSDB_EXT_H
#define ROCKSDB_EXT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "rocksdb/c.h"
// The functions below behave like the corresponding rocksdb iterator calls but also return the iterator's valid status.
extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen);
extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*);
extern void rocksdb_write_ext(rocksdb_t* db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, char** errptr);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -0,0 +1,41 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <rocksdb/c.h>
// #include <stdlib.h>
import "C"
import (
"reflect"
"unsafe"
)
type CSlice struct {
data unsafe.Pointer
size int
}
func NewCSlice(p unsafe.Pointer, n int) *CSlice {
return &CSlice{p, n}
}
func (s *CSlice) Data() []byte {
var value []byte
sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
sH.Cap = int(s.size)
sH.Len = int(s.size)
sH.Data = uintptr(s.data)
return value
}
func (s *CSlice) Size() int {
return int(s.size)
}
func (s *CSlice) Free() {
C.free(s.data)
}

View File

@@ -0,0 +1,39 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"
import (
"github.com/siddontang/ledisdb/store/driver"
)
type Snapshot struct {
db *DB
snap *C.rocksdb_snapshot_t
readOpts *ReadOptions
iteratorOpts *ReadOptions
}
func (s *Snapshot) Get(key []byte) ([]byte, error) {
return s.db.get(s.readOpts, key)
}
func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
return s.db.getSlice(s.readOpts, key)
}
func (s *Snapshot) NewIterator() driver.IIterator {
it := new(Iterator)
it.it = C.rocksdb_create_iterator(s.db.db, s.db.iteratorOpts.Opt)
return it
}
func (s *Snapshot) Close() {
C.rocksdb_release_snapshot(s.db.db, s.snap)
s.iteratorOpts.Close()
s.readOpts.Close()
}
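A hypothetical sketch (not part of this diff) of the snapshot semantics: reads through the snapshot keep seeing the data that was current when NewSnapshot was called, even after later writes:

// +build rocksdb

package rocksdb

// snapshotExample writes after taking a snapshot and reads through both views.
func snapshotExample(db *DB) (old, current []byte, err error) {
    isnap, err := db.NewSnapshot()
    if err != nil {
        return nil, nil, err
    }
    snap := isnap.(*Snapshot) // concrete type defined in this file
    defer snap.Close()

    // This write is invisible to the snapshot taken above.
    if err = db.Put([]byte("k"), []byte("new")); err != nil {
        return nil, nil, err
    }

    old, err = snap.Get([]byte("k")) // pre-Put value, possibly nil
    if err != nil {
        return nil, nil, err
    }
    current, err = db.Get([]byte("k")) // sees "new"
    return old, current, err
}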

View File

@@ -0,0 +1,54 @@
// +build rocksdb

package rocksdb

// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
func boolToUchar(b bool) C.uchar {
uc := C.uchar(0)
if b {
uc = C.uchar(1)
}
return uc
}
func ucharToBool(uc C.uchar) bool {
if uc == C.uchar(0) {
return false
}
return true
}
func boolToInt(b bool) C.int {
uc := C.int(0)
if b {
uc = C.int(1)
}
return uc
}
func saveError(errStr *C.char) error {
if errStr != nil {
gs := C.GoString(errStr)
C.free(unsafe.Pointer(errStr))
return fmt.Errorf("%s", gs)
}
return nil
}
func slice(p unsafe.Pointer, n int) []byte {
var b []byte
pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pbyte.Data = uintptr(p)
pbyte.Len = n
pbyte.Cap = n
return b
}
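The slice helper above only wraps a C pointer without copying. A hypothetical illustration (not part of this diff) of the copy-before-free pattern that callers such as Iterator.Key and WriteBatch.Data imply:

// +build rocksdb

package rocksdb

import "unsafe"

// copyFromC turns a C-owned buffer into a Go-owned byte slice.
func copyFromC(p unsafe.Pointer, n int) []byte {
    aliased := slice(p, n)                 // no copy: still points into C memory
    return append([]byte(nil), aliased...) // Go-owned, safe after the C side frees p
}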