Mirror of https://github.com/astaxie/beego.git (synced 2025-07-01 11:40:18 +00:00)

Commit: only add golang.org vendor
vendor/github.com/siddontang/go/LICENSE (20 lines deleted, generated, vendored)
@@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 siddontang

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/siddontang/go/filelock/LICENSE (27 lines deleted, generated, vendored)
@@ -1,27 +0,0 @@
Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/siddontang/go/filelock/file_lock_generic.go (17 lines deleted, generated, vendored)
@@ -1,17 +0,0 @@
// Copyright 2012 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows

package filelock

import (
    "fmt"
    "io"
    "runtime"
)

func Lock(name string) (io.Closer, error) {
    return nil, fmt.Errorf("leveldb/db: file locking is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
vendor/github.com/siddontang/go/filelock/file_lock_solaris.go (43 lines deleted, generated, vendored)
@@ -1,43 +0,0 @@
// Copyright 2014 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build solaris

package filelock

import (
    "io"
    "os"
    "syscall"
)

// lockCloser hides all of an os.File's methods, except for Close.
type lockCloser struct {
    f *os.File
}

func (l lockCloser) Close() error {
    return l.f.Close()
}

func Lock(name string) (io.Closer, error) {
    f, err := os.Create(name)
    if err != nil {
        return nil, err
    }

    spec := syscall.Flock_t{
        Type:   syscall.F_WRLCK,
        Whence: int16(os.SEEK_SET),
        Start:  0,
        Len:    0, // 0 means to lock the entire file.
        Pid:    int32(os.Getpid()),
    }
    if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil {
        f.Close()
        return nil, err
    }

    return lockCloser{f}, nil
}
vendor/github.com/siddontang/go/filelock/file_lock_unix.go (51 lines deleted, generated, vendored)
@@ -1,51 +0,0 @@
// Copyright 2014 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd

package filelock

import (
    "io"
    "os"
    "syscall"
)

// lockCloser hides all of an os.File's methods, except for Close.
type lockCloser struct {
    f *os.File
}

func (l lockCloser) Close() error {
    return l.f.Close()
}

func Lock(name string) (io.Closer, error) {
    f, err := os.Create(name)
    if err != nil {
        return nil, err
    }

    /*
        Some people tell me FcntlFlock does not exist, so use flock here
    */
    if err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
        f.Close()
        return nil, err
    }

    // spec := syscall.Flock_t{
    //     Type:   syscall.F_WRLCK,
    //     Whence: int16(os.SEEK_SET),
    //     Start:  0,
    //     Len:    0, // 0 means to lock the entire file.
    //     Pid:    int32(os.Getpid()),
    // }
    // if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &spec); err != nil {
    //     f.Close()
    //     return nil, err
    // }

    return lockCloser{f}, nil
}
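For reference, a minimal sketch of how this removed filelock package was typically consumed, assuming the upstream module github.com/siddontang/go/filelock is still importable; the lock file path is illustrative:

package main

import (
    "log"

    "github.com/siddontang/go/filelock"
)

func main() {
    // Acquire an exclusive advisory lock; on Unix this uses flock(2).
    closer, err := filelock.Lock("/tmp/myapp.lock") // hypothetical path
    if err != nil {
        log.Fatalf("another instance may be running: %v", err)
    }
    defer closer.Close() // releasing the lock also closes the file

    // ... do work that must not run concurrently ...
}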
vendor/github.com/siddontang/go/filelock/file_lock_windows.go (36 lines deleted, generated, vendored)
@@ -1,36 +0,0 @@
// Copyright 2013 The LevelDB-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package filelock

import (
    "io"
    "syscall"
)

// lockCloser hides all of a syscall.Handle's methods, except for Close.
type lockCloser struct {
    fd syscall.Handle
}

func (l lockCloser) Close() error {
    return syscall.Close(l.fd)
}

func Lock(name string) (io.Closer, error) {
    p, err := syscall.UTF16PtrFromString(name)
    if err != nil {
        return nil, err
    }
    fd, err := syscall.CreateFile(p,
        syscall.GENERIC_READ|syscall.GENERIC_WRITE,
        0, nil, syscall.CREATE_ALWAYS,
        syscall.FILE_ATTRIBUTE_NORMAL,
        0,
    )
    if err != nil {
        return nil, err
    }
    return lockCloser{fd: fd}, nil
}
vendor/github.com/siddontang/go/hack/hack.go (27 lines deleted, generated, vendored)
@@ -1,27 +0,0 @@
package hack

import (
    "reflect"
    "unsafe"
)

// String converts a byte slice to a string with no copy.
// Use at your own risk: the caller must not modify b afterwards.
func String(b []byte) (s string) {
    pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
    pstring.Data = pbytes.Data
    pstring.Len = pbytes.Len
    return
}

// Slice converts a string to a byte slice with no copy.
// Use at your own risk: the returned slice must never be written to.
func Slice(s string) (b []byte) {
    pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
    pbytes.Data = pstring.Data
    pbytes.Len = pstring.Len
    pbytes.Cap = pstring.Len
    return
}
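A short sketch of what these zero-copy conversions do and why they are dangerous, assuming the upstream module github.com/siddontang/go/hack is importable: the result aliases the input's backing memory, so mutations are visible on both sides.

package main

import (
    "fmt"

    "github.com/siddontang/go/hack"
)

func main() {
    b := []byte("hello")
    s := hack.String(b) // no allocation: s aliases b's backing array

    b[0] = 'H'
    fmt.Println(s) // prints "Hello": mutating b is visible through s

    // Conversely, the slice returned by hack.Slice(s) must never be
    // written to, since Go strings are supposed to be immutable.
}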
vendor/github.com/siddontang/go/ioutil2/ioutil.go (39 lines deleted, generated, vendored)
@@ -1,39 +0,0 @@
// Copyright 2012, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ioutil2

import (
    "io"
    "io/ioutil"
    "os"
    "path"
)

// WriteFileAtomic writes the data to a temp file and atomically moves it into place when everything else succeeds.
func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
    dir, name := path.Dir(filename), path.Base(filename)
    f, err := ioutil.TempFile(dir, name)
    if err != nil {
        return err
    }
    n, err := f.Write(data)
    f.Close()
    if err == nil && n < len(data) {
        err = io.ErrShortWrite
    } else {
        err = os.Chmod(f.Name(), perm)
    }
    if err != nil {
        os.Remove(f.Name())
        return err
    }
    return os.Rename(f.Name(), filename)
}

// FileExists checks whether the named file exists.
func FileExists(name string) bool {
    _, err := os.Stat(name)
    return !os.IsNotExist(err)
}
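A minimal usage sketch for the removed WriteFileAtomic, assuming the upstream module github.com/siddontang/go/ioutil2 is importable; the file name and content are illustrative:

package main

import (
    "log"

    "github.com/siddontang/go/ioutil2"
)

func main() {
    data := []byte("key = value\n")
    // Writes to a temp file in the same directory, then renames it over
    // the target, so readers never observe a half-written file.
    if err := ioutil2.WriteFileAtomic("app.conf", data, 0644); err != nil {
        log.Fatal(err)
    }
}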
vendor/github.com/siddontang/go/ioutil2/sectionwriter.go (69 lines deleted, generated, vendored)
@@ -1,69 +0,0 @@
package ioutil2

import (
    "errors"
    "io"
)

var ErrExceedLimit = errors.New("write exceed limit")

func NewSectionWriter(w io.WriterAt, off int64, n int64) *SectionWriter {
    return &SectionWriter{w, off, off, off + n}
}

type SectionWriter struct {
    w     io.WriterAt
    base  int64
    off   int64
    limit int64
}

func (s *SectionWriter) Write(p []byte) (n int, err error) {
    if s.off >= s.limit {
        return 0, ErrExceedLimit
    }

    if max := s.limit - s.off; int64(len(p)) > max {
        return 0, ErrExceedLimit
    }

    n, err = s.w.WriteAt(p, s.off)
    s.off += int64(n)
    return
}

var errWhence = errors.New("Seek: invalid whence")
var errOffset = errors.New("Seek: invalid offset")

func (s *SectionWriter) Seek(offset int64, whence int) (int64, error) {
    switch whence {
    default:
        return 0, errWhence
    case 0:
        offset += s.base
    case 1:
        offset += s.off
    case 2:
        offset += s.limit
    }
    if offset < s.base {
        return 0, errOffset
    }
    s.off = offset
    return offset - s.base, nil
}

func (s *SectionWriter) WriteAt(p []byte, off int64) (n int, err error) {
    if off < 0 || off >= s.limit-s.base {
        return 0, errOffset
    }
    off += s.base
    if max := s.limit - off; int64(len(p)) > max {
        return 0, ErrExceedLimit
    }

    return s.w.WriteAt(p, off)
}

// Size returns the size of the section in bytes.
func (s *SectionWriter) Size() int64 { return s.limit - s.base }
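A sketch of how SectionWriter confines writes to a fixed byte window of an io.WriterAt, assuming the upstream module github.com/siddontang/go/ioutil2 is importable; the file name and window are illustrative:

package main

import (
    "log"
    "os"

    "github.com/siddontang/go/ioutil2"
)

func main() {
    f, err := os.Create("data.bin")
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // Confine writes to bytes [16, 24) of the file; anything beyond
    // the 8-byte window fails with ErrExceedLimit.
    w := ioutil2.NewSectionWriter(f, 16, 8)
    if _, err := w.Write([]byte("12345678")); err != nil {
        log.Fatal(err)
    }
    if _, err := w.Write([]byte("x")); err != nil {
        log.Println(err) // write exceed limit
    }
}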
vendor/github.com/siddontang/go/log/doc.go (21 lines deleted, generated, vendored)
@@ -1,21 +0,0 @@
// Package log supplies more advanced features than Go's original log package.
//
// It supports different log levels: trace, debug, info, warn, error, fatal.
//
// It also supports different log handlers, so you can log to stdout, file, socket, etc...
//
// Use
//
//     import "github.com/siddontang/go/log"
//
//     //log with different level
//     log.Info("hello world")
//     log.Error("hello world")
//
//     //create a logger with specified handler
//     h := NewStreamHandler(os.Stdout)
//     l := log.NewDefault(h)
//     l.Info("hello world")
//     l.Infof("%s %d", "hello", 123)
//
package log
vendor/github.com/siddontang/go/log/filehandler.go (200 lines deleted, generated, vendored)
@@ -1,200 +0,0 @@
package log

import (
    "fmt"
    "os"
    "path"
    "time"
)

// FileHandler writes log to a file.
type FileHandler struct {
    fd *os.File
}

func NewFileHandler(fileName string, flag int) (*FileHandler, error) {
    dir := path.Dir(fileName)
    os.Mkdir(dir, 0777)

    f, err := os.OpenFile(fileName, flag, 0)
    if err != nil {
        return nil, err
    }

    h := new(FileHandler)

    h.fd = f

    return h, nil
}

func (h *FileHandler) Write(b []byte) (n int, err error) {
    return h.fd.Write(b)
}

func (h *FileHandler) Close() error {
    return h.fd.Close()
}

// RotatingFileHandler writes log to a file; if the file size exceeds maxBytes,
// it will back up the current file and open a new one.
//
// The maximum number of backup files is set by backupCount; the oldest backup
// is deleted when there are too many.
type RotatingFileHandler struct {
    fd *os.File

    fileName    string
    maxBytes    int
    backupCount int
}

func NewRotatingFileHandler(fileName string, maxBytes int, backupCount int) (*RotatingFileHandler, error) {
    dir := path.Dir(fileName)
    os.Mkdir(dir, 0777)

    h := new(RotatingFileHandler)

    if maxBytes <= 0 {
        return nil, fmt.Errorf("invalid max bytes")
    }

    h.fileName = fileName
    h.maxBytes = maxBytes
    h.backupCount = backupCount

    var err error
    h.fd, err = os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
    if err != nil {
        return nil, err
    }

    return h, nil
}

func (h *RotatingFileHandler) Write(p []byte) (n int, err error) {
    h.doRollover()
    return h.fd.Write(p)
}

func (h *RotatingFileHandler) Close() error {
    if h.fd != nil {
        return h.fd.Close()
    }
    return nil
}

func (h *RotatingFileHandler) doRollover() {
    f, err := h.fd.Stat()
    if err != nil {
        return
    }

    if h.maxBytes <= 0 {
        return
    } else if f.Size() < int64(h.maxBytes) {
        return
    }

    if h.backupCount > 0 {
        h.fd.Close()

        for i := h.backupCount - 1; i > 0; i-- {
            sfn := fmt.Sprintf("%s.%d", h.fileName, i)
            dfn := fmt.Sprintf("%s.%d", h.fileName, i+1)

            os.Rename(sfn, dfn)
        }

        dfn := fmt.Sprintf("%s.1", h.fileName)
        os.Rename(h.fileName, dfn)

        h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
    }
}

// TimeRotatingFileHandler writes log to a file; it will back up the current
// file and open a new one at a period you specify.
//
// Refer to http://docs.python.org/2/library/logging.handlers.html;
// it works like Python's TimedRotatingFileHandler.
type TimeRotatingFileHandler struct {
    fd *os.File

    baseName   string
    interval   int64
    suffix     string
    rolloverAt int64
}

const (
    WhenSecond = iota
    WhenMinute
    WhenHour
    WhenDay
)

func NewTimeRotatingFileHandler(baseName string, when int8, interval int) (*TimeRotatingFileHandler, error) {
    dir := path.Dir(baseName)
    os.Mkdir(dir, 0777)

    h := new(TimeRotatingFileHandler)

    h.baseName = baseName

    switch when {
    case WhenSecond:
        h.interval = 1
        h.suffix = "2006-01-02_15-04-05"
    case WhenMinute:
        h.interval = 60
        h.suffix = "2006-01-02_15-04"
    case WhenHour:
        h.interval = 3600
        h.suffix = "2006-01-02_15"
    case WhenDay:
        h.interval = 3600 * 24
        h.suffix = "2006-01-02"
    default:
        return nil, fmt.Errorf("invalid when_rotate: %d", when)
    }

    h.interval = h.interval * int64(interval)

    var err error
    h.fd, err = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
    if err != nil {
        return nil, err
    }

    fInfo, _ := h.fd.Stat()
    h.rolloverAt = fInfo.ModTime().Unix() + h.interval

    return h, nil
}

func (h *TimeRotatingFileHandler) doRollover() {
    // refer to http://hg.python.org/cpython/file/2.7/Lib/logging/handlers.py
    now := time.Now()

    if h.rolloverAt <= now.Unix() {
        fName := h.baseName + now.Format(h.suffix)
        h.fd.Close()
        e := os.Rename(h.baseName, fName)
        if e != nil {
            panic(e)
        }

        h.fd, _ = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)

        h.rolloverAt = time.Now().Unix() + h.interval
    }
}

func (h *TimeRotatingFileHandler) Write(b []byte) (n int, err error) {
    h.doRollover()
    return h.fd.Write(b)
}

func (h *TimeRotatingFileHandler) Close() error {
    return h.fd.Close()
}
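A minimal sketch of size-based rotation with the removed handler, assuming the upstream module github.com/siddontang/go/log is importable; the log path and limits are illustrative:

package main

import (
    "os"

    "github.com/siddontang/go/log"
)

func main() {
    // Rotate app.log once it reaches ~1 MB, keeping at most 5 backups
    // (app.log.1 ... app.log.5); the oldest backup is overwritten first.
    h, err := log.NewRotatingFileHandler("./logs/app.log", 1024*1024, 5)
    if err != nil {
        panic(err)
    }
    l := log.NewDefault(h)
    defer l.Close()

    l.Infof("service started, pid=%d", os.Getpid())
}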
vendor/github.com/siddontang/go/log/handler.go (48 lines deleted, generated, vendored)
@@ -1,48 +0,0 @@
package log

import (
    "io"
)

// Handler writes logs somewhere.
type Handler interface {
    Write(p []byte) (n int, err error)
    Close() error
}

// StreamHandler writes logs to a specified io.Writer, e.g. stdout, stderr, etc...
type StreamHandler struct {
    w io.Writer
}

func NewStreamHandler(w io.Writer) (*StreamHandler, error) {
    h := new(StreamHandler)

    h.w = w

    return h, nil
}

func (h *StreamHandler) Write(b []byte) (n int, err error) {
    return h.w.Write(b)
}

func (h *StreamHandler) Close() error {
    return nil
}

// NullHandler does nothing; it discards everything.
type NullHandler struct {
}

func NewNullHandler() (*NullHandler, error) {
    return new(NullHandler), nil
}

func (h *NullHandler) Write(b []byte) (n int, err error) {
    return len(b), nil
}

func (h *NullHandler) Close() error {
    return nil
}
vendor/github.com/siddontang/go/log/log.go (366 lines deleted, generated, vendored)
@@ -1,366 +0,0 @@
package log

import (
    "fmt"
    "os"
    "runtime"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"
    "time"
)

// Log levels, from low to high; a higher level means more serious.
const (
    LevelTrace = iota
    LevelDebug
    LevelInfo
    LevelWarn
    LevelError
    LevelFatal
)

const (
    Ltime  = 1 << iota // time format "2006/01/02 15:04:05"
    Lfile              // file.go:123
    Llevel             // [Trace|Debug|Info...]
)

var LevelName [6]string = [6]string{"Trace", "Debug", "Info", "Warn", "Error", "Fatal"}

const TimeFormat = "2006/01/02 15:04:05"

const maxBufPoolSize = 16

type atomicInt32 int32

func (i *atomicInt32) Set(n int) {
    atomic.StoreInt32((*int32)(i), int32(n))
}

func (i *atomicInt32) Get() int {
    return int(atomic.LoadInt32((*int32)(i)))
}

type Logger struct {
    level atomicInt32
    flag  int

    hMutex  sync.Mutex
    handler Handler

    quit chan struct{}
    msg  chan []byte

    bufMutex sync.Mutex
    bufs     [][]byte

    wg sync.WaitGroup

    closed atomicInt32
}

// New creates a logger with the specified handler and flag.
func New(handler Handler, flag int) *Logger {
    var l = new(Logger)

    l.level.Set(LevelInfo)
    l.handler = handler

    l.flag = flag

    l.quit = make(chan struct{})
    l.closed.Set(0)

    l.msg = make(chan []byte, 1024)

    l.bufs = make([][]byte, 0, 16)

    l.wg.Add(1)
    go l.run()

    return l
}

// NewDefault creates a logger with the specified handler and the flag Ltime|Lfile|Llevel.
func NewDefault(handler Handler) *Logger {
    return New(handler, Ltime|Lfile|Llevel)
}

func newStdHandler() *StreamHandler {
    h, _ := NewStreamHandler(os.Stdout)
    return h
}

var std = NewDefault(newStdHandler())

func (l *Logger) run() {
    defer l.wg.Done()
    for {
        select {
        case msg := <-l.msg:
            l.hMutex.Lock()
            l.handler.Write(msg)
            l.hMutex.Unlock()
            l.putBuf(msg)
        case <-l.quit:
            // drain all pending messages before quitting
            if len(l.msg) == 0 {
                return
            }
        }
    }
}

func (l *Logger) popBuf() []byte {
    l.bufMutex.Lock()
    var buf []byte
    if len(l.bufs) == 0 {
        buf = make([]byte, 0, 1024)
    } else {
        buf = l.bufs[len(l.bufs)-1]
        l.bufs = l.bufs[0 : len(l.bufs)-1]
    }
    l.bufMutex.Unlock()

    return buf
}

func (l *Logger) putBuf(buf []byte) {
    l.bufMutex.Lock()
    if len(l.bufs) < maxBufPoolSize {
        buf = buf[0:0]
        l.bufs = append(l.bufs, buf)
    }
    l.bufMutex.Unlock()
}

func (l *Logger) Close() {
    if l.closed.Get() == 1 {
        return
    }
    l.closed.Set(1)

    close(l.quit)

    l.wg.Wait()

    l.quit = nil

    l.handler.Close()
}

// SetLevel sets the log level; any message below that level will not be logged.
func (l *Logger) SetLevel(level int) {
    l.level.Set(level)
}

// name can be in ["trace", "debug", "info", "warn", "error", "fatal"]
func (l *Logger) SetLevelByName(name string) {
    name = strings.ToLower(name)
    switch name {
    case "trace":
        l.SetLevel(LevelTrace)
    case "debug":
        l.SetLevel(LevelDebug)
    case "info":
        l.SetLevel(LevelInfo)
    case "warn":
        l.SetLevel(LevelWarn)
    case "error":
        l.SetLevel(LevelError)
    case "fatal":
        l.SetLevel(LevelFatal)
    }
}

func (l *Logger) SetHandler(h Handler) {
    if l.closed.Get() == 1 {
        return
    }

    l.hMutex.Lock()
    if l.handler != nil {
        l.handler.Close()
    }
    l.handler = h
    l.hMutex.Unlock()
}

func (l *Logger) Output(callDepth int, level int, s string) {
    if l.closed.Get() == 1 {
        // closed
        return
    }

    if l.level.Get() > level {
        // only messages at or above the configured level are logged
        return
    }

    buf := l.popBuf()

    if l.flag&Ltime > 0 {
        now := time.Now().Format(TimeFormat)
        buf = append(buf, '[')
        buf = append(buf, now...)
        buf = append(buf, "] "...)
    }

    if l.flag&Lfile > 0 {
        _, file, line, ok := runtime.Caller(callDepth)
        if !ok {
            file = "???"
            line = 0
        } else {
            for i := len(file) - 1; i > 0; i-- {
                if file[i] == '/' {
                    file = file[i+1:]
                    break
                }
            }
        }

        buf = append(buf, file...)
        buf = append(buf, ':')

        buf = strconv.AppendInt(buf, int64(line), 10)
        buf = append(buf, ' ')
    }

    if l.flag&Llevel > 0 {
        buf = append(buf, '[')
        buf = append(buf, LevelName[level]...)
        buf = append(buf, "] "...)
    }

    buf = append(buf, s...)

    if len(s) == 0 || s[len(s)-1] != '\n' { // guard against empty messages
        buf = append(buf, '\n')
    }

    l.msg <- buf
}

// log with Trace level
func (l *Logger) Trace(v ...interface{}) {
    l.Output(2, LevelTrace, fmt.Sprint(v...))
}

// log with Debug level
func (l *Logger) Debug(v ...interface{}) {
    l.Output(2, LevelDebug, fmt.Sprint(v...))
}

// log with Info level
func (l *Logger) Info(v ...interface{}) {
    l.Output(2, LevelInfo, fmt.Sprint(v...))
}

// log with Warn level
func (l *Logger) Warn(v ...interface{}) {
    l.Output(2, LevelWarn, fmt.Sprint(v...))
}

// log with Error level
func (l *Logger) Error(v ...interface{}) {
    l.Output(2, LevelError, fmt.Sprint(v...))
}

// log with Fatal level
func (l *Logger) Fatal(v ...interface{}) {
    l.Output(2, LevelFatal, fmt.Sprint(v...))
}

// log with Trace level
func (l *Logger) Tracef(format string, v ...interface{}) {
    l.Output(2, LevelTrace, fmt.Sprintf(format, v...))
}

// log with Debug level
func (l *Logger) Debugf(format string, v ...interface{}) {
    l.Output(2, LevelDebug, fmt.Sprintf(format, v...))
}

// log with Info level
func (l *Logger) Infof(format string, v ...interface{}) {
    l.Output(2, LevelInfo, fmt.Sprintf(format, v...))
}

// log with Warn level
func (l *Logger) Warnf(format string, v ...interface{}) {
    l.Output(2, LevelWarn, fmt.Sprintf(format, v...))
}

// log with Error level
func (l *Logger) Errorf(format string, v ...interface{}) {
    l.Output(2, LevelError, fmt.Sprintf(format, v...))
}

// log with Fatal level
func (l *Logger) Fatalf(format string, v ...interface{}) {
    l.Output(2, LevelFatal, fmt.Sprintf(format, v...))
}

func SetLevel(level int) {
    std.SetLevel(level)
}

// name can be in ["trace", "debug", "info", "warn", "error", "fatal"]
func SetLevelByName(name string) {
    std.SetLevelByName(name)
}

func SetHandler(h Handler) {
    std.SetHandler(h)
}

func Trace(v ...interface{}) {
    std.Output(2, LevelTrace, fmt.Sprint(v...))
}

func Debug(v ...interface{}) {
    std.Output(2, LevelDebug, fmt.Sprint(v...))
}

func Info(v ...interface{}) {
    std.Output(2, LevelInfo, fmt.Sprint(v...))
}

func Warn(v ...interface{}) {
    std.Output(2, LevelWarn, fmt.Sprint(v...))
}

func Error(v ...interface{}) {
    std.Output(2, LevelError, fmt.Sprint(v...))
}

func Fatal(v ...interface{}) {
    std.Output(2, LevelFatal, fmt.Sprint(v...))
}

func Tracef(format string, v ...interface{}) {
    std.Output(2, LevelTrace, fmt.Sprintf(format, v...))
}

func Debugf(format string, v ...interface{}) {
    std.Output(2, LevelDebug, fmt.Sprintf(format, v...))
}

func Infof(format string, v ...interface{}) {
    std.Output(2, LevelInfo, fmt.Sprintf(format, v...))
}

func Warnf(format string, v ...interface{}) {
    std.Output(2, LevelWarn, fmt.Sprintf(format, v...))
}

func Errorf(format string, v ...interface{}) {
    std.Output(2, LevelError, fmt.Sprintf(format, v...))
}

func Fatalf(format string, v ...interface{}) {
    std.Output(2, LevelFatal, fmt.Sprintf(format, v...))
}
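A sketch of level filtering with this removed logger, assuming the upstream module github.com/siddontang/go/log is importable. Note that writes go through an asynchronous channel, so Close is needed to drain pending messages:

package main

import (
    "os"

    "github.com/siddontang/go/log"
)

func main() {
    h, _ := log.NewStreamHandler(os.Stdout)
    l := log.NewDefault(h)
    defer l.Close() // Close drains the async message channel first

    l.SetLevelByName("warn")

    l.Debug("dropped: below the configured level")
    l.Warnf("disk usage at %d%%", 91) // printed with time, file:line and level
}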
vendor/github.com/siddontang/go/log/sockethandler.go (65 lines deleted, generated, vendored)
@@ -1,65 +0,0 @@
package log

import (
    "encoding/binary"
    "net"
    "time"
)

// SocketHandler writes log to a connection.
// The network protocol is simple: log length + log | log length + log; the log
// length is a big-endian uint32.
// You must implement your own log server, or you can simply use logd instead.
type SocketHandler struct {
    c        net.Conn
    protocol string
    addr     string
}

func NewSocketHandler(protocol string, addr string) (*SocketHandler, error) {
    s := new(SocketHandler)

    s.protocol = protocol
    s.addr = addr

    return s, nil
}

func (h *SocketHandler) Write(p []byte) (n int, err error) {
    if err = h.connect(); err != nil {
        return
    }

    buf := make([]byte, len(p)+4)

    binary.BigEndian.PutUint32(buf, uint32(len(p)))

    copy(buf[4:], p)

    n, err = h.c.Write(buf)
    if err != nil {
        h.c.Close()
        h.c = nil
    }
    return
}

func (h *SocketHandler) Close() error {
    if h.c != nil {
        h.c.Close()
    }
    return nil
}

func (h *SocketHandler) connect() error {
    if h.c != nil {
        return nil
    }

    var err error
    h.c, err = net.DialTimeout(h.protocol, h.addr, 20*time.Second)
    if err != nil {
        return err
    }

    return nil
}
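To make the framing concrete, here is a minimal receiver sketch for SocketHandler's protocol (each record is a big-endian uint32 length followed by that many bytes of log text); the listen address is illustrative:

package main

import (
    "encoding/binary"
    "io"
    "log"
    "net"
    "os"
)

func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:5140") // illustrative port
    if err != nil {
        log.Fatal(err)
    }
    for {
        c, err := ln.Accept()
        if err != nil {
            log.Fatal(err)
        }
        go func(c net.Conn) {
            defer c.Close()
            var hdr [4]byte
            for {
                // Read the 4-byte big-endian length prefix, then the payload.
                if _, err := io.ReadFull(c, hdr[:]); err != nil {
                    return
                }
                msg := make([]byte, binary.BigEndian.Uint32(hdr[:]))
                if _, err := io.ReadFull(c, msg); err != nil {
                    return
                }
                os.Stdout.Write(msg)
            }
        }(c)
    }
}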
vendor/github.com/siddontang/go/num/bytes.go (67 lines deleted, generated, vendored)
@@ -1,67 +0,0 @@
package num

import (
    "encoding/binary"
)

// All conversions use big-endian byte order.

func BytesToUint16(b []byte) uint16 {
    return binary.BigEndian.Uint16(b)
}

func Uint16ToBytes(u uint16) []byte {
    buf := make([]byte, 2)
    binary.BigEndian.PutUint16(buf, u)
    return buf
}

func BytesToUint32(b []byte) uint32 {
    return binary.BigEndian.Uint32(b)
}

func Uint32ToBytes(u uint32) []byte {
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, u)
    return buf
}

func BytesToUint64(b []byte) uint64 {
    return binary.BigEndian.Uint64(b)
}

func Uint64ToBytes(u uint64) []byte {
    buf := make([]byte, 8)
    binary.BigEndian.PutUint64(buf, u)
    return buf
}

func BytesToInt16(b []byte) int16 {
    return int16(binary.BigEndian.Uint16(b))
}

func Int16ToBytes(u int16) []byte {
    buf := make([]byte, 2)
    binary.BigEndian.PutUint16(buf, uint16(u))
    return buf
}

func BytesToInt32(b []byte) int32 {
    return int32(binary.BigEndian.Uint32(b))
}

func Int32ToBytes(u int32) []byte {
    buf := make([]byte, 4)
    binary.BigEndian.PutUint32(buf, uint32(u))
    return buf
}

func BytesToInt64(b []byte) int64 {
    return int64(binary.BigEndian.Uint64(b))
}

func Int64ToBytes(u int64) []byte {
    buf := make([]byte, 8)
    binary.BigEndian.PutUint64(buf, uint64(u))
    return buf
}
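A quick roundtrip sketch for these helpers, assuming the upstream module github.com/siddontang/go/num is importable:

package main

import (
    "fmt"

    "github.com/siddontang/go/num"
)

func main() {
    buf := num.Uint32ToBytes(0xDEADBEEF)
    fmt.Printf("% x\n", buf)                    // de ad be ef (big-endian)
    fmt.Printf("%#x\n", num.BytesToUint32(buf)) // 0xdeadbeef
}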
vendor/github.com/siddontang/go/num/cmp.go (161 lines deleted, generated, vendored)
@@ -1,161 +0,0 @@
package num

func MinUint(a uint, b uint) uint {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxUint(a uint, b uint) uint {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinInt(a int, b int) int {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxInt(a int, b int) int {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinUint8(a uint8, b uint8) uint8 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxUint8(a uint8, b uint8) uint8 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinInt8(a int8, b int8) int8 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxInt8(a int8, b int8) int8 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinUint16(a uint16, b uint16) uint16 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxUint16(a uint16, b uint16) uint16 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinInt16(a int16, b int16) int16 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxInt16(a int16, b int16) int16 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinUint32(a uint32, b uint32) uint32 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxUint32(a uint32, b uint32) uint32 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinInt32(a int32, b int32) int32 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxInt32(a int32, b int32) int32 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinUint64(a uint64, b uint64) uint64 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxUint64(a uint64, b uint64) uint64 {
    if a > b {
        return a
    } else {
        return b
    }
}

func MinInt64(a int64, b int64) int64 {
    if a > b {
        return b
    } else {
        return a
    }
}

func MaxInt64(a int64, b int64) int64 {
    if a > b {
        return a
    } else {
        return b
    }
}
vendor/github.com/siddontang/go/num/str.go (157 lines deleted, generated, vendored)
@@ -1,157 +0,0 @@
package num

import (
    "strconv"
)

func ParseUint(s string) (uint, error) {
    if v, err := strconv.ParseUint(s, 10, 0); err != nil {
        return 0, err
    } else {
        return uint(v), nil
    }
}

func ParseUint8(s string) (uint8, error) {
    if v, err := strconv.ParseUint(s, 10, 8); err != nil {
        return 0, err
    } else {
        return uint8(v), nil
    }
}

func ParseUint16(s string) (uint16, error) {
    if v, err := strconv.ParseUint(s, 10, 16); err != nil {
        return 0, err
    } else {
        return uint16(v), nil
    }
}

func ParseUint32(s string) (uint32, error) {
    if v, err := strconv.ParseUint(s, 10, 32); err != nil {
        return 0, err
    } else {
        return uint32(v), nil
    }
}

func ParseUint64(s string) (uint64, error) {
    return strconv.ParseUint(s, 10, 64)
}

func ParseInt(s string) (int, error) {
    if v, err := strconv.ParseInt(s, 10, 0); err != nil {
        return 0, err
    } else {
        return int(v), nil
    }
}

func ParseInt8(s string) (int8, error) {
    if v, err := strconv.ParseInt(s, 10, 8); err != nil {
        return 0, err
    } else {
        return int8(v), nil
    }
}

func ParseInt16(s string) (int16, error) {
    if v, err := strconv.ParseInt(s, 10, 16); err != nil {
        return 0, err
    } else {
        return int16(v), nil
    }
}

func ParseInt32(s string) (int32, error) {
    if v, err := strconv.ParseInt(s, 10, 32); err != nil {
        return 0, err
    } else {
        return int32(v), nil
    }
}

func ParseInt64(s string) (int64, error) {
    return strconv.ParseInt(s, 10, 64)
}

func FormatInt(v int) string {
    return strconv.FormatInt(int64(v), 10)
}

func FormatInt8(v int8) string {
    return strconv.FormatInt(int64(v), 10)
}

func FormatInt16(v int16) string {
    return strconv.FormatInt(int64(v), 10)
}

func FormatInt32(v int32) string {
    return strconv.FormatInt(int64(v), 10)
}

func FormatInt64(v int64) string {
    return strconv.FormatInt(int64(v), 10)
}

func FormatUint(v uint) string {
    return strconv.FormatUint(uint64(v), 10)
}

func FormatUint8(v uint8) string {
    return strconv.FormatUint(uint64(v), 10)
}

func FormatUint16(v uint16) string {
    return strconv.FormatUint(uint64(v), 10)
}

func FormatUint32(v uint32) string {
    return strconv.FormatUint(uint64(v), 10)
}

func FormatUint64(v uint64) string {
    return strconv.FormatUint(uint64(v), 10)
}

func FormatIntToSlice(v int) []byte {
    return strconv.AppendInt(nil, int64(v), 10)
}

func FormatInt8ToSlice(v int8) []byte {
    return strconv.AppendInt(nil, int64(v), 10)
}

func FormatInt16ToSlice(v int16) []byte {
    return strconv.AppendInt(nil, int64(v), 10)
}

func FormatInt32ToSlice(v int32) []byte {
    return strconv.AppendInt(nil, int64(v), 10)
}

func FormatInt64ToSlice(v int64) []byte {
    return strconv.AppendInt(nil, int64(v), 10)
}

func FormatUintToSlice(v uint) []byte {
    return strconv.AppendUint(nil, uint64(v), 10)
}

func FormatUint8ToSlice(v uint8) []byte {
    return strconv.AppendUint(nil, uint64(v), 10)
}

func FormatUint16ToSlice(v uint16) []byte {
    return strconv.AppendUint(nil, uint64(v), 10)
}

func FormatUint32ToSlice(v uint32) []byte {
    return strconv.AppendUint(nil, uint64(v), 10)
}

func FormatUint64ToSlice(v uint64) []byte {
    return strconv.AppendUint(nil, uint64(v), 10)
}
vendor/github.com/siddontang/go/snappy/LICENSE (27 lines deleted, generated, vendored)
@@ -1,27 +0,0 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/siddontang/go/snappy/decode.go (124 lines deleted, generated, vendored)
@@ -1,124 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
    "encoding/binary"
    "errors"
)

// ErrCorrupt reports that the input is invalid.
var ErrCorrupt = errors.New("snappy: corrupt input")

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
    v, _, err := decodedLen(src)
    return v, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
    v, n := binary.Uvarint(src)
    if n == 0 {
        return 0, 0, ErrCorrupt
    }
    if uint64(int(v)) != v {
        return 0, 0, errors.New("snappy: decoded block is too large")
    }
    return int(v), n, nil
}

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
    dLen, s, err := decodedLen(src)
    if err != nil {
        return nil, err
    }
    if len(dst) < dLen {
        dst = make([]byte, dLen)
    }

    var d, offset, length int
    for s < len(src) {
        switch src[s] & 0x03 {
        case tagLiteral:
            x := uint(src[s] >> 2)
            switch {
            case x < 60:
                s += 1
            case x == 60:
                s += 2
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-1])
            case x == 61:
                s += 3
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-2]) | uint(src[s-1])<<8
            case x == 62:
                s += 4
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
            case x == 63:
                s += 5
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
            }
            length = int(x + 1)
            if length <= 0 {
                return nil, errors.New("snappy: unsupported literal length")
            }
            if length > len(dst)-d || length > len(src)-s {
                return nil, ErrCorrupt
            }
            copy(dst[d:], src[s:s+length])
            d += length
            s += length
            continue

        case tagCopy1:
            s += 2
            if s > len(src) {
                return nil, ErrCorrupt
            }
            length = 4 + int(src[s-2])>>2&0x7
            offset = int(src[s-2])&0xe0<<3 | int(src[s-1])

        case tagCopy2:
            s += 3
            if s > len(src) {
                return nil, ErrCorrupt
            }
            length = 1 + int(src[s-3])>>2
            offset = int(src[s-2]) | int(src[s-1])<<8

        case tagCopy4:
            return nil, errors.New("snappy: unsupported COPY_4 tag")
        }

        end := d + length
        if offset > d || end > len(dst) {
            return nil, ErrCorrupt
        }
        for ; d < end; d++ {
            dst[d] = dst[d-offset]
        }
    }
    if d != dLen {
        return nil, ErrCorrupt
    }
    return dst[:d], nil
}
vendor/github.com/siddontang/go/snappy/encode.go (174 lines deleted, generated, vendored)
@@ -1,174 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
    "encoding/binary"
)

// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
    i, n := 0, uint(len(lit)-1)
    switch {
    case n < 60:
        dst[0] = uint8(n)<<2 | tagLiteral
        i = 1
    case n < 1<<8:
        dst[0] = 60<<2 | tagLiteral
        dst[1] = uint8(n)
        i = 2
    case n < 1<<16:
        dst[0] = 61<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        i = 3
    case n < 1<<24:
        dst[0] = 62<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        dst[3] = uint8(n >> 16)
        i = 4
    case int64(n) < 1<<32:
        dst[0] = 63<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        dst[3] = uint8(n >> 16)
        dst[4] = uint8(n >> 24)
        i = 5
    default:
        panic("snappy: source buffer is too long")
    }
    if copy(dst[i:], lit) != len(lit) {
        panic("snappy: destination buffer is too short")
    }
    return i + len(lit)
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
    i := 0
    for length > 0 {
        x := length - 4
        if 0 <= x && x < 1<<3 && offset < 1<<11 {
            dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
            dst[i+1] = uint8(offset)
            i += 2
            break
        }

        x = length
        if x > 1<<6 {
            x = 1 << 6
        }
        dst[i+0] = uint8(x-1)<<2 | tagCopy2
        dst[i+1] = uint8(offset)
        dst[i+2] = uint8(offset >> 8)
        i += 3
        length -= x
    }
    return i
}

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) ([]byte, error) {
    if n := MaxEncodedLen(len(src)); len(dst) < n {
        dst = make([]byte, n)
    }

    // The block starts with the varint-encoded length of the decompressed bytes.
    d := binary.PutUvarint(dst, uint64(len(src)))

    // Return early if src is short.
    if len(src) <= 4 {
        if len(src) != 0 {
            d += emitLiteral(dst[d:], src)
        }
        return dst[:d], nil
    }

    // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
    const maxTableSize = 1 << 14
    shift, tableSize := uint(32-8), 1<<8
    for tableSize < maxTableSize && tableSize < len(src) {
        shift--
        tableSize *= 2
    }
    var table [maxTableSize]int

    // Iterate over the source bytes.
    var (
        s   int // The iterator position.
        t   int // The last position with the same hash as s.
        lit int // The start position of any pending literal bytes.
    )
    for s+3 < len(src) {
        // Update the hash table.
        b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
        h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
        p := &table[(h*0x1e35a7bd)>>shift]
        // We need to store values in [-1, inf) in table. To save
        // some initialization time, (re)use the table's zero value
        // and shift the values against this zero: add 1 on writes,
        // subtract 1 on reads.
        t, *p = *p-1, s+1
        // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
        if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
            s++
            continue
        }
        // Otherwise, we have a match. First, emit any pending literal bytes.
        if lit != s {
            d += emitLiteral(dst[d:], src[lit:s])
        }
        // Extend the match to be as long as possible.
        s0 := s
        s, t = s+4, t+4
        for s < len(src) && src[s] == src[t] {
            s++
            t++
        }
        // Emit the copied bytes.
        d += emitCopy(dst[d:], s-t, s-s0)
        lit = s
    }

    // Emit any final pending literal bytes and return.
    if lit != len(src) {
        d += emitLiteral(dst[d:], src[lit:])
    }
    return dst[:d], nil
}

// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
    // Compressed data can be defined as:
    //    compressed := item* literal*
    //    item       := literal* copy
    //
    // The trailing literal sequence has a space blowup of at most 62/60
    // since a literal of length 60 needs one tag byte + one extra byte
    // for length information.
    //
    // Item blowup is trickier to measure. Suppose the "copy" op copies
    // 4 bytes of data. Because of a special check in the encoding code,
    // we produce a 4-byte copy only if the offset is < 65536. Therefore
    // the copy op takes 3 bytes to encode, and this type of item leads
    // to at most the 62/60 blowup for representing literals.
    //
    // Suppose the "copy" op copies 5 bytes of data. If the offset is big
    // enough, it will take 5 bytes to encode the copy op. Therefore the
    // worst case here is a one-byte literal followed by a five-byte copy.
    // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
    //
    // This last factor dominates the blowup, so the final estimate is:
    return 32 + srcLen + srcLen/6
}
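A roundtrip sketch for this removed snappy package, assuming the upstream module github.com/siddontang/go/snappy is importable; passing nil as dst lets the package allocate:

package main

import (
    "bytes"
    "fmt"

    "github.com/siddontang/go/snappy"
)

func main() {
    src := bytes.Repeat([]byte("hello snappy "), 100)

    enc, err := snappy.Encode(nil, src)
    if err != nil {
        panic(err)
    }
    dec, err := snappy.Decode(nil, enc)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d -> %d bytes, roundtrip ok: %v\n",
        len(src), len(enc), bytes.Equal(src, dec))
}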
vendor/github.com/siddontang/go/snappy/snappy.go (38 lines deleted, generated, vendored)
@@ -1,38 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
    tagLiteral = 0x00
    tagCopy1   = 0x01
    tagCopy2   = 0x02
    tagCopy4   = 0x03
)
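The first-byte layout described above reduces to a two-line helper; a minimal sketch (the package and function names are illustrative):

package snappydemo

// chunkTag splits a chunk's first byte into the 2-bit chunk tag l
// (0 = literal, 1-3 = copy) and the 6-bit value m described above.
func chunkTag(b byte) (l, m uint) {
    return uint(b & 0x03), uint(b >> 2)
}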
146
vendor/github.com/siddontang/go/sync2/atomic.go
generated
vendored
146
vendor/github.com/siddontang/go/sync2/atomic.go
generated
vendored
@ -1,146 +0,0 @@
// Copyright 2013, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync2

import (
    "sync"
    "sync/atomic"
    "time"
)

type AtomicInt32 int32

func (i *AtomicInt32) Add(n int32) int32 {
    return atomic.AddInt32((*int32)(i), n)
}

func (i *AtomicInt32) Set(n int32) {
    atomic.StoreInt32((*int32)(i), n)
}

func (i *AtomicInt32) Get() int32 {
    return atomic.LoadInt32((*int32)(i))
}

func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
    return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
}

type AtomicUint32 uint32

func (i *AtomicUint32) Add(n uint32) uint32 {
    return atomic.AddUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Set(n uint32) {
    atomic.StoreUint32((*uint32)(i), n)
}

func (i *AtomicUint32) Get() uint32 {
    return atomic.LoadUint32((*uint32)(i))
}

func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
    return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
}

type AtomicInt64 int64

func (i *AtomicInt64) Add(n int64) int64 {
    return atomic.AddInt64((*int64)(i), n)
}

func (i *AtomicInt64) Set(n int64) {
    atomic.StoreInt64((*int64)(i), n)
}

func (i *AtomicInt64) Get() int64 {
    return atomic.LoadInt64((*int64)(i))
}

func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
    return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
}

type AtomicUint64 uint64

func (i *AtomicUint64) Add(n uint64) uint64 {
    return atomic.AddUint64((*uint64)(i), n)
}

func (i *AtomicUint64) Set(n uint64) {
    atomic.StoreUint64((*uint64)(i), n)
}

func (i *AtomicUint64) Get() uint64 {
    return atomic.LoadUint64((*uint64)(i))
}

func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) {
    return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval)
}

type AtomicDuration int64

func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
    return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
}

func (d *AtomicDuration) Set(duration time.Duration) {
    atomic.StoreInt64((*int64)(d), int64(duration))
}

func (d *AtomicDuration) Get() time.Duration {
    return time.Duration(atomic.LoadInt64((*int64)(d)))
}

func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
    return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
}

// AtomicString gives you atomic-style APIs for string, but
// it's only a convenience wrapper that uses a mutex. So, it's
// not as efficient as the rest of the atomic types.
type AtomicString struct {
    mu  sync.Mutex
    str string
}

func (s *AtomicString) Set(str string) {
    s.mu.Lock()
    s.str = str
    s.mu.Unlock()
}

func (s *AtomicString) Get() string {
    s.mu.Lock()
    str := s.str
    s.mu.Unlock()
    return str
}

func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if s.str == oldval {
        s.str = newval
        return true
    }
    return false
}

type AtomicBool int32

func (b *AtomicBool) Set(v bool) {
    if v {
        atomic.StoreInt32((*int32)(b), 1)
    } else {
        atomic.StoreInt32((*int32)(b), 0)
    }
}

func (b *AtomicBool) Get() bool {
    return atomic.LoadInt32((*int32)(b)) == 1
}
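For context, a minimal sketch of how these sync2 wrappers are used (illustrative only, not part of the vendored source; the variable names are invented):

package main

import (
    "fmt"

    "github.com/siddontang/go/sync2"
)

func main() {
    var hits sync2.AtomicInt64 // the zero value is ready to use
    hits.Add(3)
    fmt.Println(hits.Get()) // 3

    // CompareAndSwap only swaps when the current value matches.
    fmt.Println(hits.CompareAndSwap(3, 10)) // true
    fmt.Println(hits.Get())                 // 10

    var ready sync2.AtomicBool
    ready.Set(true)
    fmt.Println(ready.Get()) // true
}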
65
vendor/github.com/siddontang/go/sync2/semaphore.go
generated
vendored
@ -1,65 +0,0 @@
package sync2

import (
    "sync"
    "sync/atomic"
    "time"
)

func NewSemaphore(initialCount int) *Semaphore {
    res := &Semaphore{
        counter: int64(initialCount),
    }
    res.cond.L = &res.lock
    return res
}

type Semaphore struct {
    lock    sync.Mutex
    cond    sync.Cond
    counter int64
}

func (s *Semaphore) Release() {
    s.lock.Lock()
    s.counter++
    if s.counter >= 0 {
        s.cond.Signal()
    }
    s.lock.Unlock()
}

func (s *Semaphore) Acquire() {
    s.lock.Lock()
    for s.counter < 1 {
        s.cond.Wait()
    }
    s.counter--
    s.lock.Unlock()
}

func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool {
    done := make(chan bool, 1)
    // Gate used to communicate between the threads and decide what the result
    // is. If the main thread decides, we have timed out, otherwise we succeed.
    decided := new(int32)
    go func() {
        s.Acquire()
        if atomic.SwapInt32(decided, 1) == 0 {
            done <- true
        } else {
            // If we already decided the result, and this thread did not win
            s.Release()
        }
    }()
    select {
    case <-done:
        return true
    case <-time.NewTimer(timeout).C:
        if atomic.SwapInt32(decided, 1) == 1 {
            // The other thread already decided the result
            return true
        }
        return false
    }
}
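A short usage sketch of this counting semaphore, illustrating the bounded wait that AcquireTimeout provides (illustrative only; the timeout value is arbitrary):

package main

import (
    "fmt"
    "time"

    "github.com/siddontang/go/sync2"
)

func main() {
    s := sync2.NewSemaphore(1) // one slot available

    s.Acquire() // takes the only slot

    // The slot is taken, so this bounded acquire should time out.
    if s.AcquireTimeout(50 * time.Millisecond) {
        fmt.Println("acquired")
        s.Release()
    } else {
        fmt.Println("timed out")
    }

    s.Release() // give the slot back
}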
21
vendor/github.com/siddontang/ledisdb/LICENSE
generated
vendored
@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 siddontang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
170
vendor/github.com/siddontang/ledisdb/config/config-docker.toml
generated
vendored
@ -1,170 +0,0 @@
# LedisDB configuration

# Server listen address
addr = "0.0.0.0:6380"

# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"

# Server http listen address, set empty to disable
http_addr = "0.0.0.0:11181"

# Data store path, all ledisdb's data will be saved here
data_dir = "/datastore"

# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Default databases is 16, maximum is 10240 now.
databases = 16

# Log server commands; set empty to disable
access_log = ""

# Set slaveof to enable replication from a master; if empty, no replication.
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""

# Readonly mode; a slave server is always readonly even if readonly = false.
# In readonly mode, only replication and flushall can write.
readonly = false

# Choose which backend storage to use; currently supported:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"

# If not set, use data_dir/"db_name"_data
db_path = ""

# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0

# enable replication or not
use_replication = false

# Set the connection buffer sizes; you can increase them appropriately.
# Bigger sizes use more memory.
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240

# If a connection receives no data after n seconds, it may be dead; close it.
# 0 to disable the check.
conn_keepalive_interval = 0

# Check TTL (time to live) data every n seconds.
# If set too big, expired data may not be deleted promptly.
ttl_check_interval = 1

[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432

[rocksdb]
# rocksdb has many, many configuration options;
# we only list a few now, but may add more later.
# good luck!

# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set true: writes may get lost after a crash.
# You can set true if replication is enabled; we may recover from the replication log,
# but it is still not easy.
disable_wal = false
max_manifest_file_size = 20971520

[lmdb]
map_size = 524288000
nosync = true

[replication]
# Path to store replication information (write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""

# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but gives better high availability.
sync = false

# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500

# If sync is true, wait for at most min(wait_max_slave_acks, (n + 1) / 2) acks to consider syncing ok.
# n is the number of slaves.
# If 0, wait for (n + 1) / 2 acks.
wait_max_slave_acks = 2

# Store name: file, goleveldb.
# Changing it at runtime is very dangerous.
store_name = "file"

# Expire write ahead logs after the given days
expired_log_days = 7

# For the file store; if 0, use the default 256MB; max is 1G
max_log_file_size = 0

# For the file store; if 0, use the default 50
max_log_file_num = 0

# For the file store, use mmap for file reads and writes
use_mmap = true

# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0

# Compress the log or not
compression = false

[snapshot]
# Path to store snapshot dump files
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""

# Keep the newest max_num snapshot dump files
max_num = 1

[tls]
enabled = false
certificate = "test.crt"
key = "test.key"
316
vendor/github.com/siddontang/ledisdb/config/config.go
generated
vendored
@ -1,316 +0,0 @@
package config

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "sync"

    "github.com/pelletier/go-toml"
    "github.com/siddontang/go/ioutil2"
)

var (
    ErrNoConfigFile = errors.New("Running without a config file")
)

const (
    DefaultAddr string = "127.0.0.1:6380"

    DefaultDBName string = "goleveldb"

    DefaultDataDir string = "./var"

    KB int = 1024
    MB int = KB * 1024
    GB int = MB * 1024
)

type LevelDBConfig struct {
    Compression     bool `toml:"compression"`
    BlockSize       int  `toml:"block_size"`
    WriteBufferSize int  `toml:"write_buffer_size"`
    CacheSize       int  `toml:"cache_size"`
    MaxOpenFiles    int  `toml:"max_open_files"`
    MaxFileSize     int  `toml:"max_file_size"`
}

type RocksDBConfig struct {
    Compression                    int  `toml:"compression"`
    BlockSize                      int  `toml:"block_size"`
    WriteBufferSize                int  `toml:"write_buffer_size"`
    CacheSize                      int  `toml:"cache_size"`
    MaxOpenFiles                   int  `toml:"max_open_files"`
    MaxWriteBufferNum              int  `toml:"max_write_buffer_num"`
    MinWriteBufferNumberToMerge    int  `toml:"min_write_buffer_number_to_merge"`
    NumLevels                      int  `toml:"num_levels"`
    Level0FileNumCompactionTrigger int  `toml:"level0_file_num_compaction_trigger"`
    Level0SlowdownWritesTrigger    int  `toml:"level0_slowdown_writes_trigger"`
    Level0StopWritesTrigger        int  `toml:"level0_stop_writes_trigger"`
    TargetFileSizeBase             int  `toml:"target_file_size_base"`
    TargetFileSizeMultiplier       int  `toml:"target_file_size_multiplier"`
    MaxBytesForLevelBase           int  `toml:"max_bytes_for_level_base"`
    MaxBytesForLevelMultiplier     int  `toml:"max_bytes_for_level_multiplier"`
    DisableAutoCompactions         bool `toml:"disable_auto_compactions"`
    UseFsync                       bool `toml:"use_fsync"`
    MaxBackgroundCompactions       int  `toml:"max_background_compactions"`
    MaxBackgroundFlushes           int  `toml:"max_background_flushes"`
    EnableStatistics               bool `toml:"enable_statistics"`
    StatsDumpPeriodSec             int  `toml:"stats_dump_period_sec"`
    BackgroundThreads              int  `toml:"background_theads"`
    HighPriorityBackgroundThreads  int  `toml:"high_priority_background_threads"`
    DisableWAL                     bool `toml:"disable_wal"`
    MaxManifestFileSize            int  `toml:"max_manifest_file_size"`
}

type LMDBConfig struct {
    MapSize int  `toml:"map_size"`
    NoSync  bool `toml:"nosync"`
}

type ReplicationConfig struct {
    Path             string `toml:"path"`
    Sync             bool   `toml:"sync"`
    WaitSyncTime     int    `toml:"wait_sync_time"`
    WaitMaxSlaveAcks int    `toml:"wait_max_slave_acks"`
    ExpiredLogDays   int    `toml:"expired_log_days"`
    StoreName        string `toml:"store_name"`
    MaxLogFileSize   int64  `toml:"max_log_file_size"`
    MaxLogFileNum    int    `toml:"max_log_file_num"`
    SyncLog          int    `toml:"sync_log"`
    Compression      bool   `toml:"compression"`
    UseMmap          bool   `toml:"use_mmap"`
    MasterPassword   string `toml:"master_password"`
}

type SnapshotConfig struct {
    Path   string `toml:"path"`
    MaxNum int    `toml:"max_num"`
}

type TLS struct {
    Enabled     bool   `toml:"enabled"`
    Certificate string `toml:"certificate"`
    Key         string `toml:"key"`
}

type AuthMethod func(c *Config, password string) bool

type Config struct {
    m sync.RWMutex `toml:"-"`

    AuthPassword string `toml:"auth_password"`

    // AuthMethod is a custom authentication method
    AuthMethod AuthMethod `toml:"-"`

    FileName string `toml:"-"`

    // Addr can be empty to assign a local address dynamically
    Addr string `toml:"addr"`

    AddrUnixSocketPerm string `toml:"addr_unixsocketperm"`

    HttpAddr string `toml:"http_addr"`

    SlaveOf string `toml:"slaveof"`

    Readonly bool `toml:"readonly"`

    DataDir string `toml:"data_dir"`

    Databases int `toml:"databases"`

    DBName       string `toml:"db_name"`
    DBPath       string `toml:"db_path"`
    DBSyncCommit int    `toml:"db_sync_commit"`

    LevelDB LevelDBConfig `toml:"leveldb"`
    RocksDB RocksDBConfig `toml:"rocksdb"`

    LMDB LMDBConfig `toml:"lmdb"`

    AccessLog string `toml:"access_log"`

    UseReplication bool              `toml:"use_replication"`
    Replication    ReplicationConfig `toml:"replication"`

    Snapshot SnapshotConfig `toml:"snapshot"`

    ConnReadBufferSize    int `toml:"conn_read_buffer_size"`
    ConnWriteBufferSize   int `toml:"conn_write_buffer_size"`
    ConnKeepaliveInterval int `toml:"conn_keepalive_interval"`

    TTLCheckInterval int `toml:"ttl_check_interval"`

    // TLS config
    TLS TLS `toml:"tls"`
}

func NewConfigWithFile(fileName string) (*Config, error) {
    data, err := ioutil.ReadFile(fileName)
    if err != nil {
        return nil, err
    }

    cfg, err := NewConfigWithData(data)
    if err != nil {
        return nil, err
    }

    cfg.FileName = fileName
    return cfg, nil
}

func NewConfigWithData(data []byte) (*Config, error) {
    cfg := NewConfigDefault()

    if err := toml.Unmarshal(data, cfg); err != nil {
        return nil, fmt.Errorf("NewConfigWithData: unmarshal: %s", err)
    }

    cfg.adjust()

    return cfg, nil
}

func NewConfigDefault() *Config {
    cfg := new(Config)

    cfg.Addr = DefaultAddr
    cfg.HttpAddr = ""

    cfg.DataDir = DefaultDataDir

    cfg.DBName = DefaultDBName

    cfg.SlaveOf = ""
    cfg.Readonly = false

    // Disable Auth by default, by setting password to blank
    cfg.AuthPassword = ""

    // default databases number
    cfg.Databases = 16

    // disable access log
    cfg.AccessLog = ""

    cfg.LMDB.MapSize = 20 * MB
    cfg.LMDB.NoSync = true

    cfg.UseReplication = false
    cfg.Replication.WaitSyncTime = 500
    cfg.Replication.Compression = true
    cfg.Replication.WaitMaxSlaveAcks = 2
    cfg.Replication.SyncLog = 0
    cfg.Replication.UseMmap = true
    cfg.Snapshot.MaxNum = 1

    cfg.RocksDB.EnableStatistics = false
    cfg.RocksDB.UseFsync = false
    cfg.RocksDB.DisableAutoCompactions = false
    cfg.RocksDB.DisableWAL = false

    cfg.adjust()

    return cfg
}

func getDefault(d int, s int) int {
    if s <= 0 {
        return d
    }

    return s
}

func (cfg *Config) adjust() {
    cfg.LevelDB.adjust()

    cfg.RocksDB.adjust()

    cfg.Replication.ExpiredLogDays = getDefault(7, cfg.Replication.ExpiredLogDays)
    cfg.Replication.MaxLogFileNum = getDefault(50, cfg.Replication.MaxLogFileNum)
    cfg.ConnReadBufferSize = getDefault(4*KB, cfg.ConnReadBufferSize)
    cfg.ConnWriteBufferSize = getDefault(4*KB, cfg.ConnWriteBufferSize)
    cfg.TTLCheckInterval = getDefault(1, cfg.TTLCheckInterval)
    cfg.Databases = getDefault(16, cfg.Databases)
}

func (cfg *LevelDBConfig) adjust() {
    cfg.CacheSize = getDefault(4*MB, cfg.CacheSize)
    cfg.BlockSize = getDefault(4*KB, cfg.BlockSize)
    cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize)
    cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles)
    cfg.MaxFileSize = getDefault(32*MB, cfg.MaxFileSize)
}

func (cfg *RocksDBConfig) adjust() {
    cfg.CacheSize = getDefault(4*MB, cfg.CacheSize)
    cfg.BlockSize = getDefault(4*KB, cfg.BlockSize)
    cfg.WriteBufferSize = getDefault(4*MB, cfg.WriteBufferSize)
    cfg.MaxOpenFiles = getDefault(1024, cfg.MaxOpenFiles)
    cfg.MaxWriteBufferNum = getDefault(2, cfg.MaxWriteBufferNum)
    cfg.MinWriteBufferNumberToMerge = getDefault(1, cfg.MinWriteBufferNumberToMerge)
    cfg.NumLevels = getDefault(7, cfg.NumLevels)
    cfg.Level0FileNumCompactionTrigger = getDefault(4, cfg.Level0FileNumCompactionTrigger)
    cfg.Level0SlowdownWritesTrigger = getDefault(16, cfg.Level0SlowdownWritesTrigger)
    cfg.Level0StopWritesTrigger = getDefault(64, cfg.Level0StopWritesTrigger)
    cfg.TargetFileSizeBase = getDefault(32*MB, cfg.TargetFileSizeBase)
    cfg.TargetFileSizeMultiplier = getDefault(1, cfg.TargetFileSizeMultiplier)
    cfg.MaxBytesForLevelBase = getDefault(32*MB, cfg.MaxBytesForLevelBase)
    cfg.MaxBytesForLevelMultiplier = getDefault(1, cfg.MaxBytesForLevelMultiplier)
    cfg.MaxBackgroundCompactions = getDefault(1, cfg.MaxBackgroundCompactions)
    cfg.MaxBackgroundFlushes = getDefault(1, cfg.MaxBackgroundFlushes)
    cfg.StatsDumpPeriodSec = getDefault(3600, cfg.StatsDumpPeriodSec)
    cfg.BackgroundThreads = getDefault(2, cfg.BackgroundThreads)
    cfg.HighPriorityBackgroundThreads = getDefault(1, cfg.HighPriorityBackgroundThreads)
    cfg.MaxManifestFileSize = getDefault(20*MB, cfg.MaxManifestFileSize)
}

func (cfg *Config) Dump(w io.Writer) error {
    data, err := toml.Marshal(*cfg)
    if err != nil {
        return err
    }
    if _, err := w.Write(data); err != nil {
        return err
    }

    return nil
}

func (cfg *Config) DumpFile(fileName string) error {
    var b bytes.Buffer

    if err := cfg.Dump(&b); err != nil {
        return err
    }

    return ioutil2.WriteFileAtomic(fileName, b.Bytes(), 0644)
}

func (cfg *Config) Rewrite() error {
    if len(cfg.FileName) == 0 {
        return ErrNoConfigFile
    }

    return cfg.DumpFile(cfg.FileName)
}

func (cfg *Config) GetReadonly() bool {
    cfg.m.RLock()
    b := cfg.Readonly
    cfg.m.RUnlock()
    return b
}

func (cfg *Config) SetReadonly(b bool) {
    cfg.m.Lock()
    cfg.Readonly = b
    cfg.m.Unlock()
}
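A minimal sketch of loading and dumping this configuration (illustrative only; the "ledis.toml" path is invented):

package main

import (
    "fmt"
    "os"

    "github.com/siddontang/ledisdb/config"
)

func main() {
    // Fall back to the built-in defaults when no file is present.
    cfg, err := config.NewConfigWithFile("ledis.toml")
    if err != nil {
        cfg = config.NewConfigDefault()
    }

    fmt.Println(cfg.Addr, cfg.DBName, cfg.Databases)

    // Dump writes the effective configuration back out as TOML.
    cfg.Dump(os.Stdout)
}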
170
vendor/github.com/siddontang/ledisdb/config/config.toml
generated
vendored
@ -1,170 +0,0 @@
# LedisDB configuration

# Server listen address
addr = "127.0.0.1:6380"

# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"

# Server http listen address, set empty to disable
http_addr = "127.0.0.1:11181"

# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"

# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Default databases is 16, maximum is 10240 now.
databases = 16

# Log server commands; set empty to disable
access_log = ""

# Set slaveof to enable replication from a master; if empty, no replication.
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""

# Readonly mode; a slave server is always readonly even if readonly = false.
# In readonly mode, only replication and flushall can write.
readonly = false

# Choose which backend storage to use; currently supported:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"

# If not set, use data_dir/"db_name"_data
db_path = ""

# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0

# enable replication or not
use_replication = false

# Set the connection buffer sizes; you can increase them appropriately.
# Bigger sizes use more memory.
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240

# If a connection receives no data after n seconds, it may be dead; close it.
# 0 to disable the check.
conn_keepalive_interval = 0

# Check TTL (time to live) data every n seconds.
# If set too big, expired data may not be deleted promptly.
ttl_check_interval = 1

[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432

[rocksdb]
# rocksdb has many, many configuration options;
# we only list a few now, but may add more later.
# good luck!

# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set true: writes may get lost after a crash.
# You can set true if replication is enabled; we may recover from the replication log,
# but it is still not easy.
disable_wal = false
max_manifest_file_size = 20971520

[lmdb]
map_size = 524288000
nosync = true

[replication]
# Path to store replication information (write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""

# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but gives better high availability.
sync = false

# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500

# If sync is true, wait for at most min(wait_max_slave_acks, (n + 1) / 2) acks to consider syncing ok.
# n is the number of slaves.
# If 0, wait for (n + 1) / 2 acks.
wait_max_slave_acks = 2

# Store name: file, goleveldb.
# Changing it at runtime is very dangerous.
store_name = "file"

# Expire write ahead logs after the given days
expired_log_days = 7

# For the file store; if 0, use the default 256MB; max is 1G
max_log_file_size = 0

# For the file store; if 0, use the default 50
max_log_file_num = 0

# For the file store, use mmap for file reads and writes
use_mmap = true

# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0

# Compress the log or not
compression = false

[snapshot]
# Path to store snapshot dump files
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""

# Keep the newest max_num snapshot dump files
max_num = 1

[tls]
enabled = true
certificate = "test.crt"
key = "test.key"
139
vendor/github.com/siddontang/ledisdb/ledis/batch.go
generated
vendored
@ -1,139 +0,0 @@
package ledis

import (
    "sync"

    "github.com/siddontang/go/log"
    "github.com/siddontang/ledisdb/rpl"
    "github.com/siddontang/ledisdb/store"
)

type batch struct {
    l *Ledis

    *store.WriteBatch

    sync.Locker

    // tx *Tx
}

func (b *batch) Commit() error {
    if b.l.cfg.GetReadonly() {
        return ErrWriteInROnly
    }

    return b.l.handleCommit(b.WriteBatch, b.WriteBatch)

    // if b.tx == nil {
    //     return b.l.handleCommit(b.WriteBatch, b.WriteBatch)
    // } else {
    //     if b.l.r != nil {
    //         if err := b.tx.data.Append(b.WriteBatch.BatchData()); err != nil {
    //             return err
    //         }
    //     }
    //     return b.WriteBatch.Commit()
    // }
}

func (b *batch) Lock() {
    b.Locker.Lock()
}

func (b *batch) Unlock() {
    b.WriteBatch.Rollback()
    b.Locker.Unlock()
}

func (b *batch) Put(key []byte, value []byte) {
    b.WriteBatch.Put(key, value)
}

func (b *batch) Delete(key []byte) {
    b.WriteBatch.Delete(key)
}

type dbBatchLocker struct {
    l      *sync.Mutex
    wrLock *sync.RWMutex
}

func (l *dbBatchLocker) Lock() {
    l.wrLock.RLock()
    l.l.Lock()
}

func (l *dbBatchLocker) Unlock() {
    l.l.Unlock()
    l.wrLock.RUnlock()
}

// type txBatchLocker struct {
// }

// func (l *txBatchLocker) Lock()   {}
// func (l *txBatchLocker) Unlock() {}

// type multiBatchLocker struct {
// }

// func (l *multiBatchLocker) Lock()   {}
// func (l *multiBatchLocker) Unlock() {}

func (l *Ledis) newBatch(wb *store.WriteBatch, locker sync.Locker) *batch {
    b := new(batch)
    b.l = l
    b.WriteBatch = wb

    b.Locker = locker

    return b
}

type commiter interface {
    Commit() error
}

type commitDataGetter interface {
    Data() []byte
}

func (l *Ledis) handleCommit(g commitDataGetter, c commiter) error {
    l.commitLock.Lock()

    var err error
    if l.r != nil {
        var rl *rpl.Log
        if rl, err = l.r.Log(g.Data()); err != nil {
            l.commitLock.Unlock()

            log.Fatalf("write wal error %s", err.Error())
            return err
        }

        l.propagate(rl)

        if err = c.Commit(); err != nil {
            l.commitLock.Unlock()

            log.Fatalf("commit error %s", err.Error())
            l.noticeReplication()
            return err
        }

        if err = l.r.UpdateCommitID(rl.ID); err != nil {
            l.commitLock.Unlock()

            log.Fatalf("update commit id error %s", err.Error())
            l.noticeReplication()
            return err
        }
    } else {
        err = c.Commit()
    }

    l.commitLock.Unlock()

    return err
}
150
vendor/github.com/siddontang/ledisdb/ledis/const.go
generated
vendored
@ -1,150 +0,0 @@
package ledis

import (
    "errors"
)

// Version is for version
const Version = "0.5"

// DataType is defined for the different types
type DataType byte

// for external use
const (
    KV DataType = iota
    LIST
    HASH
    SET
    ZSET
)

func (d DataType) String() string {
    switch d {
    case KV:
        return KVName
    case LIST:
        return ListName
    case HASH:
        return HashName
    case SET:
        return SetName
    case ZSET:
        return ZSetName
    default:
        return "unknown"
    }
}

// For the different type names
const (
    KVName   = "KV"
    ListName = "LIST"
    HashName = "HASH"
    SetName  = "SET"
    ZSetName = "ZSET"
)

// for the backend store
const (
    NoneType   byte = 0
    KVType     byte = 1
    HashType   byte = 2
    HSizeType  byte = 3
    ListType   byte = 4
    LMetaType  byte = 5
    ZSetType   byte = 6
    ZSizeType  byte = 7
    ZScoreType byte = 8
    // BitType     byte = 9
    // BitMetaType byte = 10
    SetType   byte = 11
    SSizeType byte = 12

    maxDataType byte = 100

    /*
        I made a big mistake with the TTL time key format and had to use a new one (changed 101 to 103).
        You must run ledis-upgrade-ttl to upgrade the db.
    */
    ObsoleteExpTimeType byte = 101
    ExpMetaType         byte = 102
    ExpTimeType         byte = 103

    MetaType byte = 201
)

// TypeName is the map of type -> name
var TypeName = map[byte]string{
    KVType:     "kv",
    HashType:   "hash",
    HSizeType:  "hsize",
    ListType:   "list",
    LMetaType:  "lmeta",
    ZSetType:   "zset",
    ZSizeType:  "zsize",
    ZScoreType: "zscore",
    // BitType:     "bit",
    // BitMetaType: "bitmeta",
    SetType:     "set",
    SSizeType:   "ssize",
    ExpTimeType: "exptime",
    ExpMetaType: "expmeta",
}

const (
    defaultScanCount int = 10
)

var (
    errKeySize        = errors.New("invalid key size")
    errValueSize      = errors.New("invalid value size")
    errHashFieldSize  = errors.New("invalid hash field size")
    errSetMemberSize  = errors.New("invalid set member size")
    errZSetMemberSize = errors.New("invalid zset member size")
    errExpireValue    = errors.New("invalid expire value")
    errListIndex      = errors.New("invalid list index")
)

// For the different size limits
const (
    // max allowed databases
    MaxDatabases int = 10240

    // max key size
    MaxKeySize int = 1024

    // max hash field size
    MaxHashFieldSize int = 1024

    // max zset member size
    MaxZSetMemberSize int = 1024

    // max set member size
    MaxSetMemberSize int = 1024

    // max value size
    MaxValueSize int = 1024 * 1024 * 1024
)

// For the different common errors
var (
    ErrScoreMiss     = errors.New("zset score miss")
    ErrWriteInROnly  = errors.New("write not support in readonly mode")
    ErrRplInRDWR     = errors.New("replication not support in read write mode")
    ErrRplNotSupport = errors.New("replication not support")
)

// const (
//     DBAutoCommit    uint8 = 0x0
//     DBInTransaction uint8 = 0x1
//     DBInMulti       uint8 = 0x2
// )

// For bit operations
const (
    BitAND = "and"
    BitOR  = "or"
    BitXOR = "xor"
    BitNot = "not"
)
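For reference, a small sketch of how these exported constants are typically consumed (illustrative only):

package main

import (
    "fmt"

    "github.com/siddontang/ledisdb/ledis"
)

func main() {
    fmt.Println(ledis.Version) // "0.5"

    // DataType values print their external names.
    fmt.Println(ledis.ZSET.String()) // "ZSET"

    // TypeName maps backend store type bytes to readable names.
    fmt.Println(ledis.TypeName[ledis.HashType]) // "hash"
}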
58
vendor/github.com/siddontang/ledisdb/ledis/doc.go
generated
vendored
@ -1,58 +0,0 @@
// Package ledis is a high performance embedded NoSQL database.
//
// Ledis supports various data structures such as kv, list, hash, and zset, like redis.
//
// Other features include replication and data with a limited time-to-live.
//
// Usage
//
// First create a ledis instance before use:
//
//    l := ledis.Open(cfg)
//
// cfg is a Config instance which contains configuration for ledis to use,
// like DataDir (the root directory where ledis stores its data).
//
// After you create a ledis instance, you can select a DB to store your data:
//
//    db, _ := l.Select(0)
//
// A DB must be selected by an index; ledis supports only 16 databases by default, so the index range is [0-15].
//
// KV
//
// KV is the most basic ledis type, like in any other key-value database.
//
//    err := db.Set(key, value)
//    value, err := db.Get(key)
//
// List
//
// Lists are simply lists of values, sorted by insertion order.
// You can push or pop values on the list head (left) or tail (right).
//
//    err := db.LPush(key, value1)
//    err := db.RPush(key, value2)
//    value1, err := db.LPop(key)
//    value2, err := db.RPop(key)
//
// Hash
//
// Hash is a map between fields and values.
//
//    n, err := db.HSet(key, field1, value1)
//    n, err := db.HSet(key, field2, value2)
//    value1, err := db.HGet(key, field1)
//    value2, err := db.HGet(key, field2)
//
// ZSet
//
// ZSet is a sorted collection of values.
// Every member of a zset is associated with a score, an int64 value used to sort members from smallest to greatest score.
// Members are unique, but scores may be the same.
//
//    n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2})
//    ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1)
//
package ledis
220
vendor/github.com/siddontang/ledisdb/ledis/dump.go
generated
vendored
@ -1,220 +0,0 @@
package ledis

import (
    "bufio"
    "bytes"
    "encoding/binary"
    "io"
    "os"

    "github.com/siddontang/go/snappy"
    "github.com/siddontang/ledisdb/store"
)

// DumpHead is the head of a dump.
type DumpHead struct {
    CommitID uint64
}

// Read reads meta from the Reader.
func (h *DumpHead) Read(r io.Reader) error {
    return binary.Read(r, binary.BigEndian, &h.CommitID)
}

// Write writes meta to the Writer.
func (h *DumpHead) Write(w io.Writer) error {
    return binary.Write(w, binary.BigEndian, h.CommitID)
}

// DumpFile dumps data to the file.
func (l *Ledis) DumpFile(path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    return l.Dump(f)
}

// Dump dumps data to the Writer.
func (l *Ledis) Dump(w io.Writer) error {
    var err error

    var commitID uint64
    var snap *store.Snapshot

    l.wLock.Lock()

    if l.r != nil {
        if commitID, err = l.r.LastCommitID(); err != nil {
            l.wLock.Unlock()
            return err
        }
    }

    if snap, err = l.ldb.NewSnapshot(); err != nil {
        l.wLock.Unlock()
        return err
    }
    defer snap.Close()

    l.wLock.Unlock()

    wb := bufio.NewWriterSize(w, 4096)

    h := &DumpHead{commitID}

    if err = h.Write(wb); err != nil {
        return err
    }

    it := snap.NewIterator()
    defer it.Close()
    it.SeekToFirst()

    compressBuf := make([]byte, 4096)

    var key []byte
    var value []byte
    for ; it.Valid(); it.Next() {
        key = it.RawKey()
        value = it.RawValue()

        if key, err = snappy.Encode(compressBuf, key); err != nil {
            return err
        }

        if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil {
            return err
        }

        if _, err = wb.Write(key); err != nil {
            return err
        }

        if value, err = snappy.Encode(compressBuf, value); err != nil {
            return err
        }

        if err = binary.Write(wb, binary.BigEndian, uint32(len(value))); err != nil {
            return err
        }

        if _, err = wb.Write(value); err != nil {
            return err
        }
    }

    if err = wb.Flush(); err != nil {
        return err
    }

    compressBuf = nil

    return nil
}

// LoadDumpFile clears all data and loads a dump file into the db.
func (l *Ledis) LoadDumpFile(path string) (*DumpHead, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    return l.LoadDump(f)
}

// LoadDump clears all data and loads dump data from the Reader into the db.
func (l *Ledis) LoadDump(r io.Reader) (*DumpHead, error) {
    l.wLock.Lock()
    defer l.wLock.Unlock()

    var err error
    if err = l.flushAll(); err != nil {
        return nil, err
    }

    rb := bufio.NewReaderSize(r, 4096)

    h := new(DumpHead)

    if err = h.Read(rb); err != nil {
        return nil, err
    }

    var keyLen uint16
    var valueLen uint32

    var keyBuf bytes.Buffer
    var valueBuf bytes.Buffer

    deKeyBuf := make([]byte, 4096)
    deValueBuf := make([]byte, 4096)

    var key, value []byte

    wb := l.ldb.NewWriteBatch()
    defer wb.Close()

    n := 0

    for {
        if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF {
            return nil, err
        } else if err == io.EOF {
            break
        }

        if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil {
            return nil, err
        }

        if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil {
            return nil, err
        }

        if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil {
            return nil, err
        }

        if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil {
            return nil, err
        }

        if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil {
            return nil, err
        }

        wb.Put(key, value)
        n++
        if n%1024 == 0 {
            if err = wb.Commit(); err != nil {
                return nil, err
            }
        }

        // if err = l.ldb.Put(key, value); err != nil {
        //     return nil, err
        // }

        keyBuf.Reset()
        valueBuf.Reset()
    }

    if err = wb.Commit(); err != nil {
        return nil, err
    }

    deKeyBuf = nil
    deValueBuf = nil

    if l.r != nil {
        if err := l.r.UpdateCommitID(h.CommitID); err != nil {
            return nil, err
        }
    }

    return h, nil
}
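A sketch of the dump/restore round trip these functions support (illustrative only; the dump path is invented):

package main

import (
    "log"

    "github.com/siddontang/ledisdb/config"
    "github.com/siddontang/ledisdb/ledis"
)

func main() {
    cfg := config.NewConfigDefault()

    l, err := ledis.Open(cfg)
    if err != nil {
        log.Fatal(err)
    }
    defer l.Close()

    // Write a full snapshot of the store to disk...
    if err := l.DumpFile("/tmp/ledis.dmp"); err != nil {
        log.Fatal(err)
    }

    // ...and load it back, which first clears all existing data.
    if _, err := l.LoadDumpFile("/tmp/ledis.dmp"); err != nil {
        log.Fatal(err)
    }
}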
136
vendor/github.com/siddontang/ledisdb/ledis/event.go
generated
vendored
@ -1,136 +0,0 @@
package ledis

import (
    "errors"
    "fmt"
    "strconv"

    "github.com/siddontang/go/hack"
)

var errInvalidEvent = errors.New("invalid event")

func formatEventKey(buf []byte, k []byte) ([]byte, error) {
    if len(k) < 2 {
        return nil, errInvalidEvent
    }

    buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...)
    buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...)

    db := new(DB)
    index, _, err := decodeDBIndex(k)
    if err != nil {
        return nil, err
    }
    db.setIndex(index)

    // TODO: format at the respective place

    switch k[1] {
    case KVType:
        key, err := db.decodeKVKey(k)
        if err != nil {
            return nil, err
        }
        buf = strconv.AppendQuote(buf, hack.String(key))
    case HashType:
        key, field, err := db.hDecodeHashKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(field))
    case HSizeType:
        key, err := db.hDecodeSizeKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
    case ListType:
        key, seq, err := db.lDecodeListKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendInt(buf, int64(seq), 10)
    case LMetaType:
        key, err := db.lDecodeMetaKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
    case ZSetType:
        key, m, err := db.zDecodeSetKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(m))
    case ZSizeType:
        key, err := db.zDecodeSizeKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
    case ZScoreType:
        key, m, score, err := db.zDecodeScoreKey(k)
        if err != nil {
            return nil, err
        }
        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(m))
        buf = append(buf, ' ')
        buf = strconv.AppendInt(buf, score, 10)
    case SetType:
        key, member, err := db.sDecodeSetKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(member))
    case SSizeType:
        key, err := db.sDecodeSizeKey(k)
        if err != nil {
            return nil, err
        }

        buf = strconv.AppendQuote(buf, hack.String(key))
    case ExpTimeType:
        tp, key, t, err := db.expDecodeTimeKey(k)
        if err != nil {
            return nil, err
        }

        buf = append(buf, TypeName[tp]...)
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(key))
        buf = append(buf, ' ')
        buf = strconv.AppendInt(buf, t, 10)
    case ExpMetaType:
        tp, key, err := db.expDecodeMetaKey(k)
        if err != nil {
            return nil, err
        }

        buf = append(buf, TypeName[tp]...)
        buf = append(buf, ' ')
        buf = strconv.AppendQuote(buf, hack.String(key))
    default:
        return nil, errInvalidEvent
    }

    return buf, nil
}
248
vendor/github.com/siddontang/ledisdb/ledis/ledis.go
generated
vendored
@ -1,248 +0,0 @@
package ledis

import (
    "fmt"
    "io"
    "os"
    "path"
    "sync"
    "time"

    "github.com/siddontang/go/filelock"
    "github.com/siddontang/go/log"
    "github.com/siddontang/ledisdb/config"
    "github.com/siddontang/ledisdb/rpl"
    "github.com/siddontang/ledisdb/store"
)

// Ledis is the core structure to handle the database.
type Ledis struct {
    cfg *config.Config

    ldb *store.DB

    dbLock sync.Mutex
    dbs    map[int]*DB

    quit chan struct{}
    wg   sync.WaitGroup

    // for replication
    r       *rpl.Replication
    rc      chan struct{}
    rbatch  *store.WriteBatch
    rDoneCh chan struct{}
    rhs     []NewLogEventHandler

    wLock      sync.RWMutex // allow one write at the same time
    commitLock sync.Mutex   // allow one write commit at the same time

    lock io.Closer

    ttlCheckers  []*ttlChecker
    ttlCheckerCh chan *ttlChecker
}

// Open opens the Ledis with a config.
func Open(cfg *config.Config) (*Ledis, error) {
    if len(cfg.DataDir) == 0 {
        cfg.DataDir = config.DefaultDataDir
    }

    if cfg.Databases == 0 {
        cfg.Databases = 16
    } else if cfg.Databases > MaxDatabases {
        cfg.Databases = MaxDatabases
    }

    os.MkdirAll(cfg.DataDir, 0755)

    var err error

    l := new(Ledis)
    l.cfg = cfg

    if l.lock, err = filelock.Lock(path.Join(cfg.DataDir, "LOCK")); err != nil {
        return nil, err
    }

    l.quit = make(chan struct{})

    if l.ldb, err = store.Open(cfg); err != nil {
        return nil, err
    }

    if cfg.UseReplication {
        if l.r, err = rpl.NewReplication(cfg); err != nil {
            return nil, err
        }

        l.rc = make(chan struct{}, 1)
        l.rbatch = l.ldb.NewWriteBatch()
        l.rDoneCh = make(chan struct{}, 1)

        l.wg.Add(1)
        go l.onReplication()

        // first we must wait until all replication is done,
        // since some logs may not be committed yet
        l.WaitReplication()
    } else {
        l.r = nil
    }

    l.dbs = make(map[int]*DB, 16)

    l.checkTTL()

    return l, nil
}

// Close closes the Ledis.
func (l *Ledis) Close() {
    close(l.quit)
    l.wg.Wait()

    l.ldb.Close()

    if l.r != nil {
        l.r.Close()
        //l.r = nil
    }

    if l.lock != nil {
        l.lock.Close()
        //l.lock = nil
    }
}

// Select chooses a database.
func (l *Ledis) Select(index int) (*DB, error) {
    if index < 0 || index >= l.cfg.Databases {
        return nil, fmt.Errorf("invalid db index %d, must be in [0, %d]", index, l.cfg.Databases-1)
    }

    l.dbLock.Lock()
    defer l.dbLock.Unlock()

    db, ok := l.dbs[index]
    if ok {
        return db, nil
    }

    db = l.newDB(index)
    l.dbs[index] = db

    go func(db *DB) {
        l.ttlCheckerCh <- db.ttlChecker
    }(db)

    return db, nil
}

// FlushAll will clear all data and replication logs.
func (l *Ledis) FlushAll() error {
    l.wLock.Lock()
    defer l.wLock.Unlock()

    return l.flushAll()
}

func (l *Ledis) flushAll() error {
    it := l.ldb.NewIterator()
    defer it.Close()

    it.SeekToFirst()

    w := l.ldb.NewWriteBatch()
    defer w.Rollback()

    n := 0
    for ; it.Valid(); it.Next() {
        n++
        if n == 10000 {
            if err := w.Commit(); err != nil {
                log.Fatalf("flush all commit error: %s", err.Error())
                return err
            }
            n = 0
        }
        w.Delete(it.RawKey())
    }

    if err := w.Commit(); err != nil {
        log.Fatalf("flush all commit error: %s", err.Error())
        return err
    }

    if l.r != nil {
        if err := l.r.Clear(); err != nil {
            log.Fatalf("flush all replication clear error: %s", err.Error())
            return err
        }
    }

    return nil
}

// IsReadOnly returns whether Ledis is read only or not.
func (l *Ledis) IsReadOnly() bool {
    if l.cfg.GetReadonly() {
        return true
    } else if l.r != nil {
        if b, _ := l.r.CommitIDBehind(); b {
            return true
        }
    }
    return false
}

func (l *Ledis) checkTTL() {
    l.ttlCheckers = make([]*ttlChecker, 0, 16)
    l.ttlCheckerCh = make(chan *ttlChecker, 16)

    if l.cfg.TTLCheckInterval == 0 {
        l.cfg.TTLCheckInterval = 1
    }

    l.wg.Add(1)
    go func() {
        defer l.wg.Done()

        tick := time.NewTicker(time.Duration(l.cfg.TTLCheckInterval) * time.Second)
        defer tick.Stop()

        for {
            select {
            case <-tick.C:
                if l.IsReadOnly() {
                    break
                }

                for _, c := range l.ttlCheckers {
                    c.check()
                }
            case c := <-l.ttlCheckerCh:
                l.ttlCheckers = append(l.ttlCheckers, c)
                c.check()
            case <-l.quit:
                return
            }
        }
    }()
}

// StoreStat returns the statistics.
func (l *Ledis) StoreStat() *store.Stat {
    return l.ldb.Stat()
}

// CompactStore compacts the backend storage.
func (l *Ledis) CompactStore() error {
    l.wLock.Lock()
    defer l.wLock.Unlock()

    return l.ldb.Compact()
}
208
vendor/github.com/siddontang/ledisdb/ledis/ledis_db.go
generated
vendored
@ -1,208 +0,0 @@
package ledis

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "sync"

    "github.com/siddontang/ledisdb/store"
)

type ibucket interface {
    Get(key []byte) ([]byte, error)
    GetSlice(key []byte) (store.Slice, error)

    Put(key []byte, value []byte) error
    Delete(key []byte) error

    NewIterator() *store.Iterator

    NewWriteBatch() *store.WriteBatch

    RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
    RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator
    RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
    RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator
}

// DB is the database.
type DB struct {
    l *Ledis

    sdb *store.DB

    bucket ibucket

    index int

    // buffer to store the index varint
    indexVarBuf []byte

    kvBatch   *batch
    listBatch *batch
    hashBatch *batch
    zsetBatch *batch
    // binBatch *batch
    setBatch *batch

    // status uint8

    ttlChecker *ttlChecker

    lbkeys *lBlockKeys
}

func (l *Ledis) newDB(index int) *DB {
    d := new(DB)

    d.l = l

    d.sdb = l.ldb

    d.bucket = d.sdb

    // d.status = DBAutoCommit
    d.setIndex(index)

    d.kvBatch = d.newBatch()
    d.listBatch = d.newBatch()
    d.hashBatch = d.newBatch()
    d.zsetBatch = d.newBatch()
    // d.binBatch = d.newBatch()
    d.setBatch = d.newBatch()

    d.lbkeys = newLBlockKeys()

    d.ttlChecker = d.newTTLChecker()

    return d
}

func decodeDBIndex(buf []byte) (int, int, error) {
    index, n := binary.Uvarint(buf)
    if n == 0 {
        return 0, 0, fmt.Errorf("buf is too small to save index")
    } else if n < 0 {
        return 0, 0, fmt.Errorf("value larger than 64 bits")
    } else if index > uint64(MaxDatabases) {
        return 0, 0, fmt.Errorf("value %d is larger than max databases %d", index, MaxDatabases)
    }
    return int(index), n, nil
}

func (db *DB) setIndex(index int) {
    db.index = index
    // the max size for a varint is 10 bytes
    buf := make([]byte, 10)
    n := binary.PutUvarint(buf, uint64(index))

    db.indexVarBuf = buf[0:n]
}

func (db *DB) checkKeyIndex(buf []byte) (int, error) {
    if len(buf) < len(db.indexVarBuf) {
        return 0, fmt.Errorf("key is too small")
    } else if !bytes.Equal(db.indexVarBuf, buf[0:len(db.indexVarBuf)]) {
        return 0, fmt.Errorf("invalid db index")
    }

    return len(db.indexVarBuf), nil
}

func (db *DB) newTTLChecker() *ttlChecker {
    c := new(ttlChecker)
    c.db = db
    c.txs = make([]*batch, maxDataType)
    c.cbs = make([]onExpired, maxDataType)
    c.nc = 0

    c.register(KVType, db.kvBatch, db.delete)
    c.register(ListType, db.listBatch, db.lDelete)
    c.register(HashType, db.hashBatch, db.hDelete)
    c.register(ZSetType, db.zsetBatch, db.zDelete)
    // c.register(BitType, db.binBatch, db.bDelete)
    c.register(SetType, db.setBatch, db.sDelete)

    return c
}

func (db *DB) newBatch() *batch {
    return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, wrLock: &db.l.wLock})
}

// Index gets the index of the database.
func (db *DB) Index() int {
    return int(db.index)
}

// func (db *DB) IsAutoCommit() bool {
//     return db.status == DBAutoCommit
// }

// FlushAll flushes the data.
func (db *DB) FlushAll() (drop int64, err error) {
    all := [...](func() (int64, error)){
        db.flush,
        db.lFlush,
        db.hFlush,
        db.zFlush,
        db.sFlush}

    for _, flush := range all {
        n, e := flush()
        if e != nil {
            err = e
            return
        }

        drop += n
    }

    return
}

func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) {
    var deleteFunc func(t *batch, key []byte) int64
    var metaDataType byte
    switch dataType {
    case KVType:
        deleteFunc = db.delete
        metaDataType = KVType
    case ListType:
        deleteFunc = db.lDelete
        metaDataType = LMetaType
    case HashType:
        deleteFunc = db.hDelete
        metaDataType = HSizeType
    case ZSetType:
        deleteFunc = db.zDelete
        metaDataType = ZSizeType
    // case BitType:
    //     deleteFunc = db.bDelete
    //     metaDataType = BitMetaType
    case SetType:
        deleteFunc = db.sDelete
        metaDataType = SSizeType
    default:
        return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType])
    }

    var keys [][]byte
    keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
    for len(keys) != 0 || err != nil {
        for _, key := range keys {
            deleteFunc(t, key)
            db.rmExpire(t, dataType, key)
        }

        if err = t.Commit(); err != nil {
            return
        }

        drop += int64(len(keys))
        keys, err = db.scanGeneric(metaDataType, nil, 1024, false, "", false)
    }
    return
}
195
vendor/github.com/siddontang/ledisdb/ledis/migrate.go
generated
vendored
@ -1,195 +0,0 @@
package ledis

import (
	"fmt"

	"github.com/siddontang/rdb"
)

/*
To support redis <-> ledisdb, the dump value format is the same as redis.
We will not support bitmap, and may add bit operations for kv later.

But you must know that we use int64 for zset score, not double.
Only support rdb version 6.
*/

// Dump dumps the KV value of key
func (db *DB) Dump(key []byte) ([]byte, error) {
	v, err := db.Get(key)
	if err != nil {
		return nil, err
	} else if v == nil {
		return nil, err
	}

	return rdb.Dump(rdb.String(v))
}

// LDump dumps the list value of key
func (db *DB) LDump(key []byte) ([]byte, error) {
	v, err := db.LRange(key, 0, -1)
	if err != nil {
		return nil, err
	} else if len(v) == 0 {
		return nil, err
	}

	return rdb.Dump(rdb.List(v))
}

// HDump dumps the hash value of key
func (db *DB) HDump(key []byte) ([]byte, error) {
	v, err := db.HGetAll(key)
	if err != nil {
		return nil, err
	} else if len(v) == 0 {
		return nil, err
	}

	o := make(rdb.Hash, len(v))
	for i := 0; i < len(v); i++ {
		o[i].Field = v[i].Field
		o[i].Value = v[i].Value
	}

	return rdb.Dump(o)
}

// SDump dumps the set value of key
func (db *DB) SDump(key []byte) ([]byte, error) {
	v, err := db.SMembers(key)
	if err != nil {
		return nil, err
	} else if len(v) == 0 {
		return nil, err
	}

	return rdb.Dump(rdb.Set(v))
}

// ZDump dumps the zset value of key
func (db *DB) ZDump(key []byte) ([]byte, error) {
	v, err := db.ZRangeByScore(key, MinScore, MaxScore, 0, -1)
	if err != nil {
		return nil, err
	} else if len(v) == 0 {
		return nil, err
	}

	o := make(rdb.ZSet, len(v))
	for i := 0; i < len(v); i++ {
		o[i].Member = v[i].Member
		o[i].Score = float64(v[i].Score)
	}

	return rdb.Dump(o)
}

// Restore restores a key into database.
func (db *DB) Restore(key []byte, ttl int64, data []byte) error {
	d, err := rdb.DecodeDump(data)
	if err != nil {
		return err
	}

	// ttl is milliseconds, but we only support seconds
	// later may support milliseconds
	if ttl > 0 {
		ttl = ttl / 1e3
		if ttl == 0 {
			ttl = 1
		}
	}

	switch value := d.(type) {
	case rdb.String:
		if _, err = db.Del(key); err != nil {
			return err
		}

		if err = db.Set(key, value); err != nil {
			return err
		}

		if ttl > 0 {
			if _, err = db.Expire(key, ttl); err != nil {
				return err
			}
		}
	case rdb.Hash:
		// first clear old key
		if _, err = db.HClear(key); err != nil {
			return err
		}

		fv := make([]FVPair, len(value))
		for i := 0; i < len(value); i++ {
			fv[i] = FVPair{Field: value[i].Field, Value: value[i].Value}
		}

		if err = db.HMset(key, fv...); err != nil {
			return err
		}

		if ttl > 0 {
			if _, err = db.HExpire(key, ttl); err != nil {
				return err
			}
		}
	case rdb.List:
		// first clear old key
		if _, err = db.LClear(key); err != nil {
			return err
		}

		if _, err = db.RPush(key, value...); err != nil {
			return err
		}

		if ttl > 0 {
			if _, err = db.LExpire(key, ttl); err != nil {
				return err
			}
		}
	case rdb.ZSet:
		// first clear old key
		if _, err = db.ZClear(key); err != nil {
			return err
		}

		sp := make([]ScorePair, len(value))
		for i := 0; i < len(value); i++ {
			sp[i] = ScorePair{int64(value[i].Score), value[i].Member}
		}

		if _, err = db.ZAdd(key, sp...); err != nil {
			return err
		}

		if ttl > 0 {
			if _, err = db.ZExpire(key, ttl); err != nil {
				return err
			}
		}
	case rdb.Set:
		// first clear old key
		if _, err = db.SClear(key); err != nil {
			return err
		}

		if _, err = db.SAdd(key, value...); err != nil {
			return err
		}

		if ttl > 0 {
			if _, err = db.SExpire(key, ttl); err != nil {
				return err
			}
		}
	default:
		return fmt.Errorf("invalid data type %T", d)
	}

	return nil
}
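Since the dump format above is redis-compatible, Dump and Restore can move a key between databases. A minimal usage sketch, assuming src and dst are two opened DB handles:

// copyKV dumps a KV value from src and restores it into dst; ttl 0 means
// no expiration is set on the restored key.
func copyKV(src, dst *DB, key []byte) error {
	data, err := src.Dump(key)
	if err != nil || data == nil {
		return err
	}
	// Restore clears any old value first; ttl would be in milliseconds.
	return dst.Restore(key, 0, data)
}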
259
vendor/github.com/siddontang/ledisdb/ledis/replication.go
generated
vendored
259
vendor/github.com/siddontang/ledisdb/ledis/replication.go
generated
vendored
@ -1,259 +0,0 @@
package ledis

import (
	"bytes"
	"errors"
	"io"
	"time"

	"github.com/siddontang/go/log"
	"github.com/siddontang/go/snappy"
	"github.com/siddontang/ledisdb/rpl"
	"github.com/siddontang/ledisdb/store"
)

const (
	maxReplLogSize = 1 * 1024 * 1024
)

// For replication error.
var (
	ErrLogMissed = errors.New("log is purged in server")
)

// ReplicationUsed returns whether replication is used or not.
func (l *Ledis) ReplicationUsed() bool {
	return l.r != nil
}

func (l *Ledis) handleReplication() error {
	l.wLock.Lock()
	defer l.wLock.Unlock()

	defer AsyncNotify(l.rDoneCh)

	rl := &rpl.Log{}

	var err error
	for {
		if err = l.r.NextNeedCommitLog(rl); err != nil {
			if err != rpl.ErrNoBehindLog {
				log.Errorf("get next commit log err, %s", err.Error())
				return err
			}

			return nil
		}

		l.rbatch.Rollback()

		if rl.Compression == 1 {
			// todo optimize
			if rl.Data, err = snappy.Decode(nil, rl.Data); err != nil {
				log.Errorf("decode log error %s", err.Error())
				return err
			}
		}

		if bd, err := store.NewBatchData(rl.Data); err != nil {
			log.Errorf("decode batch log error %s", err.Error())
			return err
		} else if err = bd.Replay(l.rbatch); err != nil {
			log.Errorf("replay batch log error %s", err.Error())
		}

		l.commitLock.Lock()
		if err = l.rbatch.Commit(); err != nil {
			log.Errorf("commit log error %s", err.Error())
		} else if err = l.r.UpdateCommitID(rl.ID); err != nil {
			log.Errorf("update commit id error %s", err.Error())
		}

		l.commitLock.Unlock()
		if err != nil {
			return err
		}
	}
}

func (l *Ledis) onReplication() {
	defer l.wg.Done()

	l.noticeReplication()

	for {
		select {
		case <-l.rc:
			l.handleReplication()
		case <-l.quit:
			return
		}
	}
}

// WaitReplication waits until replication is done
func (l *Ledis) WaitReplication() error {
	if !l.ReplicationUsed() {
		return ErrRplNotSupport
	}

	for i := 0; i < 100; i++ {
		l.noticeReplication()

		select {
		case <-l.rDoneCh:
		case <-l.quit:
			return nil
		}
		time.Sleep(100 * time.Millisecond)

		b, err := l.r.CommitIDBehind()
		if err != nil {
			return err
		} else if !b {
			return nil
		}
	}

	return errors.New("wait replication too many times")
}

// StoreLogsFromReader stores logs from the Reader
func (l *Ledis) StoreLogsFromReader(rb io.Reader) error {
	if !l.ReplicationUsed() {
		return ErrRplNotSupport
	} else if !l.cfg.Readonly {
		return ErrRplInRDWR
	}

	log := &rpl.Log{}

	for {
		if err := log.Decode(rb); err != nil {
			if err == io.EOF {
				break
			} else {
				return err
			}
		}

		if err := l.r.StoreLog(log); err != nil {
			return err
		}
	}

	l.noticeReplication()

	return nil
}

func (l *Ledis) noticeReplication() {
	AsyncNotify(l.rc)
}

// StoreLogsFromData stores logs from data.
func (l *Ledis) StoreLogsFromData(data []byte) error {
	rb := bytes.NewReader(data)

	return l.StoreLogsFromReader(rb)
}

// ReadLogsTo reads logs and writes them to the Writer.
func (l *Ledis) ReadLogsTo(startLogID uint64, w io.Writer) (n int, nextLogID uint64, err error) {
	if !l.ReplicationUsed() {
		// no replication log
		nextLogID = 0
		err = ErrRplNotSupport
		return
	}

	var firstID, lastID uint64

	firstID, err = l.r.FirstLogID()
	if err != nil {
		return
	}

	if startLogID < firstID {
		err = ErrLogMissed
		return
	}

	lastID, err = l.r.LastLogID()
	if err != nil {
		return
	}

	nextLogID = startLogID

	log := &rpl.Log{}
	for i := startLogID; i <= lastID; i++ {
		if err = l.r.GetLog(i, log); err != nil {
			return
		}

		if err = log.Encode(w); err != nil {
			return
		}

		nextLogID = i + 1

		n += log.Size()

		if n > maxReplLogSize {
			break
		}
	}

	return
}

// ReadLogsToTimeout tries to read events; if no events are read,
// it waits for the new event signal until timeout seconds elapse.
func (l *Ledis) ReadLogsToTimeout(startLogID uint64, w io.Writer, timeout int, quitCh chan struct{}) (n int, nextLogID uint64, err error) {
	n, nextLogID, err = l.ReadLogsTo(startLogID, w)
	if err != nil {
		return
	} else if n != 0 {
		return
	}
	// no events read
	select {
	case <-l.r.WaitLog():
	case <-time.After(time.Duration(timeout) * time.Second):
	case <-quitCh:
		return
	}
	return l.ReadLogsTo(startLogID, w)
}

func (l *Ledis) propagate(rl *rpl.Log) {
	for _, h := range l.rhs {
		h(rl)
	}
}

// NewLogEventHandler is the handler to handle new log event.
type NewLogEventHandler func(rl *rpl.Log)

// AddNewLogEventHandler adds the handler for the new log event
func (l *Ledis) AddNewLogEventHandler(h NewLogEventHandler) error {
	if !l.ReplicationUsed() {
		return ErrRplNotSupport
	}

	l.rhs = append(l.rhs, h)

	return nil
}

// ReplicationStat returns the statistics of replication.
func (l *Ledis) ReplicationStat() (*rpl.Stat, error) {
	if !l.ReplicationUsed() {
		return nil, ErrRplNotSupport
	}

	return l.r.Stat()
}
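ReadLogsTo and StoreLogsFromData above are the two halves of log shipping. A hedged sketch of one sync round, assuming master and slave are opened Ledis instances and the slave is configured read-only:

// syncOnce ships at most maxReplLogSize bytes of logs from master to slave
// and returns the next log ID to request.
func syncOnce(master, slave *Ledis, startID uint64) (uint64, error) {
	var buf bytes.Buffer
	n, nextID, err := master.ReadLogsTo(startID, &buf)
	if err != nil {
		return startID, err
	}
	if n == 0 {
		// nothing new; a caller could retry with ReadLogsToTimeout instead
		return nextID, nil
	}
	// The slave must be read-only, or StoreLogsFromData returns ErrRplInRDWR.
	if err := slave.StoreLogsFromData(buf.Bytes()); err != nil {
		return startID, err
	}
	return nextID, nil
}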
402
vendor/github.com/siddontang/ledisdb/ledis/scan.go
generated
vendored
402
vendor/github.com/siddontang/ledisdb/ledis/scan.go
generated
vendored
@ -1,402 +0,0 @@
package ledis

import (
	"errors"
	"regexp"

	"github.com/siddontang/ledisdb/store"
)

var errDataType = errors.New("error data type")
var errMetaKey = errors.New("error meta key")

// Scan scans the data. If inclusive is true, scan range [cursor, inf), else (cursor, inf)
func (db *DB) Scan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
	storeDataType, err := getDataStoreType(dataType)
	if err != nil {
		return nil, err
	}

	return db.scanGeneric(storeDataType, cursor, count, inclusive, match, false)
}

// RevScan scans the data reversed. If inclusive is true, revscan range (-inf, cursor], else (-inf, cursor)
func (db *DB) RevScan(dataType DataType, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
	storeDataType, err := getDataStoreType(dataType)
	if err != nil {
		return nil, err
	}

	return db.scanGeneric(storeDataType, cursor, count, inclusive, match, true)
}

func getDataStoreType(dataType DataType) (byte, error) {
	var storeDataType byte
	switch dataType {
	case KV:
		storeDataType = KVType
	case LIST:
		storeDataType = LMetaType
	case HASH:
		storeDataType = HSizeType
	case SET:
		storeDataType = SSizeType
	case ZSET:
		storeDataType = ZSizeType
	default:
		return 0, errDataType
	}
	return storeDataType, nil
}

func buildMatchRegexp(match string) (*regexp.Regexp, error) {
	var err error
	var r *regexp.Regexp

	if len(match) > 0 {
		if r, err = regexp.Compile(match); err != nil {
			return nil, err
		}
	}

	return r, nil
}

func (db *DB) buildScanIterator(minKey []byte, maxKey []byte, inclusive bool, reverse bool) *store.RangeLimitIterator {
	tp := store.RangeOpen

	if !reverse {
		if inclusive {
			tp = store.RangeROpen
		}
	} else {
		if inclusive {
			tp = store.RangeLOpen
		}
	}

	var it *store.RangeLimitIterator
	if !reverse {
		it = db.bucket.RangeIterator(minKey, maxKey, tp)
	} else {
		it = db.bucket.RevRangeIterator(minKey, maxKey, tp)
	}

	return it
}

func (db *DB) buildScanKeyRange(storeDataType byte, key []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
	if !reverse {
		if minKey, err = db.encodeScanMinKey(storeDataType, key); err != nil {
			return
		}
		if maxKey, err = db.encodeScanMaxKey(storeDataType, nil); err != nil {
			return
		}
	} else {
		if minKey, err = db.encodeScanMinKey(storeDataType, nil); err != nil {
			return
		}
		if maxKey, err = db.encodeScanMaxKey(storeDataType, key); err != nil {
			return
		}
	}
	return
}

func checkScanCount(count int) int {
	if count <= 0 {
		count = defaultScanCount
	}

	return count
}

func (db *DB) scanGeneric(storeDataType byte, key []byte, count int,
	inclusive bool, match string, reverse bool) ([][]byte, error) {

	r, err := buildMatchRegexp(match)
	if err != nil {
		return nil, err
	}

	minKey, maxKey, err := db.buildScanKeyRange(storeDataType, key, reverse)
	if err != nil {
		return nil, err
	}

	count = checkScanCount(count)

	it := db.buildScanIterator(minKey, maxKey, inclusive, reverse)

	v := make([][]byte, 0, count)

	for i := 0; it.Valid() && i < count; it.Next() {
		if k, err := db.decodeScanKey(storeDataType, it.Key()); err != nil {
			continue
		} else if r != nil && !r.Match(k) {
			continue
		} else {
			v = append(v, k)
			i++
		}
	}
	it.Close()
	return v, nil
}

func (db *DB) encodeScanMinKey(storeDataType byte, key []byte) ([]byte, error) {
	return db.encodeScanKey(storeDataType, key)
}

func (db *DB) encodeScanMaxKey(storeDataType byte, key []byte) ([]byte, error) {
	if len(key) > 0 {
		return db.encodeScanKey(storeDataType, key)
	}

	k, err := db.encodeScanKey(storeDataType, nil)
	if err != nil {
		return nil, err
	}
	k[len(k)-1] = storeDataType + 1
	return k, nil
}
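// Note on encodeScanMaxKey above: every encoded key of a given type shares a
// one-byte type prefix, so with an empty cursor the encoded key is just that
// prefix, and bumping its final byte yields the first key of the next type,
// i.e. an exclusive stop key. A minimal sketch of the same trick, with
// typeBounds as a hypothetical helper:
func typeBounds(prefix []byte, dataType byte) (min, max []byte) {
	min = append(append([]byte{}, prefix...), dataType)
	max = append(append([]byte{}, prefix...), dataType+1)
	return min, max // scanning [min, max) visits every key of dataType
}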
func (db *DB) encodeScanKey(storeDataType byte, key []byte) ([]byte, error) {
	switch storeDataType {
	case KVType:
		return db.encodeKVKey(key), nil
	case LMetaType:
		return db.lEncodeMetaKey(key), nil
	case HSizeType:
		return db.hEncodeSizeKey(key), nil
	case ZSizeType:
		return db.zEncodeSizeKey(key), nil
	case SSizeType:
		return db.sEncodeSizeKey(key), nil
	default:
		return nil, errDataType
	}
}

func (db *DB) decodeScanKey(storeDataType byte, ek []byte) (key []byte, err error) {
	switch storeDataType {
	case KVType:
		key, err = db.decodeKVKey(ek)
	case LMetaType:
		key, err = db.lDecodeMetaKey(ek)
	case HSizeType:
		key, err = db.hDecodeSizeKey(ek)
	case ZSizeType:
		key, err = db.zDecodeSizeKey(ek)
	case SSizeType:
		key, err = db.sDecodeSizeKey(ek)
	default:
		err = errDataType
	}
	return
}

// for special data scan

func (db *DB) buildDataScanKeyRange(storeDataType byte, key []byte, cursor []byte, reverse bool) (minKey []byte, maxKey []byte, err error) {
	if !reverse {
		if minKey, err = db.encodeDataScanMinKey(storeDataType, key, cursor); err != nil {
			return
		}
		if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, nil); err != nil {
			return
		}
	} else {
		if minKey, err = db.encodeDataScanMinKey(storeDataType, key, nil); err != nil {
			return
		}
		if maxKey, err = db.encodeDataScanMaxKey(storeDataType, key, cursor); err != nil {
			return
		}
	}
	return
}

func (db *DB) encodeDataScanMinKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
	return db.encodeDataScanKey(storeDataType, key, cursor)
}

func (db *DB) encodeDataScanMaxKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
	if len(cursor) > 0 {
		return db.encodeDataScanKey(storeDataType, key, cursor)
	}

	k, err := db.encodeDataScanKey(storeDataType, key, nil)
	if err != nil {
		return nil, err
	}

	// here, the last byte is the start separator; set it to the stop separator
	k[len(k)-1] = k[len(k)-1] + 1
	return k, nil
}

func (db *DB) encodeDataScanKey(storeDataType byte, key []byte, cursor []byte) ([]byte, error) {
	switch storeDataType {
	case HashType:
		return db.hEncodeHashKey(key, cursor), nil
	case ZSetType:
		return db.zEncodeSetKey(key, cursor), nil
	case SetType:
		return db.sEncodeSetKey(key, cursor), nil
	default:
		return nil, errDataType
	}
}

func (db *DB) buildDataScanIterator(storeDataType byte, key []byte, cursor []byte, count int,
	inclusive bool, reverse bool) (*store.RangeLimitIterator, error) {

	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	minKey, maxKey, err := db.buildDataScanKeyRange(storeDataType, key, cursor, reverse)
	if err != nil {
		return nil, err
	}

	it := db.buildScanIterator(minKey, maxKey, inclusive, reverse)

	return it, nil
}

func (db *DB) hScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]FVPair, error) {
	count = checkScanCount(count)

	r, err := buildMatchRegexp(match)
	if err != nil {
		return nil, err
	}

	v := make([]FVPair, 0, count)

	it, err := db.buildDataScanIterator(HashType, key, cursor, count, inclusive, reverse)
	if err != nil {
		return nil, err
	}

	defer it.Close()

	for i := 0; it.Valid() && i < count; it.Next() {
		_, f, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		} else if r != nil && !r.Match(f) {
			continue
		}

		v = append(v, FVPair{Field: f, Value: it.Value()})

		i++
	}

	return v, nil
}

// HScan scans data for hash.
func (db *DB) HScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
	return db.hScanGeneric(key, cursor, count, inclusive, match, false)
}

// HRevScan reversed scans data for hash.
func (db *DB) HRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]FVPair, error) {
	return db.hScanGeneric(key, cursor, count, inclusive, match, true)
}

func (db *DB) sScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([][]byte, error) {
	count = checkScanCount(count)

	r, err := buildMatchRegexp(match)
	if err != nil {
		return nil, err
	}

	v := make([][]byte, 0, count)

	it, err := db.buildDataScanIterator(SetType, key, cursor, count, inclusive, reverse)
	if err != nil {
		return nil, err
	}

	defer it.Close()

	for i := 0; it.Valid() && i < count; it.Next() {
		_, m, err := db.sDecodeSetKey(it.Key())
		if err != nil {
			return nil, err
		} else if r != nil && !r.Match(m) {
			continue
		}

		v = append(v, m)

		i++
	}

	return v, nil
}

// SScan scans data for set.
func (db *DB) SScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.sScanGeneric(key, cursor, count, inclusive, match, false)
}

// SRevScan scans data reversed for set.
func (db *DB) SRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([][]byte, error) {
	return db.sScanGeneric(key, cursor, count, inclusive, match, true)
}

func (db *DB) zScanGeneric(key []byte, cursor []byte, count int, inclusive bool, match string, reverse bool) ([]ScorePair, error) {
	count = checkScanCount(count)

	r, err := buildMatchRegexp(match)
	if err != nil {
		return nil, err
	}

	v := make([]ScorePair, 0, count)

	it, err := db.buildDataScanIterator(ZSetType, key, cursor, count, inclusive, reverse)
	if err != nil {
		return nil, err
	}

	defer it.Close()

	for i := 0; it.Valid() && i < count; it.Next() {
		_, m, err := db.zDecodeSetKey(it.Key())
		if err != nil {
			return nil, err
		} else if r != nil && !r.Match(m) {
			continue
		}

		score, err := Int64(it.Value(), nil)
		if err != nil {
			return nil, err
		}

		v = append(v, ScorePair{Score: score, Member: m})

		i++
	}

	return v, nil
}

// ZScan scans data for zset.
func (db *DB) ZScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
	return db.zScanGeneric(key, cursor, count, inclusive, match, false)
}

// ZRevScan scans data reversed for zset.
func (db *DB) ZRevScan(key []byte, cursor []byte, count int, inclusive bool, match string) ([]ScorePair, error) {
	return db.zScanGeneric(key, cursor, count, inclusive, match, true)
}
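The cursor in the scan API above is simply the last key (or field/member) returned, passed back with inclusive=false so it is not repeated. A hedged usage sketch paging through all fields of a hash:

// allFields collects every field of a hash in pages of 100 via HScan.
func allFields(db *DB, key []byte) ([]FVPair, error) {
	var out []FVPair
	cursor := []byte(nil)
	inclusive := true // the first scan starts from the beginning of the hash
	for {
		fvs, err := db.HScan(key, cursor, 100, inclusive, "")
		if err != nil {
			return nil, err
		}
		if len(fvs) == 0 {
			return out, nil
		}
		out = append(out, fvs...)
		cursor, inclusive = fvs[len(fvs)-1].Field, false
	}
}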
235
vendor/github.com/siddontang/ledisdb/ledis/sort.go
generated
vendored
235
vendor/github.com/siddontang/ledisdb/ledis/sort.go
generated
vendored
@ -1,235 +0,0 @@
package ledis

import (
	"bytes"
	"fmt"
	"sort"
	"strconv"

	"github.com/siddontang/ledisdb/store"
)

// Limit is for sort.
type Limit struct {
	Offset int
	Size   int
}

func getSortRange(values [][]byte, offset int, size int) (int, int) {
	var start = 0
	if offset > 0 {
		start = offset
	}

	valueLen := len(values)
	var end = valueLen - 1
	if size > 0 {
		end = start + size - 1
	}

	if start >= valueLen {
		start = valueLen - 1
		end = valueLen - 2
	}

	if end >= valueLen {
		end = valueLen - 1
	}

	return start, end
}

var hashPattern = []byte("*->")

func (db *DB) lookupKeyByPattern(pattern []byte, subKey []byte) []byte {
	// If the pattern is #, return the substitution key itself
	if bytes.Equal(pattern, []byte{'#'}) {
		return subKey
	}

	// If we can't find '*' in the pattern, return nil
	if !bytes.Contains(pattern, []byte{'*'}) {
		return nil
	}

	key := pattern
	var field []byte

	// Find out if we're dealing with a hash dereference
	if n := bytes.Index(pattern, hashPattern); n > 0 && n+3 < len(pattern) {
		key = pattern[0 : n+1]
		field = pattern[n+3:]
	}

	// Perform the '*' substitution
	key = bytes.Replace(key, []byte{'*'}, subKey, 1)

	var value []byte
	if field == nil {
		value, _ = db.Get(key)
	} else {
		value, _ = db.HGet(key, field)
	}

	return value
}
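// Illustration of the pattern rules implemented by lookupKeyByPattern above
// (the key and value names are hypothetical):
//
//	pattern "#"            + subKey "a" -> returns "a" itself
//	pattern "weight_*"     + subKey "a" -> Get("weight_a")
//	pattern "obj_*->field" + subKey "a" -> HGet("obj_a", "field")
//	pattern "no_star"                   -> nil (no '*' to substitute)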
type sortItem struct {
	value    []byte
	cmpValue []byte
	score    float64
}

type sortItemSlice struct {
	alpha         bool
	sortByPattern bool
	items         []sortItem
}

func (s *sortItemSlice) Len() int {
	return len(s.items)
}

func (s *sortItemSlice) Swap(i, j int) {
	s.items[i], s.items[j] = s.items[j], s.items[i]
}

func (s *sortItemSlice) Less(i, j int) bool {
	s1 := s.items[i]
	s2 := s.items[j]
	if !s.alpha {
		if s1.score < s2.score {
			return true
		} else if s1.score > s2.score {
			return false
		} else {
			return bytes.Compare(s1.value, s2.value) < 0
		}
	} else {
		if s.sortByPattern {
			if s1.cmpValue == nil || s2.cmpValue == nil {
				if s1.cmpValue == nil {
					return true
				}
				return false
			}
			// Unlike redis, we only use bytes compare
			return bytes.Compare(s1.cmpValue, s2.cmpValue) < 0
		}

		// Unlike redis, we only use bytes compare
		return bytes.Compare(s1.value, s2.value) < 0
	}
}

func (db *DB) xsort(values [][]byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
	if len(values) == 0 {
		return [][]byte{}, nil
	}

	start, end := getSortRange(values, offset, size)

	dontsort := 0

	if sortBy != nil {
		if !bytes.Contains(sortBy, []byte{'*'}) {
			dontsort = 1
		}
	}

	items := &sortItemSlice{
		alpha:         alpha,
		sortByPattern: sortBy != nil,
		items:         make([]sortItem, len(values)),
	}

	for i, value := range values {
		items.items[i].value = value
		items.items[i].score = 0
		items.items[i].cmpValue = nil

		if dontsort == 0 {
			var cmpValue []byte
			if sortBy != nil {
				cmpValue = db.lookupKeyByPattern(sortBy, value)
			} else {
				// use the value itself to sort by
				cmpValue = value
			}

			if cmpValue == nil {
				continue
			}

			if alpha {
				if sortBy != nil {
					items.items[i].cmpValue = cmpValue
				}
			} else {
				score, err := strconv.ParseFloat(string(cmpValue), 64)
				if err != nil {
					return nil, fmt.Errorf("%s scores can't be converted into double", cmpValue)
				}
				items.items[i].score = score
			}
		}
	}

	if dontsort == 0 {
		if !desc {
			sort.Sort(items)
		} else {
			sort.Sort(sort.Reverse(items))
		}
	}

	resLen := end - start + 1
	if len(sortGet) > 0 {
		resLen = len(sortGet) * (end - start + 1)
	}

	res := make([][]byte, 0, resLen)
	for i := start; i <= end; i++ {
		if len(sortGet) == 0 {
			res = append(res, items.items[i].value)
		} else {
			for _, getPattern := range sortGet {
				v := db.lookupKeyByPattern(getPattern, items.items[i].value)
				res = append(res, v)
			}
		}
	}

	return res, nil
}

// XLSort sorts list.
func (db *DB) XLSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
	values, err := db.LRange(key, 0, -1)
	if err != nil {
		return nil, err
	}

	return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}

// XSSort sorts set.
func (db *DB) XSSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
	values, err := db.SMembers(key)
	if err != nil {
		return nil, err
	}

	return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}

// XZSort sorts zset.
func (db *DB) XZSort(key []byte, offset int, size int, alpha bool, desc bool, sortBy []byte, sortGet [][]byte) ([][]byte, error) {
	values, err := db.ZRangeByLex(key, nil, nil, store.RangeClose, 0, -1)
	if err != nil {
		return nil, err
	}

	return db.xsort(values, offset, size, alpha, desc, sortBy, sortGet)
}
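A hedged usage sketch of XLSort mirroring redis `SORT mylist BY weight_* GET # GET obj_*->name` (offset 0 and size 0 select the whole range; the key names are hypothetical):

func sortExample(db *DB) ([][]byte, error) {
	return db.XLSort([]byte("mylist"), 0, 0, false, false,
		[]byte("weight_*"),
		[][]byte{[]byte("#"), []byte("obj_*->name")})
}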
556
vendor/github.com/siddontang/ledisdb/ledis/t_hash.go
generated
vendored
556
vendor/github.com/siddontang/ledisdb/ledis/t_hash.go
generated
vendored
@ -1,556 +0,0 @@
package ledis

import (
	"encoding/binary"
	"errors"
	"time"

	"github.com/siddontang/go/num"
	"github.com/siddontang/ledisdb/store"
)

// FVPair is the pair of field and value.
type FVPair struct {
	Field []byte
	Value []byte
}

var errHashKey = errors.New("invalid hash key")
var errHSizeKey = errors.New("invalid hsize key")

const (
	hashStartSep byte = ':'
	hashStopSep  byte = hashStartSep + 1
)

func checkHashKFSize(key []byte, field []byte) error {
	if len(key) > MaxKeySize || len(key) == 0 {
		return errKeySize
	} else if len(field) > MaxHashFieldSize || len(field) == 0 {
		return errHashFieldSize
	}
	return nil
}

func (db *DB) hEncodeSizeKey(key []byte) []byte {
	buf := make([]byte, len(key)+1+len(db.indexVarBuf))

	pos := 0
	n := copy(buf, db.indexVarBuf)
	pos += n

	buf[pos] = HSizeType
	pos++
	copy(buf[pos:], key)

	return buf
}

func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) {
	pos, err := db.checkKeyIndex(ek)
	if err != nil {
		return nil, err
	}

	if pos+1 > len(ek) || ek[pos] != HSizeType {
		return nil, errHSizeKey
	}
	pos++

	return ek[pos:], nil
}

func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte {
	buf := make([]byte, len(key)+len(field)+1+1+2+len(db.indexVarBuf))

	pos := 0
	n := copy(buf, db.indexVarBuf)
	pos += n

	buf[pos] = HashType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	buf[pos] = hashStartSep
	pos++
	copy(buf[pos:], field)

	return buf
}

func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) {
	pos, err := db.checkKeyIndex(ek)
	if err != nil {
		return nil, nil, err
	}

	if pos+1 > len(ek) || ek[pos] != HashType {
		return nil, nil, errHashKey
	}
	pos++

	if pos+2 > len(ek) {
		return nil, nil, errHashKey
	}

	keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
	pos += 2

	if keyLen+pos > len(ek) {
		return nil, nil, errHashKey
	}

	key := ek[pos : pos+keyLen]
	pos += keyLen

	if ek[pos] != hashStartSep {
		return nil, nil, errHashKey
	}

	pos++
	field := ek[pos:]
	return key, field, nil
}

func (db *DB) hEncodeStartKey(key []byte) []byte {
	return db.hEncodeHashKey(key, nil)
}

func (db *DB) hEncodeStopKey(key []byte) []byte {
	k := db.hEncodeHashKey(key, nil)

	k[len(k)-1] = hashStopSep

	return k
}
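// Layout of the encoded hash data key produced by hEncodeHashKey above:
//
//	| indexVarBuf | HashType (1) | big-endian len(key) (2) | key | ':' (1) | field |
//
// hEncodeStopKey relies on this layout: with an empty field the final byte is
// the start separator ':', and replacing it with the stop separator ';' gives
// an exclusive upper bound that sorts after every field of the same hash.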
func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) {
	t := db.hashBatch

	ek := db.hEncodeHashKey(key, field)

	var n int64 = 1
	if v, _ := db.bucket.Get(ek); v != nil {
		n = 0
	} else {
		if _, err := db.hIncrSize(key, 1); err != nil {
			return 0, err
		}
	}

	t.Put(ek, value)
	return n, nil
}

// ps : here we just focus on deleting the hash data;
// anything else, such as expiration, is ignored.
func (db *DB) hDelete(t *batch, key []byte) int64 {
	sk := db.hEncodeSizeKey(key)
	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	var num int64
	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		t.Delete(it.Key())
		num++
	}
	it.Close()

	t.Delete(sk)
	return num
}

func (db *DB) hExpireAt(key []byte, when int64) (int64, error) {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	if hlen, err := db.HLen(key); err != nil || hlen == 0 {
		return 0, err
	}

	db.expireAt(t, HashType, key, when)
	if err := t.Commit(); err != nil {
		return 0, err
	}

	return 1, nil
}

// HLen returns the length of hash.
func (db *DB) HLen(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	return Int64(db.bucket.Get(db.hEncodeSizeKey(key)))
}

// HSet sets the field with value of key.
func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return 0, err
	} else if err := checkValueSize(value); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.hSetItem(key, field, value)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}

// HGet gets the value of the field.
func (db *DB) HGet(key []byte, field []byte) ([]byte, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return nil, err
	}

	return db.bucket.Get(db.hEncodeHashKey(key, field))
}

// HMset sets multi field-values.
func (db *DB) HMset(key []byte, args ...FVPair) error {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	var err error
	var ek []byte
	var num int64
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i].Field); err != nil {
			return err
		} else if err := checkValueSize(args[i].Value); err != nil {
			return err
		}

		ek = db.hEncodeHashKey(key, args[i].Field)

		if v, err := db.bucket.Get(ek); err != nil {
			return err
		} else if v == nil {
			num++
		}

		t.Put(ek, args[i].Value)
	}

	if _, err = db.hIncrSize(key, num); err != nil {
		return err
	}

	// todo: add binlog
	err = t.Commit()
	return err
}

// HMget gets multi values of fields
func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) {
	var ek []byte

	it := db.bucket.NewIterator()
	defer it.Close()

	r := make([][]byte, len(args))
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i]); err != nil {
			return nil, err
		}

		ek = db.hEncodeHashKey(key, args[i])

		r[i] = it.Find(ek)
	}

	return r, nil
}

// HDel deletes the fields.
func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) {
	t := db.hashBatch

	var ek []byte
	var v []byte
	var err error

	t.Lock()
	defer t.Unlock()

	it := db.bucket.NewIterator()
	defer it.Close()

	var num int64
	for i := 0; i < len(args); i++ {
		if err := checkHashKFSize(key, args[i]); err != nil {
			return 0, err
		}

		ek = db.hEncodeHashKey(key, args[i])

		v = it.RawFind(ek)
		if v == nil {
			continue
		} else {
			num++
			t.Delete(ek)
		}
	}

	if _, err = db.hIncrSize(key, -num); err != nil {
		return 0, err
	}

	err = t.Commit()

	return num, err
}

func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) {
	t := db.hashBatch
	sk := db.hEncodeSizeKey(key)

	var err error
	var size int64
	if size, err = Int64(db.bucket.Get(sk)); err != nil {
		return 0, err
	}

	size += delta
	if size <= 0 {
		size = 0
		t.Delete(sk)
		db.rmExpire(t, HashType, key)
	} else {
		t.Put(sk, PutInt64(size))
	}

	return size, nil
}

// HIncrBy increases the value of field by delta.
func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) {
	if err := checkHashKFSize(key, field); err != nil {
		return 0, err
	}

	t := db.hashBatch
	var ek []byte
	var err error

	t.Lock()
	defer t.Unlock()

	ek = db.hEncodeHashKey(key, field)

	var n int64
	if n, err = StrInt64(db.bucket.Get(ek)); err != nil {
		return 0, err
	}

	n += delta

	_, err = db.hSetItem(key, field, num.FormatInt64ToSlice(n))
	if err != nil {
		return 0, err
	}

	err = t.Commit()

	return n, err
}

// HGetAll returns all field-values.
func (db *DB) HGetAll(key []byte) ([]FVPair, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([]FVPair, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		_, f, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}

		v = append(v, FVPair{Field: f, Value: it.Value()})
	}

	return v, nil
}

// HKeys returns all the fields.
func (db *DB) HKeys(key []byte) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([][]byte, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		_, f, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}
		v = append(v, f)
	}

	return v, nil
}

// HValues returns all values
func (db *DB) HValues(key []byte) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.hEncodeStartKey(key)
	stop := db.hEncodeStopKey(key)

	v := make([][]byte, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		_, _, err := db.hDecodeHashKey(it.Key())
		if err != nil {
			return nil, err
		}

		v = append(v, it.Value())
	}

	return v, nil
}

// HClear clears the data.
func (db *DB) HClear(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	num := db.hDelete(t, key)
	db.rmExpire(t, HashType, key)

	err := t.Commit()
	return num, err
}

// HMclear clears multi data.
func (db *DB) HMclear(keys ...[]byte) (int64, error) {
	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return 0, err
		}

		db.hDelete(t, key)
		db.rmExpire(t, HashType, key)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

func (db *DB) hFlush() (drop int64, err error) {
	t := db.hashBatch

	t.Lock()
	defer t.Unlock()

	return db.flushType(t, HashType)
}

// HExpire expires the data with duration.
func (db *DB) HExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.hExpireAt(key, time.Now().Unix()+duration)
}

// HExpireAt expires the data at time when.
func (db *DB) HExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.hExpireAt(key, when)
}

// HTTL gets the TTL of data.
func (db *DB) HTTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(HashType, key)
}

// HPersist removes the TTL of data.
func (db *DB) HPersist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.hashBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.rmExpire(t, HashType, key)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}

// HKeyExists checks whether data exists or not.
func (db *DB) HKeyExists(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}
	sk := db.hEncodeSizeKey(key)
	v, err := db.bucket.Get(sk)
	if v != nil && err == nil {
		return 1, nil
	}
	return 0, err
}
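A hedged usage sketch of the hash API above, assuming db is an opened DB (the key and field names are hypothetical):

// hashDemo sets two fields, bumps a counter field, and reads everything back.
func hashDemo(db *DB) ([]FVPair, error) {
	key := []byte("h")
	if err := db.HMset(key,
		FVPair{Field: []byte("f1"), Value: []byte("v1")},
		FVPair{Field: []byte("hits"), Value: []byte("0")}); err != nil {
		return nil, err
	}
	if _, err := db.HIncrBy(key, []byte("hits"), 1); err != nil {
		return nil, err
	}
	return db.HGetAll(key)
}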
794
vendor/github.com/siddontang/ledisdb/ledis/t_kv.go
generated
vendored
794
vendor/github.com/siddontang/ledisdb/ledis/t_kv.go
generated
vendored
@ -1,794 +0,0 @@
|
||||
package ledis
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/siddontang/go/num"
|
||||
"github.com/siddontang/ledisdb/store"
|
||||
)
|
||||
|
||||
// KVPair is the pair of key-value.
|
||||
type KVPair struct {
|
||||
Key []byte
|
||||
Value []byte
|
||||
}
|
||||
|
||||
var errKVKey = errors.New("invalid encode kv key")
|
||||
|
||||
func checkKeySize(key []byte) error {
|
||||
if len(key) > MaxKeySize || len(key) == 0 {
|
||||
return errKeySize
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkValueSize(value []byte) error {
|
||||
if len(value) > MaxValueSize {
|
||||
return errValueSize
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) encodeKVKey(key []byte) []byte {
|
||||
ek := make([]byte, len(key)+1+len(db.indexVarBuf))
|
||||
pos := copy(ek, db.indexVarBuf)
|
||||
ek[pos] = KVType
|
||||
pos++
|
||||
copy(ek[pos:], key)
|
||||
return ek
|
||||
}
|
||||
|
||||
func (db *DB) decodeKVKey(ek []byte) ([]byte, error) {
|
||||
pos, err := db.checkKeyIndex(ek)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pos+1 > len(ek) || ek[pos] != KVType {
|
||||
return nil, errKVKey
|
||||
}
|
||||
|
||||
pos++
|
||||
|
||||
return ek[pos:], nil
|
||||
}
|
||||
|
||||
func (db *DB) encodeKVMinKey() []byte {
|
||||
ek := db.encodeKVKey(nil)
|
||||
return ek
|
||||
}
|
||||
|
||||
func (db *DB) encodeKVMaxKey() []byte {
|
||||
ek := db.encodeKVKey(nil)
|
||||
ek[len(ek)-1] = KVType + 1
|
||||
return ek
|
||||
}
|
||||
|
||||
func (db *DB) incr(key []byte, delta int64) (int64, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var err error
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
var n int64
|
||||
n, err = StrInt64(db.bucket.Get(key))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n += delta
|
||||
|
||||
t.Put(key, num.FormatInt64ToSlice(n))
|
||||
|
||||
err = t.Commit()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// ps : here just focus on deleting the key-value data,
|
||||
// any other likes expire is ignore.
|
||||
func (db *DB) delete(t *batch, key []byte) int64 {
|
||||
key = db.encodeKVKey(key)
|
||||
t.Delete(key)
|
||||
return 1
|
||||
}
|
||||
|
||||
func (db *DB) setExpireAt(key []byte, when int64) (int64, error) {
|
||||
t := db.kvBatch
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
if exist, err := db.Exists(key); err != nil || exist == 0 {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
db.expireAt(t, KVType, key, when)
|
||||
if err := t.Commit(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
// Decr decreases the data.
|
||||
func (db *DB) Decr(key []byte) (int64, error) {
|
||||
return db.incr(key, -1)
|
||||
}
|
||||
|
||||
// DecrBy decreases the data by decrement.
|
||||
func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) {
|
||||
return db.incr(key, -decrement)
|
||||
}
|
||||
|
||||
// Del deletes the data.
|
||||
func (db *DB) Del(keys ...[]byte) (int64, error) {
|
||||
if len(keys) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
codedKeys := make([][]byte, len(keys))
|
||||
for i, k := range keys {
|
||||
codedKeys[i] = db.encodeKVKey(k)
|
||||
}
|
||||
|
||||
t := db.kvBatch
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
for i, k := range keys {
|
||||
t.Delete(codedKeys[i])
|
||||
db.rmExpire(t, KVType, k)
|
||||
}
|
||||
|
||||
err := t.Commit()
|
||||
return int64(len(keys)), err
|
||||
}
|
||||
|
||||
// Exists check data exists or not.
|
||||
func (db *DB) Exists(key []byte) (int64, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var err error
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
var v []byte
|
||||
v, err = db.bucket.Get(key)
|
||||
if v != nil && err == nil {
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Get gets the value.
|
||||
func (db *DB) Get(key []byte) ([]byte, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
return db.bucket.Get(key)
|
||||
}
|
||||
|
||||
// GetSlice gets the slice of the data.
|
||||
func (db *DB) GetSlice(key []byte) (store.Slice, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
return db.bucket.GetSlice(key)
|
||||
}
|
||||
|
||||
// GetSet gets the value and sets new value.
|
||||
func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return nil, err
|
||||
} else if err := checkValueSize(value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
oldValue, err := db.bucket.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t.Put(key, value)
|
||||
|
||||
err = t.Commit()
|
||||
|
||||
return oldValue, err
|
||||
}
|
||||
|
||||
// Incr increases the data.
|
||||
func (db *DB) Incr(key []byte) (int64, error) {
|
||||
return db.incr(key, 1)
|
||||
}
|
||||
|
||||
// IncrBy increases the data by increment.
|
||||
func (db *DB) IncrBy(key []byte, increment int64) (int64, error) {
|
||||
return db.incr(key, increment)
|
||||
}
|
||||
|
||||
// MGet gets multi data.
|
||||
func (db *DB) MGet(keys ...[]byte) ([][]byte, error) {
|
||||
values := make([][]byte, len(keys))
|
||||
|
||||
it := db.bucket.NewIterator()
|
||||
defer it.Close()
|
||||
|
||||
for i := range keys {
|
||||
if err := checkKeySize(keys[i]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
values[i] = it.Find(db.encodeKVKey(keys[i]))
|
||||
}
|
||||
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// MSet sets multi data.
|
||||
func (db *DB) MSet(args ...KVPair) error {
|
||||
if len(args) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
var err error
|
||||
var key []byte
|
||||
var value []byte
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
for i := 0; i < len(args); i++ {
|
||||
if err := checkKeySize(args[i].Key); err != nil {
|
||||
return err
|
||||
} else if err := checkValueSize(args[i].Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key = db.encodeKVKey(args[i].Key)
|
||||
|
||||
value = args[i].Value
|
||||
|
||||
t.Put(key, value)
|
||||
|
||||
}
|
||||
|
||||
err = t.Commit()
|
||||
return err
|
||||
}
|
||||
|
||||
// Set sets the data.
|
||||
func (db *DB) Set(key []byte, value []byte) error {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return err
|
||||
} else if err := checkValueSize(value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
t.Put(key, value)
|
||||
|
||||
err = t.Commit()
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// SetNX sets the data if not existed.
|
||||
func (db *DB) SetNX(key []byte, value []byte) (int64, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
} else if err := checkValueSize(value); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var err error
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
var n int64 = 1
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
if v, err := db.bucket.Get(key); err != nil {
|
||||
return 0, err
|
||||
} else if v != nil {
|
||||
n = 0
|
||||
} else {
|
||||
t.Put(key, value)
|
||||
|
||||
err = t.Commit()
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// SetEX sets the data with a TTL.
|
||||
func (db *DB) SetEX(key []byte, duration int64, value []byte) error {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return err
|
||||
} else if err := checkValueSize(value); err != nil {
|
||||
return err
|
||||
} else if duration <= 0 {
|
||||
return errExpireValue
|
||||
}
|
||||
|
||||
ek := db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
t.Put(ek, value)
|
||||
db.expireAt(t, KVType, key, time.Now().Unix()+duration)
|
||||
|
||||
return t.Commit()
|
||||
}
|
||||
|
||||
func (db *DB) flush() (drop int64, err error) {
|
||||
t := db.kvBatch
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
return db.flushType(t, KVType)
|
||||
}
|
||||
|
||||
// Expire expires the data.
|
||||
func (db *DB) Expire(key []byte, duration int64) (int64, error) {
|
||||
if duration <= 0 {
|
||||
return 0, errExpireValue
|
||||
}
|
||||
|
||||
return db.setExpireAt(key, time.Now().Unix()+duration)
|
||||
}
|
||||
|
||||
// ExpireAt expires the data at when.
|
||||
func (db *DB) ExpireAt(key []byte, when int64) (int64, error) {
|
||||
if when <= time.Now().Unix() {
|
||||
return 0, errExpireValue
|
||||
}
|
||||
|
||||
return db.setExpireAt(key, when)
|
||||
}
|
||||
|
||||
// TTL returns the TTL of the data.
|
||||
func (db *DB) TTL(key []byte) (int64, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return db.ttl(KVType, key)
|
||||
}
|
||||
|
||||
// Persist removes the TTL of the data.
|
||||
func (db *DB) Persist(key []byte) (int64, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
t := db.kvBatch
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
n, err := db.rmExpire(t, KVType, key)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
err = t.Commit()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// SetRange sets the data with new value from offset.
|
||||
func (db *DB) SetRange(key []byte, offset int, value []byte) (int64, error) {
|
||||
if len(value) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
} else if len(value)+offset > MaxValueSize {
|
||||
return 0, errValueSize
|
||||
}
|
||||
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
oldValue, err := db.bucket.Get(key)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
extra := offset + len(value) - len(oldValue)
|
||||
if extra > 0 {
|
||||
oldValue = append(oldValue, make([]byte, extra)...)
|
||||
}
|
||||
|
||||
copy(oldValue[offset:], value)
|
||||
|
||||
t.Put(key, oldValue)
|
||||
|
||||
if err := t.Commit(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return int64(len(oldValue)), nil
|
||||
}
|
||||
|
||||
func getRange(start int, end int, valLen int) (int, int) {
|
||||
if start < 0 {
|
||||
start = valLen + start
|
||||
}
|
||||
|
||||
if end < 0 {
|
||||
end = valLen + end
|
||||
}
|
||||
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
|
||||
if end < 0 {
|
||||
end = 0
|
||||
}
|
||||
|
||||
if end >= valLen {
|
||||
end = valLen - 1
|
||||
}
|
||||
return start, end
|
||||
}
|
||||
|
||||
// GetRange gets the range of the data.
|
||||
func (db *DB) GetRange(key []byte, start int, end int) ([]byte, error) {
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
value, err := db.bucket.Get(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
valLen := len(value)
|
||||
|
||||
start, end = getRange(start, end, valLen)
|
||||
|
||||
if start > end {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return value[start : end+1], nil
|
||||
}
|
||||
|
||||
// StrLen returns the length of the data.
|
||||
func (db *DB) StrLen(key []byte) (int64, error) {
|
||||
s, err := db.GetSlice(key)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := s.Size()
|
||||
s.Free()
|
||||
return int64(n), nil
|
||||
}
|
||||
|
||||
// Append appends the value to the data.
|
||||
func (db *DB) Append(key []byte, value []byte) (int64, error) {
|
||||
if len(value) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if err := checkKeySize(key); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
key = db.encodeKVKey(key)
|
||||
|
||||
t := db.kvBatch
|
||||
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
|
||||
oldValue, err := db.bucket.Get(key)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if len(oldValue)+len(value) > MaxValueSize {
|
||||
return 0, errValueSize
|
||||
}
|
||||
|
||||
oldValue = append(oldValue, value...)
|
||||
|
||||
t.Put(key, oldValue)
|
||||
|
||||
if err := t.Commit(); err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return int64(len(oldValue)), nil
|
||||
}

// BitOP performs the bitwise operation op (and/or/xor/not) over the source
// keys and stores the result at destKey, returning the length of the result.
func (db *DB) BitOP(op string, destKey []byte, srcKeys ...[]byte) (int64, error) {
	if err := checkKeySize(destKey); err != nil {
		return 0, err
	}

	op = strings.ToLower(op)
	if len(srcKeys) == 0 {
		return 0, nil
	} else if op == BitNot && len(srcKeys) > 1 {
		return 0, fmt.Errorf("BITOP NOT has only one srckey")
	} else if op != BitNot && len(srcKeys) < 2 {
		// NOT takes exactly one source key; the other ops need at least two.
		return 0, nil
	}

	key := db.encodeKVKey(srcKeys[0])

	value, err := db.bucket.Get(key)
	if err != nil {
		return 0, err
	}

	if op == BitNot {
		for i := 0; i < len(value); i++ {
			value[i] = ^value[i]
		}
	} else {
		for j := 1; j < len(srcKeys); j++ {
			if err := checkKeySize(srcKeys[j]); err != nil {
				return 0, err
			}

			key = db.encodeKVKey(srcKeys[j])
			ovalue, err := db.bucket.Get(key)
			if err != nil {
				return 0, err
			}

			if len(value) < len(ovalue) {
				value, ovalue = ovalue, value
			}

			for i := 0; i < len(ovalue); i++ {
				switch op {
				case BitAND:
					value[i] &= ovalue[i]
				case BitOR:
					value[i] |= ovalue[i]
				case BitXOR:
					value[i] ^= ovalue[i]
				default:
					return 0, fmt.Errorf("invalid op type: %s", op)
				}
			}

			for i := len(ovalue); i < len(value); i++ {
				switch op {
				case BitAND:
					value[i] &= 0
				case BitOR:
					value[i] |= 0
				case BitXOR:
					value[i] ^= 0
				}
			}
		}
	}

	key = db.encodeKVKey(destKey)

	t := db.kvBatch

	t.Lock()
	defer t.Unlock()

	t.Put(key, value)

	if err := t.Commit(); err != nil {
		return 0, err
	}

	return int64(len(value)), nil
}
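
For AND/OR/XOR the longer operand is kept as the accumulator and the shorter one is treated as zero-extended, which is why the second loop runs past len(ovalue). A small self-contained sketch of that rule for AND only; bitAnd is a hypothetical extraction, not a ledisdb function:

package main

import "fmt"

func bitAnd(a, b []byte) []byte {
	if len(a) < len(b) {
		a, b = b, a // keep the longer slice as the accumulator
	}
	for i := 0; i < len(b); i++ {
		a[i] &= b[i]
	}
	for i := len(b); i < len(a); i++ {
		a[i] = 0 // ANDing with the implicit zero tail clears the remainder
	}
	return a
}

func main() {
	fmt.Printf("% x\n", bitAnd([]byte{0xFF, 0xF0, 0x0F}, []byte{0x0F})) // 0f 00 00
}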

var bitsInByte = [256]int32{0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3,
	4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3,
	3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4,
	5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4,
	3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4,
	5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2,
	2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3,
	4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 2, 3, 3, 4, 3, 4, 4,
	5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,
	6, 7, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5,
	6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8}

func numberBitCount(i uint32) uint32 {
	i = i - ((i >> 1) & 0x55555555)
	i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
	return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24
}
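
numberBitCount is the classic SWAR population count, equivalent to math/bits.OnesCount32 (available since Go 1.9). A quick cross-check, written as a hypothetical test in the same package:

package ledis

import (
	"math/bits"
	"testing"
)

func TestNumberBitCount(t *testing.T) {
	for _, v := range []uint32{0, 1, 0xF0F0F0F0, 0xFFFFFFFF, 0xDEADBEEF} {
		if got, want := numberBitCount(v), uint32(bits.OnesCount32(v)); got != want {
			t.Errorf("numberBitCount(%#x) = %d, want %d", v, got, want)
		}
	}
}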

// BitCount returns the number of set bits in the value at key, restricted to
// the byte range [start, end] (Redis-style negative offsets are allowed).
func (db *DB) BitCount(key []byte, start int, end int) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	key = db.encodeKVKey(key)
	value, err := db.bucket.Get(key)
	if err != nil {
		return 0, err
	}

	start, end = getRange(start, end, len(value))
	value = value[start : end+1]

	var n int64

	// count four bytes at a time, then fall back to the per-byte table
	pos := 0
	for ; pos+4 <= len(value); pos = pos + 4 {
		n += int64(numberBitCount(binary.BigEndian.Uint32(value[pos : pos+4])))
	}

	for ; pos < len(value); pos++ {
		n += int64(bitsInByte[value[pos]])
	}

	return n, nil
}

// BitPos returns the position of the first bit equal to on (0 or 1) within
// the byte range [start, end], or -1 if there is no such bit.
func (db *DB) BitPos(key []byte, on int, start int, end int) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	if (on & ^1) != 0 {
		return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
	}

	// whole bytes of the "wrong" bit value can be skipped outright
	var skipValue uint8
	if on == 0 {
		skipValue = 0xFF
	}

	key = db.encodeKVKey(key)
	value, err := db.bucket.Get(key)
	if err != nil {
		return 0, err
	}

	start, end = getRange(start, end, len(value))
	value = value[start : end+1]

	for i, v := range value {
		if uint8(v) != skipValue {
			for j := 0; j < 8; j++ {
				isNull := uint8(v)&(1<<uint8(7-j)) == 0

				if (on == 1 && !isNull) || (on == 0 && isNull) {
					return int64((start+i)*8 + j), nil
				}
			}
		}
	}

	return -1, nil
}
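
Worked example of the bit numbering BitPos uses: bit 0 is the most significant bit of byte 0. For the value {0x00, 0x10} with on = 1, byte 0 equals skipValue and is skipped; byte 1 first has a set bit at j = 3, since 1<<(7-3) = 0x10; and the returned position is 1*8 + 3 = 11. The returned index is absolute in the original value because start is added back in, even though the loop iterates over the re-sliced range.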

// SetBit sets the bit at offset to on (0 or 1) and returns the previous bit value.
func (db *DB) SetBit(key []byte, offset int, on int) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	if (on & ^1) != 0 {
		return 0, fmt.Errorf("bit must be 0 or 1, not %d", on)
	}

	t := db.kvBatch

	t.Lock()
	defer t.Unlock()

	key = db.encodeKVKey(key)
	value, err := db.bucket.Get(key)
	if err != nil {
		return 0, err
	}

	byteOffset := int(uint32(offset) >> 3)
	extra := byteOffset + 1 - len(value)
	if extra > 0 {
		value = append(value, make([]byte, extra)...)
	}

	byteVal := value[byteOffset]
	bit := 7 - uint8(uint32(offset)&0x7)
	bitVal := byteVal & (1 << bit)

	byteVal &= ^(1 << bit)
	byteVal |= (uint8(on&0x1) << bit)

	value[byteOffset] = byteVal

	t.Put(key, value)
	if err := t.Commit(); err != nil {
		return 0, err
	}

	if bitVal > 0 {
		return 1, nil
	}

	return 0, nil
}

// GetBit returns the bit of the value at key at offset (0 if offset is past the end).
func (db *DB) GetBit(key []byte, offset int) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	key = db.encodeKVKey(key)

	value, err := db.bucket.Get(key)
	if err != nil {
		return 0, err
	}

	byteOffset := uint32(offset) >> 3
	bit := 7 - uint8(uint32(offset)&0x7)

	if byteOffset >= uint32(len(value)) {
		return 0, nil
	}

	bitVal := value[byteOffset] & (1 << bit)
	if bitVal > 0 {
		return 1, nil
	}

	return 0, nil
}
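
SetBit and GetBit share the same addressing math: the byte index is offset>>3 and the mask selects from the most significant bit down, matching the BitPos numbering. A tiny standalone check (hypothetical snippet, plain Go):

package main

import "fmt"

func main() {
	offset := 11
	byteOffset := offset >> 3   // 11/8 = 1: second byte
	bit := 7 - uint(offset&0x7) // 7-3 = 4: counted down from the MSB
	fmt.Printf("byte %d, mask %#02x\n", byteOffset, 1<<bit) // byte 1, mask 0x10
}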
808
vendor/github.com/siddontang/ledisdb/ledis/t_list.go
generated
vendored
808
vendor/github.com/siddontang/ledisdb/ledis/t_list.go
generated
vendored
@ -1,808 +0,0 @@

package ledis

import (
	"container/list"
	"encoding/binary"
	"errors"
	"sync"
	"time"

	"github.com/siddontang/go/hack"
	"github.com/siddontang/go/log"
	"github.com/siddontang/go/num"
	"github.com/siddontang/ledisdb/store"
	"golang.org/x/net/context"
)

const (
	listHeadSeq int32 = 1
	listTailSeq int32 = 2

	listMinSeq     int32 = 1000
	listMaxSeq     int32 = 1<<31 - 1000
	listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2
)

var errLMetaKey = errors.New("invalid lmeta key")
var errListKey = errors.New("invalid list key")
var errListSeq = errors.New("invalid list sequence, overflow")

func (db *DB) lEncodeMetaKey(key []byte) []byte {
	buf := make([]byte, len(key)+1+len(db.indexVarBuf))
	pos := copy(buf, db.indexVarBuf)
	buf[pos] = LMetaType
	pos++

	copy(buf[pos:], key)
	return buf
}

func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) {
	pos, err := db.checkKeyIndex(ek)
	if err != nil {
		return nil, err
	}

	if pos+1 > len(ek) || ek[pos] != LMetaType {
		return nil, errLMetaKey
	}

	pos++
	return ek[pos:], nil
}

func (db *DB) lEncodeListKey(key []byte, seq int32) []byte {
	buf := make([]byte, len(key)+7+len(db.indexVarBuf))

	pos := copy(buf, db.indexVarBuf)

	buf[pos] = ListType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	binary.BigEndian.PutUint32(buf[pos:], uint32(seq))

	return buf
}

func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) {
	pos := 0
	pos, err = db.checkKeyIndex(ek)
	if err != nil {
		return
	}

	if pos+1 > len(ek) || ek[pos] != ListType {
		err = errListKey
		return
	}

	pos++

	if pos+2 > len(ek) {
		err = errListKey
		return
	}

	keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
	pos += 2
	if keyLen+pos+4 != len(ek) {
		err = errListKey
		return
	}

	key = ek[pos : pos+keyLen]
	seq = int32(binary.BigEndian.Uint32(ek[pos+keyLen:]))
	return
}
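
An encoded list item key is a type tag, a big-endian key length, the raw key, then a big-endian sequence number, so all items of one list sort together by sequence. A hypothetical standalone re-implementation for illustration (the per-database indexVarBuf prefix is omitted, and 0x10 is a made-up stand-in for ListType):

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeListKey(listType byte, key []byte, seq int32) []byte {
	buf := make([]byte, len(key)+7)
	buf[0] = listType                                         // 1-byte type tag
	binary.BigEndian.PutUint16(buf[1:], uint16(len(key)))     // 2-byte key length
	copy(buf[3:], key)                                        // raw key
	binary.BigEndian.PutUint32(buf[3+len(key):], uint32(seq)) // 4-byte sequence
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeListKey(0x10, []byte("k"), 1)) // 10 00 01 6b 00 00 00 01
}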

func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	var headSeq int32
	var tailSeq int32
	var size int32
	var err error

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	metaKey := db.lEncodeMetaKey(key)
	headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
	if err != nil {
		return 0, err
	}

	pushCnt := len(args)
	if pushCnt == 0 {
		return int64(size), nil
	}

	seq := headSeq
	var delta int32 = -1
	if whereSeq == listTailSeq {
		seq = tailSeq
		delta = 1
	}

	// append elements
	if size > 0 {
		seq += delta
	}

	for i := 0; i < pushCnt; i++ {
		ek := db.lEncodeListKey(key, seq+int32(i)*delta)
		t.Put(ek, args[i])
	}

	seq += int32(pushCnt-1) * delta
	if seq <= listMinSeq || seq >= listMaxSeq {
		return 0, errListSeq
	}

	// set meta info
	if whereSeq == listHeadSeq {
		headSeq = seq
	} else {
		tailSeq = seq
	}

	db.lSetMeta(metaKey, headSeq, tailSeq)

	err = t.Commit()

	if err == nil {
		db.lSignalAsReady(key)
	}

	return int64(size) + int64(pushCnt), err
}

func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	var headSeq int32
	var tailSeq int32
	var size int32
	var err error

	metaKey := db.lEncodeMetaKey(key)
	headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
	if err != nil {
		return nil, err
	} else if size == 0 {
		return nil, nil
	}

	var value []byte

	seq := headSeq
	if whereSeq == listTailSeq {
		seq = tailSeq
	}

	itemKey := db.lEncodeListKey(key, seq)
	value, err = db.bucket.Get(itemKey)
	if err != nil {
		return nil, err
	}

	if whereSeq == listHeadSeq {
		headSeq++
	} else {
		tailSeq--
	}

	t.Delete(itemKey)
	size = db.lSetMeta(metaKey, headSeq, tailSeq)
	if size == 0 {
		db.rmExpire(t, ListType, key)
	}

	err = t.Commit()
	return value, err
}
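
The sequence bookkeeping in lpush is easiest to see in isolation: the head grows downward and the tail upward from a midpoint, and the first element of an empty list reuses headSeq. A toy walk-through with a made-up initial sequence (plain Go, no store):

package main

import "fmt"

func main() {
	const initialSeq int32 = 1000 // toy stand-in for listInitialSeq
	headSeq, tailSeq, size := initialSeq, initialSeq, int32(0)

	lpush := func(n int32) { // push n elements at the head
		seq := headSeq
		if size > 0 {
			seq-- // delta = -1: step past the current head first
		}
		headSeq = seq - (n - 1) // elements land at seq, seq-1, ...
		size = tailSeq - headSeq + 1
	}

	lpush(2)
	fmt.Println(headSeq, tailSeq, size) // 999 1000 2
	lpush(1)
	fmt.Println(headSeq, tailSeq, size) // 998 1000 3
}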

func (db *DB) ltrim2(key []byte, startP, stopP int64) (err error) {
	if err := checkKeySize(key); err != nil {
		return err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	var headSeq int32
	var llen int32
	start := int32(startP)
	stop := int32(stopP)

	ek := db.lEncodeMetaKey(key)
	if headSeq, _, llen, err = db.lGetMeta(nil, ek); err != nil {
		return err
	}

	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start >= llen || start > stop {
		db.lDelete(t, key)
		db.rmExpire(t, ListType, key)
		return t.Commit()
	}

	if start < 0 {
		start = 0
	}
	if stop >= llen {
		stop = llen - 1
	}

	if start > 0 {
		for i := int32(0); i < start; i++ {
			t.Delete(db.lEncodeListKey(key, headSeq+i))
		}
	}
	if stop < int32(llen-1) {
		for i := int32(stop + 1); i < llen; i++ {
			t.Delete(db.lEncodeListKey(key, headSeq+i))
		}
	}

	db.lSetMeta(ek, headSeq+start, headSeq+stop)

	return t.Commit()
}

func (db *DB) ltrim(key []byte, trimSize, whereSeq int32) (int32, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	if trimSize == 0 {
		return 0, nil
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	var headSeq int32
	var tailSeq int32
	var size int32
	var err error

	metaKey := db.lEncodeMetaKey(key)
	headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey)
	if err != nil {
		return 0, err
	} else if size == 0 {
		return 0, nil
	}

	var (
		trimStartSeq int32
		trimEndSeq   int32
	)

	if whereSeq == listHeadSeq {
		trimStartSeq = headSeq
		trimEndSeq = num.MinInt32(trimStartSeq+trimSize-1, tailSeq)
		headSeq = trimEndSeq + 1
	} else {
		trimEndSeq = tailSeq
		trimStartSeq = num.MaxInt32(trimEndSeq-trimSize+1, headSeq)
		tailSeq = trimStartSeq - 1
	}

	for trimSeq := trimStartSeq; trimSeq <= trimEndSeq; trimSeq++ {
		itemKey := db.lEncodeListKey(key, trimSeq)
		t.Delete(itemKey)
	}

	size = db.lSetMeta(metaKey, headSeq, tailSeq)
	if size == 0 {
		db.rmExpire(t, ListType, key)
	}

	err = t.Commit()
	return trimEndSeq - trimStartSeq + 1, err
}

// Note: lDelete only deletes the list data itself; related state such as the
// expiration entry is left for the caller to clean up.
func (db *DB) lDelete(t *batch, key []byte) int64 {
	mk := db.lEncodeMetaKey(key)

	var headSeq int32
	var tailSeq int32
	var err error

	it := db.bucket.NewIterator()
	defer it.Close()

	headSeq, tailSeq, _, err = db.lGetMeta(it, mk)
	if err != nil {
		return 0
	}

	var num int64
	startKey := db.lEncodeListKey(key, headSeq)
	stopKey := db.lEncodeListKey(key, tailSeq)

	rit := store.NewRangeIterator(it, &store.Range{
		Min:  startKey,
		Max:  stopKey,
		Type: store.RangeClose})
	for ; rit.Valid(); rit.Next() {
		t.Delete(rit.RawKey())
		num++
	}

	t.Delete(mk)

	return num
}

func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) {
	var v []byte
	if it != nil {
		v = it.Find(ek)
	} else {
		v, err = db.bucket.Get(ek)
	}
	if err != nil {
		return
	} else if v == nil {
		headSeq = listInitialSeq
		tailSeq = listInitialSeq
		size = 0
		return
	} else {
		headSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
		tailSeq = int32(binary.LittleEndian.Uint32(v[4:8]))
		size = tailSeq - headSeq + 1
	}
	return
}

func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
	t := db.listBatch

	size := tailSeq - headSeq + 1
	if size < 0 {
		// todo : log error + panic
		log.Fatalf("invalid meta sequence range [%d, %d]", headSeq, tailSeq)
	} else if size == 0 {
		t.Delete(ek)
	} else {
		buf := make([]byte, 8)

		binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq))
		binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq))

		t.Put(ek, buf)
	}

	return size
}
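
The list meta value is just two little-endian uint32 sequence numbers; the size is always derived as tailSeq-headSeq+1 and never stored. A hypothetical round-trip, outside ledisdb:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint32(buf[0:4], 100) // headSeq
	binary.LittleEndian.PutUint32(buf[4:8], 104) // tailSeq
	head := int32(binary.LittleEndian.Uint32(buf[0:4]))
	tail := int32(binary.LittleEndian.Uint32(buf[4:8]))
	fmt.Println(tail - head + 1) // 5: the size is derived, not stored
}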

func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	if llen, err := db.LLen(key); err != nil || llen == 0 {
		return 0, err
	}

	db.expireAt(t, ListType, key, when)
	if err := t.Commit(); err != nil {
		return 0, err
	}

	return 1, nil
}

// LIndex returns the value at index.
func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var seq int32
	var headSeq int32
	var tailSeq int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey)
	if err != nil {
		return nil, err
	}

	if index >= 0 {
		seq = headSeq + index
	} else {
		seq = tailSeq + index + 1
	}

	sk := db.lEncodeListKey(key, seq)
	v := it.Find(sk)

	return v, nil
}

// LLen returns the length of the list.
func (db *DB) LLen(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	ek := db.lEncodeMetaKey(key)
	_, _, size, err := db.lGetMeta(nil, ek)
	return int64(size), err
}

// LPop pops a value from the head of the list.
func (db *DB) LPop(key []byte) ([]byte, error) {
	return db.lpop(key, listHeadSeq)
}

// LTrim trims the list so that only the elements between start and stop remain.
func (db *DB) LTrim(key []byte, start, stop int64) error {
	return db.ltrim2(key, start, stop)
}

// LTrimFront trims up to trimSize elements from the head of the list.
func (db *DB) LTrimFront(key []byte, trimSize int32) (int32, error) {
	return db.ltrim(key, trimSize, listHeadSeq)
}

// LTrimBack trims up to trimSize elements from the tail of the list.
func (db *DB) LTrimBack(key []byte, trimSize int32) (int32, error) {
	return db.ltrim(key, trimSize, listTailSeq)
}

// LPush pushes values onto the head of the list.
func (db *DB) LPush(key []byte, args ...[]byte) (int64, error) {
	return db.lpush(key, listHeadSeq, args...)
}

// LSet sets the value at index.
func (db *DB) LSet(key []byte, index int32, value []byte) error {
	if err := checkKeySize(key); err != nil {
		return err
	}

	var seq int32
	var headSeq int32
	var tailSeq int32
	var err error
	t := db.listBatch
	t.Lock()
	defer t.Unlock()
	metaKey := db.lEncodeMetaKey(key)

	headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey)
	if err != nil {
		return err
	}

	if index >= 0 {
		seq = headSeq + index
	} else {
		seq = tailSeq + index + 1
	}
	if seq < headSeq || seq > tailSeq {
		return errListIndex
	}
	sk := db.lEncodeListKey(key, seq)
	t.Put(sk, value)
	err = t.Commit()
	return err
}

// LRange returns the elements of the list between start and stop.
func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var headSeq int32
	var llen int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil {
		return nil, err
	}

	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start < 0 {
		start = 0
	}

	if start > stop || start >= llen {
		return [][]byte{}, nil
	}

	if stop >= llen {
		stop = llen - 1
	}

	limit := (stop - start) + 1
	headSeq += start

	v := make([][]byte, 0, limit)

	startKey := db.lEncodeListKey(key, headSeq)
	rit := store.NewRangeLimitIterator(it,
		&store.Range{
			Min:  startKey,
			Max:  nil,
			Type: store.RangeClose},
		&store.Limit{
			Offset: 0,
			Count:  int(limit)})

	for ; rit.Valid(); rit.Next() {
		v = append(v, rit.Value())
	}

	return v, nil
}

// RPop pops a value from the tail of the list.
func (db *DB) RPop(key []byte) ([]byte, error) {
	return db.lpop(key, listTailSeq)
}

// RPush pushes values onto the tail of the list.
func (db *DB) RPush(key []byte, args ...[]byte) (int64, error) {
	return db.lpush(key, listTailSeq, args...)
}

// LClear clears the list.
func (db *DB) LClear(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	num := db.lDelete(t, key)
	db.rmExpire(t, ListType, key)

	err := t.Commit()
	return num, err
}

// LMclear clears multiple lists.
func (db *DB) LMclear(keys ...[]byte) (int64, error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return 0, err
		}

		db.lDelete(t, key)
		db.rmExpire(t, ListType, key)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

func (db *DB) lFlush() (drop int64, err error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()
	return db.flushType(t, ListType)
}

// LExpire sets the list to expire after duration seconds.
func (db *DB) LExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.lExpireAt(key, time.Now().Unix()+duration)
}

// LExpireAt sets the list to expire at the unix timestamp when.
func (db *DB) LExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.lExpireAt(key, when)
}

// LTTL returns the TTL of the list.
func (db *DB) LTTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(ListType, key)
}

// LPersist removes the TTL of the list.
func (db *DB) LPersist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.rmExpire(t, ListType, key)
	if err != nil {
		return 0, err
	}

	err = t.Commit()
	return n, err
}

func (db *DB) lEncodeMinKey() []byte {
	return db.lEncodeMetaKey(nil)
}

func (db *DB) lEncodeMaxKey() []byte {
	ek := db.lEncodeMetaKey(nil)
	ek[len(ek)-1] = LMetaType + 1
	return ek
}

// BLPop pops from the head of the first non-empty list, blocking up to timeout.
func (db *DB) BLPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
	return db.lblockPop(keys, listHeadSeq, timeout)
}

// BRPop pops from the tail of the first non-empty list, blocking up to timeout.
func (db *DB) BRPop(keys [][]byte, timeout time.Duration) ([]interface{}, error) {
	return db.lblockPop(keys, listTailSeq, timeout)
}

// LKeyExists checks whether the list exists.
func (db *DB) LKeyExists(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}
	sk := db.lEncodeMetaKey(key)
	v, err := db.bucket.Get(sk)
	if v != nil && err == nil {
		return 1, nil
	}
	return 0, err
}

func (db *DB) lblockPop(keys [][]byte, whereSeq int32, timeout time.Duration) ([]interface{}, error) {
	for {
		var ctx context.Context
		var cancel context.CancelFunc
		if timeout > 0 {
			ctx, cancel = context.WithTimeout(context.Background(), timeout)
		} else {
			ctx, cancel = context.WithCancel(context.Background())
		}

		for _, key := range keys {
			v, err := db.lbkeys.popOrWait(db, key, whereSeq, cancel)

			if err != nil {
				cancel()
				return nil, err
			} else if v != nil {
				cancel()
				return []interface{}{key, v}, nil
			}
		}

		// block until the timeout fires or a push signals one of the keys
		<-ctx.Done()
		cancel()

		// A deadline error means the timeout elapsed, so return;
		// otherwise a push canceled the context and we try the pop again.
		if ctx.Err() == context.DeadlineExceeded {
			return nil, nil
		}
	}
}

func (db *DB) lSignalAsReady(key []byte) {
	db.lbkeys.signal(key)
}

type lBlockKeys struct {
	sync.Mutex

	keys map[string]*list.List
}

func newLBlockKeys() *lBlockKeys {
	l := new(lBlockKeys)

	l.keys = make(map[string]*list.List)
	return l
}

func (l *lBlockKeys) signal(key []byte) {
	l.Lock()
	defer l.Unlock()

	s := hack.String(key)
	fns, ok := l.keys[s]
	if !ok {
		return
	}
	for e := fns.Front(); e != nil; e = e.Next() {
		fn := e.Value.(context.CancelFunc)
		fn()
	}

	delete(l.keys, s)
}

func (l *lBlockKeys) popOrWait(db *DB, key []byte, whereSeq int32, fn context.CancelFunc) ([]interface{}, error) {
	v, err := db.lpop(key, whereSeq)
	if err != nil {
		return nil, err
	} else if v != nil {
		return []interface{}{key, v}, nil
	}

	l.Lock()

	s := hack.String(key)
	chs, ok := l.keys[s]
	if !ok {
		chs = list.New()
		l.keys[s] = chs
	}

	chs.PushBack(fn)
	l.Unlock()
	return nil, nil
}
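
The blocking pop relies on an unusual use of context: each waiter parks on ctx.Done(), and a push "signals" readiness by invoking the stored CancelFunc, so cancellation means "retry" while DeadlineExceeded means "timed out". A standalone sketch of that wake-up trick (hypothetical, outside ledisdb):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // plays the role of lSignalAsReady on a push
	}()
	<-ctx.Done()
	if ctx.Err() == context.DeadlineExceeded {
		fmt.Println("timed out") // BLPop would return nil here
	} else {
		fmt.Println("woken up, retry the pop") // canceled: data may be ready
	}
}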
644
vendor/github.com/siddontang/ledisdb/ledis/t_set.go
generated
vendored
644
vendor/github.com/siddontang/ledisdb/ledis/t_set.go
generated
vendored
@ -1,644 +0,0 @@

package ledis

import (
	"encoding/binary"
	"errors"
	"time"

	"github.com/siddontang/go/hack"
	"github.com/siddontang/ledisdb/store"
)

var errSetKey = errors.New("invalid set key")
var errSSizeKey = errors.New("invalid ssize key")

// For set operation type.
const (
	setStartSep byte = ':'
	setStopSep  byte = setStartSep + 1
	UnionType   byte = 51
	DiffType    byte = 52
	InterType   byte = 53
)

func checkSetKMSize(key []byte, member []byte) error {
	if len(key) > MaxKeySize || len(key) == 0 {
		return errKeySize
	} else if len(member) > MaxSetMemberSize || len(member) == 0 {
		return errSetMemberSize
	}
	return nil
}

func (db *DB) sEncodeSizeKey(key []byte) []byte {
	buf := make([]byte, len(key)+1+len(db.indexVarBuf))

	pos := copy(buf, db.indexVarBuf)
	buf[pos] = SSizeType

	pos++

	copy(buf[pos:], key)
	return buf
}

func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) {
	pos, err := db.checkKeyIndex(ek)
	if err != nil {
		return nil, err
	}

	if pos+1 > len(ek) || ek[pos] != SSizeType {
		return nil, errSSizeKey
	}
	pos++

	return ek[pos:], nil
}

func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte {
	buf := make([]byte, len(key)+len(member)+1+1+2+len(db.indexVarBuf))

	pos := copy(buf, db.indexVarBuf)

	buf[pos] = SetType
	pos++

	binary.BigEndian.PutUint16(buf[pos:], uint16(len(key)))
	pos += 2

	copy(buf[pos:], key)
	pos += len(key)

	buf[pos] = setStartSep
	pos++
	copy(buf[pos:], member)

	return buf
}

func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) {
	pos, err := db.checkKeyIndex(ek)
	if err != nil {
		return nil, nil, err
	}

	if pos+1 > len(ek) || ek[pos] != SetType {
		return nil, nil, errSetKey
	}

	pos++

	if pos+2 > len(ek) {
		return nil, nil, errSetKey
	}

	keyLen := int(binary.BigEndian.Uint16(ek[pos:]))
	pos += 2

	if keyLen+pos > len(ek) {
		return nil, nil, errSetKey
	}

	key := ek[pos : pos+keyLen]
	pos += keyLen

	if ek[pos] != hashStartSep {
		return nil, nil, errSetKey
	}

	pos++
	member := ek[pos:]
	return key, member, nil
}
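
A set member key is the type tag, the big-endian key length, the key, a ':' separator, then the member, so all members of one set form one contiguous key range between sEncodeStartKey and sEncodeStopKey. A hypothetical standalone encode (indexVarBuf omitted; 0x30 is a made-up stand-in for SetType):

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeSetKey(setType byte, key, member []byte) []byte {
	buf := make([]byte, len(key)+len(member)+4)
	buf[0] = setType
	binary.BigEndian.PutUint16(buf[1:], uint16(len(key)))
	copy(buf[3:], key)
	buf[3+len(key)] = ':' // setStartSep
	copy(buf[4+len(key):], member)
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeSetKey(0x30, []byte("k"), []byte("m"))) // 30 00 01 6b 3a 6d
}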

func (db *DB) sEncodeStartKey(key []byte) []byte {
	return db.sEncodeSetKey(key, nil)
}

func (db *DB) sEncodeStopKey(key []byte) []byte {
	k := db.sEncodeSetKey(key, nil)

	k[len(k)-1] = setStopSep

	return k
}

func (db *DB) sFlush() (drop int64, err error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	return db.flushType(t, SetType)
}

func (db *DB) sDelete(t *batch, key []byte) int64 {
	sk := db.sEncodeSizeKey(key)
	start := db.sEncodeStartKey(key)
	stop := db.sEncodeStopKey(key)

	var num int64
	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		t.Delete(it.RawKey())
		num++
	}

	it.Close()

	t.Delete(sk)
	return num
}

func (db *DB) sIncrSize(key []byte, delta int64) (int64, error) {
	t := db.setBatch
	sk := db.sEncodeSizeKey(key)

	var err error
	var size int64
	if size, err = Int64(db.bucket.Get(sk)); err != nil {
		return 0, err
	}

	size += delta
	if size <= 0 {
		size = 0
		t.Delete(sk)
		db.rmExpire(t, SetType, key)
	} else {
		t.Put(sk, PutInt64(size))
	}

	return size, nil
}

func (db *DB) sExpireAt(key []byte, when int64) (int64, error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	if scnt, err := db.SCard(key); err != nil || scnt == 0 {
		return 0, err
	}
	db.expireAt(t, SetType, key, when)
	if err := t.Commit(); err != nil {
		return 0, err
	}

	return 1, nil
}

func (db *DB) sSetItem(key []byte, member []byte) (int64, error) {
	t := db.setBatch
	ek := db.sEncodeSetKey(key, member)

	var n int64 = 1
	if v, _ := db.bucket.Get(ek); v != nil {
		n = 0
	} else {
		if _, err := db.sIncrSize(key, 1); err != nil {
			return 0, err
		}
	}

	t.Put(ek, nil)
	return n, nil
}

// SAdd adds the given members to the set.
func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	var err error
	var ek []byte
	var num int64
	for i := 0; i < len(args); i++ {
		if err := checkSetKMSize(key, args[i]); err != nil {
			return 0, err
		}

		ek = db.sEncodeSetKey(key, args[i])

		if v, err := db.bucket.Get(ek); err != nil {
			return 0, err
		} else if v == nil {
			num++
		}

		t.Put(ek, nil)
	}

	if _, err = db.sIncrSize(key, num); err != nil {
		return 0, err
	}

	err = t.Commit()
	return num, err
}

// SCard returns the number of members in the set.
func (db *DB) SCard(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	sk := db.sEncodeSizeKey(key)

	return Int64(db.bucket.Get(sk))
}

func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) {
	destMap := make(map[string]bool)

	members, err := db.SMembers(keys[0])
	if err != nil {
		return nil, err
	}

	for _, m := range members {
		destMap[hack.String(m)] = true
	}

	for _, k := range keys[1:] {
		members, err := db.SMembers(k)
		if err != nil {
			return nil, err
		}

		for _, m := range members {
			delete(destMap, hack.String(m)) // a no-op when m was never in the first set
		}
		// the empty set minus anything is empty, so stop early
		if len(destMap) == 0 {
			return nil, nil
		}
	}

	slice := make([][]byte, len(destMap))
	idx := 0
	for k, v := range destMap {
		if !v {
			continue
		}
		slice[idx] = []byte(k)
		idx++
	}

	return slice, nil
}

// SDiff returns the members of the first set that are in none of the others.
func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) {
	v, err := db.sDiffGeneric(keys...)
	return v, err
}

// SDiffStore computes the difference of the sets and stores it in the dest set.
func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) {
	n, err := db.sStoreGeneric(dstKey, DiffType, keys...)
	return n, err
}

// SKeyExists checks whether the set exists.
func (db *DB) SKeyExists(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}
	sk := db.sEncodeSizeKey(key)
	v, err := db.bucket.Get(sk)
	if v != nil && err == nil {
		return 1, nil
	}
	return 0, err
}

func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) {
	destMap := make(map[string]bool)

	members, err := db.SMembers(keys[0])
	if err != nil {
		return nil, err
	}

	for _, m := range members {
		destMap[hack.String(m)] = true
	}

	for _, key := range keys[1:] {
		if err := checkKeySize(key); err != nil {
			return nil, err
		}

		members, err := db.SMembers(key)
		if err != nil {
			return nil, err
		} else if len(members) == 0 {
			return nil, err
		}

		tempMap := make(map[string]bool)
		for _, member := range members {
			if err := checkKeySize(member); err != nil {
				return nil, err
			}
			if _, ok := destMap[hack.String(member)]; ok {
				tempMap[hack.String(member)] = true // mark this item as selected
			}
		}
		destMap = tempMap // shrink the candidate set to the running intersection
		if len(destMap) == 0 {
			return nil, nil
		}
	}

	slice := make([][]byte, len(destMap))
	idx := 0
	for k, v := range destMap {
		if !v {
			continue
		}

		slice[idx] = []byte(k)
		idx++
	}

	return slice, nil
}

// SInter returns the intersection of the sets.
func (db *DB) SInter(keys ...[]byte) ([][]byte, error) {
	v, err := db.sInterGeneric(keys...)
	return v, err
}

// SInterStore computes the intersection of the sets and stores it in the dest set.
func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) {
	n, err := db.sStoreGeneric(dstKey, InterType, keys...)
	return n, err
}

// SIsMember checks whether member is in the set.
func (db *DB) SIsMember(key []byte, member []byte) (int64, error) {
	ek := db.sEncodeSetKey(key, member)

	var n int64 = 1
	if v, err := db.bucket.Get(ek); err != nil {
		return 0, err
	} else if v == nil {
		n = 0
	}
	return n, nil
}

// SMembers returns all members of the set.
func (db *DB) SMembers(key []byte) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	start := db.sEncodeStartKey(key)
	stop := db.sEncodeStopKey(key)

	v := make([][]byte, 0, 16)

	it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1)
	defer it.Close()

	for ; it.Valid(); it.Next() {
		_, m, err := db.sDecodeSetKey(it.Key())
		if err != nil {
			return nil, err
		}

		v = append(v, m)
	}

	return v, nil
}

// SRem removes the given members from the set.
func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	var ek []byte
	var v []byte
	var err error

	it := db.bucket.NewIterator()
	defer it.Close()

	var num int64
	for i := 0; i < len(args); i++ {
		if err := checkSetKMSize(key, args[i]); err != nil {
			return 0, err
		}

		ek = db.sEncodeSetKey(key, args[i])

		v = it.RawFind(ek)
		if v == nil {
			continue
		}
		num++
		t.Delete(ek)
	}

	if _, err = db.sIncrSize(key, -num); err != nil {
		return 0, err
	}

	err = t.Commit()
	return num, err
}

func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) {
	dstMap := make(map[string]bool)

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return nil, err
		}

		members, err := db.SMembers(key)
		if err != nil {
			return nil, err
		}

		for _, member := range members {
			dstMap[hack.String(member)] = true
		}
	}

	slice := make([][]byte, len(dstMap))
	idx := 0
	for k, v := range dstMap {
		if !v {
			continue
		}
		slice[idx] = []byte(k)
		idx++
	}

	return slice, nil
}

// SUnion returns the union of the sets.
func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) {
	v, err := db.sUnionGeneric(keys...)
	return v, err
}

// SUnionStore computes the union of the sets and stores it in the dest set.
func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) {
	n, err := db.sStoreGeneric(dstKey, UnionType, keys...)
	return n, err
}

func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) {
	if err := checkKeySize(dstKey); err != nil {
		return 0, err
	}

	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	db.sDelete(t, dstKey)

	var err error
	var ek []byte
	var v [][]byte

	switch optType {
	case UnionType:
		v, err = db.sUnionGeneric(keys...)
	case DiffType:
		v, err = db.sDiffGeneric(keys...)
	case InterType:
		v, err = db.sInterGeneric(keys...)
	}

	if err != nil {
		return 0, err
	}

	for _, m := range v {
		if err := checkSetKMSize(dstKey, m); err != nil {
			return 0, err
		}

		ek = db.sEncodeSetKey(dstKey, m)

		if _, err := db.bucket.Get(ek); err != nil {
			return 0, err
		}

		t.Put(ek, nil)
	}

	var n = int64(len(v))
	sk := db.sEncodeSizeKey(dstKey)
	t.Put(sk, PutInt64(n))

	if err = t.Commit(); err != nil {
		return 0, err
	}
	return n, nil
}

// SClear clears the set.
func (db *DB) SClear(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	num := db.sDelete(t, key)
	db.rmExpire(t, SetType, key)

	err := t.Commit()
	return num, err
}

// SMclear clears multiple sets.
func (db *DB) SMclear(keys ...[]byte) (int64, error) {
	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	for _, key := range keys {
		if err := checkKeySize(key); err != nil {
			return 0, err
		}

		db.sDelete(t, key)
		db.rmExpire(t, SetType, key)
	}

	err := t.Commit()
	return int64(len(keys)), err
}

// SExpire sets the set to expire after duration seconds.
func (db *DB) SExpire(key []byte, duration int64) (int64, error) {
	if duration <= 0 {
		return 0, errExpireValue
	}

	return db.sExpireAt(key, time.Now().Unix()+duration)
}

// SExpireAt sets the set to expire at the unix timestamp when.
func (db *DB) SExpireAt(key []byte, when int64) (int64, error) {
	if when <= time.Now().Unix() {
		return 0, errExpireValue
	}

	return db.sExpireAt(key, when)
}

// STTL returns the TTL of the set.
func (db *DB) STTL(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return -1, err
	}

	return db.ttl(SetType, key)
}

// SPersist removes the TTL of the set.
func (db *DB) SPersist(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	t := db.setBatch
	t.Lock()
	defer t.Unlock()

	n, err := db.rmExpire(t, SetType, key)
	if err != nil {
		return 0, err
	}
	err = t.Commit()
	return n, err
}
217
vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go
generated
vendored
217
vendor/github.com/siddontang/ledisdb/ledis/t_ttl.go
generated
vendored
@ -1,217 +0,0 @@

package ledis

import (
	"encoding/binary"
	"errors"
	"sync"
	"time"

	"github.com/siddontang/ledisdb/store"
)

var (
	errExpMetaKey = errors.New("invalid expire meta key")
	errExpTimeKey = errors.New("invalid expire time key")
)

type onExpired func(*batch, []byte) int64

type ttlChecker struct {
	sync.Mutex
	db  *DB
	txs []*batch
	cbs []onExpired

	// next check time
	nc int64
}

var errExpType = errors.New("invalid expire type")

func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte {
	buf := make([]byte, len(key)+10+len(db.indexVarBuf))

	pos := copy(buf, db.indexVarBuf)

	buf[pos] = ExpTimeType
	pos++

	binary.BigEndian.PutUint64(buf[pos:], uint64(when))
	pos += 8

	buf[pos] = dataType
	pos++

	copy(buf[pos:], key)

	return buf
}

func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte {
	buf := make([]byte, len(key)+2+len(db.indexVarBuf))

	pos := copy(buf, db.indexVarBuf)
	buf[pos] = ExpMetaType
	pos++
	buf[pos] = dataType
	pos++

	copy(buf[pos:], key)

	return buf
}

func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) {
	pos, err := db.checkKeyIndex(mk)
	if err != nil {
		return 0, nil, err
	}

	if pos+2 > len(mk) || mk[pos] != ExpMetaType {
		return 0, nil, errExpMetaKey
	}

	return mk[pos+1], mk[pos+2:], nil
}

func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) {
	pos, err := db.checkKeyIndex(tk)
	if err != nil {
		return 0, nil, 0, err
	}

	if pos+10 > len(tk) || tk[pos] != ExpTimeType {
		return 0, nil, 0, errExpTimeKey
	}

	return tk[pos+9], tk[pos+10:], int64(binary.BigEndian.Uint64(tk[pos+1:])), nil
}
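
Putting the big-endian expiry timestamp before the data type and key in expEncodeTimeKey means a plain ascending range scan visits entries in expiry order, which is exactly what the TTL checker below relies on. A hypothetical standalone encode demonstrating the ordering (0x20 and 0x01 are made-up type values):

package main

import (
	"encoding/binary"
	"fmt"
)

func encodeTimeKey(expTimeType, dataType byte, key []byte, when int64) []byte {
	buf := make([]byte, len(key)+10)
	buf[0] = expTimeType
	binary.BigEndian.PutUint64(buf[1:], uint64(when))
	buf[9] = dataType
	copy(buf[10:], key)
	return buf
}

func main() {
	a := encodeTimeKey(0x20, 0x01, []byte("k"), 1700000000)
	b := encodeTimeKey(0x20, 0x01, []byte("a"), 1700000001)
	fmt.Println(string(a) < string(b)) // true: earlier expiry sorts first, regardless of key
}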

func (db *DB) expire(t *batch, dataType byte, key []byte, duration int64) {
	db.expireAt(t, dataType, key, time.Now().Unix()+duration)
}

func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) {
	mk := db.expEncodeMetaKey(dataType, key)
	tk := db.expEncodeTimeKey(dataType, key, when)

	t.Put(tk, mk)
	t.Put(mk, PutInt64(when))

	db.ttlChecker.setNextCheckTime(when, false)
}

func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) {
	mk := db.expEncodeMetaKey(dataType, key)

	if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 {
		t = -1
	} else {
		t -= time.Now().Unix()
		if t <= 0 {
			t = -1
		}
		// if t == -1 : to remove ????
	}

	return t, err
}

func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) {
	mk := db.expEncodeMetaKey(dataType, key)
	v, err := db.bucket.Get(mk)
	if err != nil {
		return 0, err
	} else if v == nil {
		return 0, nil
	}

	when, err2 := Int64(v, nil)
	if err2 != nil {
		return 0, err2
	}

	tk := db.expEncodeTimeKey(dataType, key, when)
	t.Delete(mk)
	t.Delete(tk)
	return 1, nil
}

func (c *ttlChecker) register(dataType byte, t *batch, f onExpired) {
	c.txs[dataType] = t
	c.cbs[dataType] = f
}

func (c *ttlChecker) setNextCheckTime(when int64, force bool) {
	c.Lock()
	if force {
		c.nc = when
	} else if c.nc > when {
		c.nc = when
	}
	c.Unlock()
}

func (c *ttlChecker) check() {
	now := time.Now().Unix()

	c.Lock()
	nc := c.nc
	c.Unlock()

	if now < nc {
		return
	}

	nc = now + 3600

	db := c.db
	dbGet := db.bucket.Get

	minKey := db.expEncodeTimeKey(NoneType, nil, 0)
	maxKey := db.expEncodeTimeKey(maxDataType, nil, nc)

	it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1)
	for ; it.Valid(); it.Next() {
		tk := it.RawKey()
		mk := it.RawValue()

		dt, k, nt, err := db.expDecodeTimeKey(tk)
		if err != nil {
			continue
		}

		if nt > now {
			// the next TTL check time is nt
			nc = nt
			break
		}

		t := c.txs[dt]
		cb := c.cbs[dt]
		if t == nil || cb == nil {
			continue
		}

		t.Lock()

		if exp, err := Int64(dbGet(mk)); err == nil {
			// check the expiration again under the lock
			if exp <= now {
				cb(t, k)
				t.Delete(tk)
				t.Delete(mk)

				t.Commit()
			}
		}

		t.Unlock()
	}
	it.Close()

	c.setNextCheckTime(nc, true)
}
1093
vendor/github.com/siddontang/ledisdb/ledis/t_zset.go
generated
vendored
1093
vendor/github.com/siddontang/ledisdb/ledis/t_zset.go
generated
vendored
File diff suppressed because it is too large
103
vendor/github.com/siddontang/ledisdb/ledis/util.go
generated
vendored
103
vendor/github.com/siddontang/ledisdb/ledis/util.go
generated
vendored
@ -1,103 +0,0 @@

package ledis

import (
	"encoding/binary"
	"errors"
	"strconv"

	"github.com/siddontang/go/hack"
)

var errIntNumber = errors.New("invalid integer")

/*
Below I forget why I use little endian to store int.
Maybe I was foolish at that time.
*/

// Int64 decodes a 64-bit integer from its little-endian encoding.
func Int64(v []byte, err error) (int64, error) {
	if err != nil {
		return 0, err
	} else if len(v) == 0 {
		return 0, nil
	} else if len(v) != 8 {
		return 0, errIntNumber
	}

	return int64(binary.LittleEndian.Uint64(v)), nil
}

// Uint64 decodes an unsigned 64-bit integer from its little-endian encoding.
func Uint64(v []byte, err error) (uint64, error) {
	if err != nil {
		return 0, err
	} else if len(v) == 0 {
		return 0, nil
	} else if len(v) != 8 {
		return 0, errIntNumber
	}

	return binary.LittleEndian.Uint64(v), nil
}

// PutInt64 encodes a 64-bit integer in little-endian order.
func PutInt64(v int64) []byte {
	b := make([]byte, 8)
	binary.LittleEndian.PutUint64(b, uint64(v))
	return b
}

// StrInt64 parses a 64-bit integer from its decimal string form.
func StrInt64(v []byte, err error) (int64, error) {
	if err != nil {
		return 0, err
	} else if v == nil {
		return 0, nil
	} else {
		return strconv.ParseInt(hack.String(v), 10, 64)
	}
}

// StrUint64 parses an unsigned 64-bit integer from its decimal string form.
func StrUint64(v []byte, err error) (uint64, error) {
	if err != nil {
		return 0, err
	} else if v == nil {
		return 0, nil
	} else {
		return strconv.ParseUint(hack.String(v), 10, 64)
	}
}

// StrInt32 parses a 32-bit integer from its decimal string form.
func StrInt32(v []byte, err error) (int32, error) {
	if err != nil {
		return 0, err
	} else if v == nil {
		return 0, nil
	} else {
		res, err := strconv.ParseInt(hack.String(v), 10, 32)
		return int32(res), err
	}
}

// StrInt8 parses an 8-bit integer from its decimal string form.
func StrInt8(v []byte, err error) (int8, error) {
	if err != nil {
		return 0, err
	} else if v == nil {
		return 0, nil
	} else {
		res, err := strconv.ParseInt(hack.String(v), 10, 8)
		return int8(res), err
	}
}

// AsyncNotify notifies the channel without blocking.
func AsyncNotify(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
	}
}
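
A round-trip of the little-endian helpers above, written as a hypothetical test in the same package:

package ledis

import "testing"

func TestInt64RoundTrip(t *testing.T) {
	for _, v := range []int64{0, 1, -1, 1 << 40} {
		got, err := Int64(PutInt64(v), nil)
		if err != nil || got != v {
			t.Errorf("Int64(PutInt64(%d)) = %d, %v", v, got, err)
		}
	}
}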
363
vendor/github.com/siddontang/ledisdb/rpl/file_io.go
generated
vendored
363
vendor/github.com/siddontang/ledisdb/rpl/file_io.go
generated
vendored
@ -1,363 +0,0 @@

package rpl

import (
	"fmt"
	"io"
	"os"

	"github.com/edsrzf/mmap-go"
	"github.com/siddontang/go/log"
)

//like leveldb or rocksdb file interface, haha!

type writeFile interface {
	Sync() error
	Write(b []byte) (n int, err error)
	Close() error
	ReadAt(buf []byte, offset int64) (int, error)
	Truncate(size int64) error
	SetOffset(o int64)
	Name() string
	Size() int
	Offset() int64
}

type readFile interface {
	ReadAt(buf []byte, offset int64) (int, error)
	Close() error
	Size() int
	Name() string
}

type rawWriteFile struct {
	writeFile
	f      *os.File
	offset int64
	name   string
}

func newRawWriteFile(name string, size int64) (writeFile, error) {
	m := new(rawWriteFile)
	var err error

	m.name = name

	m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		return nil, err
	}

	return m, nil
}

func (m *rawWriteFile) Close() error {
	if err := m.f.Truncate(m.offset); err != nil {
		return fmt.Errorf("close truncate %s error %s", m.name, err.Error())
	}

	if err := m.f.Close(); err != nil {
		return fmt.Errorf("close %s error %s", m.name, err.Error())
	}

	return nil
}

func (m *rawWriteFile) Sync() error {
	return m.f.Sync()
}

func (m *rawWriteFile) Write(b []byte) (n int, err error) {
	n, err = m.f.WriteAt(b, m.offset)
	if err != nil {
		return
	} else if n != len(b) {
		err = io.ErrShortWrite
		return
	}

	m.offset += int64(n)
	return
}

func (m *rawWriteFile) ReadAt(buf []byte, offset int64) (int, error) {
	return m.f.ReadAt(buf, offset)
}

func (m *rawWriteFile) Truncate(size int64) error {
	var err error
	if err = m.f.Truncate(size); err != nil {
		return err
	}

	if m.offset > size {
		m.offset = size
	}
	return nil
}

func (m *rawWriteFile) SetOffset(o int64) {
	m.offset = o
}

func (m *rawWriteFile) Offset() int64 {
	return m.offset
}

func (m *rawWriteFile) Name() string {
	return m.name
}

func (m *rawWriteFile) Size() int {
	st, _ := m.f.Stat()
	return int(st.Size())
}

type rawReadFile struct {
	readFile

	f    *os.File
	name string
}

func newRawReadFile(name string) (readFile, error) {
	m := new(rawReadFile)

	var err error
	m.f, err = os.Open(name)
	m.name = name

	if err != nil {
		return nil, err
	}

	return m, err
}

func (m *rawReadFile) Close() error {
	return m.f.Close()
}

func (m *rawReadFile) Size() int {
	st, _ := m.f.Stat()
	return int(st.Size())
}

func (m *rawReadFile) ReadAt(b []byte, offset int64) (int, error) {
	return m.f.ReadAt(b, offset)
}

func (m *rawReadFile) Name() string {
	return m.name
}

/////////////////////////////////////////////////

type mmapWriteFile struct {
	writeFile

	f      *os.File
	m      mmap.MMap
	name   string
	size   int64
	offset int64
}

func newMmapWriteFile(name string, size int64) (writeFile, error) {
	m := new(mmapWriteFile)

	m.name = name

	var err error

	m.f, err = os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		return nil, err
	}

	if size == 0 {
		st, _ := m.f.Stat()
		size = st.Size()
	}

	if err = m.f.Truncate(size); err != nil {
		return nil, err
	}

	if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil {
		return nil, err
	}

	m.size = size
	m.offset = 0
	return m, nil
}

func (m *mmapWriteFile) Size() int {
	return int(m.size)
}

func (m *mmapWriteFile) Sync() error {
	return m.m.Flush()
}

func (m *mmapWriteFile) Close() error {
	if err := m.m.Unmap(); err != nil {
		return fmt.Errorf("unmap %s error %s", m.name, err.Error())
	}

	if err := m.f.Truncate(m.offset); err != nil {
		return fmt.Errorf("close truncate %s error %s", m.name, err.Error())
	}

	if err := m.f.Close(); err != nil {
		return fmt.Errorf("close %s error %s", m.name, err.Error())
	}

	return nil
}

func (m *mmapWriteFile) Write(b []byte) (n int, err error) {
	extra := int64(len(b)) - (m.size - m.offset)
	if extra > 0 {
		newSize := m.size + extra + m.size/10
		if err = m.Truncate(newSize); err != nil {
			return
		}
		m.size = newSize
	}

	n = copy(m.m[m.offset:], b)
	if n != len(b) {
		return 0, io.ErrShortWrite
	}

	m.offset += int64(len(b))
	return len(b), nil
}
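
The growth rule in Write is "what is needed plus 10% of the current size": for example, a 100 MB map that needs 1 more MB is re-truncated to 111 MB (1 MB needed plus 100/10 = 10 MB of slack). That slack amortizes the unmap/truncate/remap cycle performed by Truncate below, so steady sequential writes trigger remaps only occasionally rather than on every write.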
|
||||
|
||||
func (m *mmapWriteFile) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
if offset > m.offset {
|
||||
return 0, fmt.Errorf("invalid offset %d", offset)
|
||||
}
|
||||
|
||||
n := copy(buf, m.m[offset:m.offset])
|
||||
if n != len(buf) {
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (m *mmapWriteFile) Truncate(size int64) error {
|
||||
var err error
|
||||
if err = m.m.Unmap(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = m.f.Truncate(size); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.m, err = mmap.Map(m.f, mmap.RDWR, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.size = size
|
||||
if m.offset > m.size {
|
||||
m.offset = m.size
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mmapWriteFile) SetOffset(o int64) {
|
||||
m.offset = o
|
||||
}
|
||||
|
||||
func (m *mmapWriteFile) Offset() int64 {
|
||||
return m.offset
|
||||
}
|
||||
|
||||
func (m *mmapWriteFile) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
type mmapReadFile struct {
|
||||
readFile
|
||||
|
||||
f *os.File
|
||||
m mmap.MMap
|
||||
name string
|
||||
}
|
||||
|
||||
func newMmapReadFile(name string) (readFile, error) {
|
||||
m := new(mmapReadFile)
|
||||
|
||||
m.name = name
|
||||
|
||||
var err error
|
||||
m.f, err = os.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m.m, err = mmap.Map(m.f, mmap.RDONLY, 0)
|
||||
return m, err
|
||||
}
|
||||
|
||||
func (m *mmapReadFile) ReadAt(buf []byte, offset int64) (int, error) {
|
||||
if int64(offset) > int64(len(m.m)) {
|
||||
return 0, fmt.Errorf("invalid offset %d", offset)
|
||||
}
|
||||
|
||||
n := copy(buf, m.m[offset:])
|
||||
if n != len(buf) {
|
||||
return n, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (m *mmapReadFile) Close() error {
|
||||
if m.m != nil {
|
||||
if err := m.m.Unmap(); err != nil {
|
||||
log.Errorf("unmap %s error %s", m.name, err.Error())
|
||||
}
|
||||
m.m = nil
|
||||
}
|
||||
|
||||
if m.f != nil {
|
||||
if err := m.f.Close(); err != nil {
|
||||
log.Errorf("close %s error %s", m.name, err.Error())
|
||||
}
|
||||
m.f = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mmapReadFile) Size() int {
|
||||
return len(m.m)
|
||||
}
|
||||
|
||||
func (m *mmapReadFile) Name() string {
|
||||
return m.name
|
||||
}
|
||||
|
||||
/////////////////////////////////////
|
||||
|
||||
func newWriteFile(useMmap bool, name string, size int64) (writeFile, error) {
|
||||
if useMmap {
|
||||
return newMmapWriteFile(name, size)
|
||||
} else {
|
||||
return newRawWriteFile(name, size)
|
||||
}
|
||||
}
|
||||
|
||||
func newReadFile(useMmap bool, name string) (readFile, error) {
|
||||
if useMmap {
|
||||
return newMmapReadFile(name)
|
||||
} else {
|
||||
return newRawReadFile(name)
|
||||
}
|
||||
}
|
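The grow-on-write arithmetic in mmapWriteFile.Write above is easy to miss: a write that would overflow the mapped region re-truncates the file to the needed size plus roughly 10% headroom, so steady appends do not remap on every call. A minimal, self-contained sketch of just that policy, using a plain byte slice as a hypothetical stand-in for the mapped region (growingBuffer is not part of the vendored code):

package main

import "fmt"

type growingBuffer struct {
	buf    []byte
	offset int64
}

func (g *growingBuffer) Write(b []byte) (int, error) {
	extra := int64(len(b)) - (int64(len(g.buf)) - g.offset)
	if extra > 0 {
		// Same formula as mmapWriteFile.Write: grow by the shortfall
		// plus 10% of the current size as headroom.
		newSize := int64(len(g.buf)) + extra + int64(len(g.buf))/10
		grown := make([]byte, newSize)
		copy(grown, g.buf)
		g.buf = grown
	}
	n := copy(g.buf[g.offset:], b)
	g.offset += int64(n)
	return n, nil
}

func main() {
	g := &growingBuffer{buf: make([]byte, 8)}
	g.Write(make([]byte, 20)) // forces a grow: 8 + 12 shortfall + 8/10 headroom
	fmt.Println(len(g.buf), g.offset) // 20 20
}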
416
vendor/github.com/siddontang/ledisdb/rpl/file_store.go
generated
vendored
@ -1,416 +0,0 @@
package rpl

import (
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"sync"
	"time"

	"github.com/siddontang/go/log"
	"github.com/siddontang/go/num"
	"github.com/siddontang/ledisdb/config"
)

const (
	defaultMaxLogFileSize = int64(256 * 1024 * 1024)

	maxLogFileSize = int64(1024 * 1024 * 1024)

	defaultLogNumInFile = int64(1024 * 1024)
)

/*
File Store:
00000001.data
00000001.meta
00000002.data
00000002.meta

data: log1 data | log2 data | magic data

If the data file has no magic trailer, replication was not closed
gracefully, so the log data must be repaired.
log data: id (bigendian uint64), create time (bigendian uint32), compression (byte), data len (bigendian uint32), data
split data = log0 data + [padding 0] -> file % pagesize() == 0

meta: log1 offset | log2 offset
log offset: bigendian uint32 | bigendian uint32

//sha1 of github.com/siddontang/ledisdb, 20 bytes
magic data = "\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17"

Log IDs must be strictly monotonically increasing:
if log1's ID is 1, log2's must be 2.
*/

type FileStore struct {
	LogStore

	cfg *config.Config

	base string

	rm sync.RWMutex
	wm sync.Mutex

	rs tableReaders
	w  *tableWriter

	quit chan struct{}
}

func NewFileStore(base string, cfg *config.Config) (*FileStore, error) {
	s := new(FileStore)

	s.quit = make(chan struct{})

	var err error

	if err = os.MkdirAll(base, 0755); err != nil {
		return nil, err
	}

	s.base = base

	if cfg.Replication.MaxLogFileSize == 0 {
		cfg.Replication.MaxLogFileSize = defaultMaxLogFileSize
	}

	cfg.Replication.MaxLogFileSize = num.MinInt64(cfg.Replication.MaxLogFileSize, maxLogFileSize)

	s.cfg = cfg

	if err = s.load(); err != nil {
		return nil, err
	}

	index := int64(1)
	if len(s.rs) != 0 {
		index = s.rs[len(s.rs)-1].index + 1
	}

	s.w = newTableWriter(s.base, index, cfg.Replication.MaxLogFileSize, cfg.Replication.UseMmap)
	s.w.SetSyncType(cfg.Replication.SyncLog)

	go s.checkTableReaders()

	return s, nil
}

func (s *FileStore) GetLog(id uint64, l *Log) error {
	//first search in the table writer
	if err := s.w.GetLog(id, l); err == nil {
		return nil
	} else if err != ErrLogNotFound {
		return err
	}

	s.rm.RLock()
	t := s.rs.Search(id)

	if t == nil {
		s.rm.RUnlock()

		return ErrLogNotFound
	}

	err := t.GetLog(id, l)
	s.rm.RUnlock()

	return err
}

func (s *FileStore) FirstID() (uint64, error) {
	id := uint64(0)

	s.rm.RLock()
	if len(s.rs) > 0 {
		id = s.rs[0].first
	} else {
		id = 0
	}
	s.rm.RUnlock()

	if id > 0 {
		return id, nil
	}

	//if id == 0, fall back to the table writer

	return s.w.First(), nil
}

func (s *FileStore) LastID() (uint64, error) {
	id := s.w.Last()
	if id > 0 {
		return id, nil
	}

	//if the table writer has no last id, it may be in the last table reader

	s.rm.RLock()
	if len(s.rs) > 0 {
		id = s.rs[len(s.rs)-1].last
	}
	s.rm.RUnlock()

	return id, nil
}

func (s *FileStore) StoreLog(l *Log) error {
	s.wm.Lock()
	err := s.storeLog(l)
	s.wm.Unlock()
	return err
}

func (s *FileStore) storeLog(l *Log) error {
	err := s.w.StoreLog(l)
	if err == nil {
		return nil
	} else if err != errTableNeedFlush {
		return err
	}

	var r *tableReader
	r, err = s.w.Flush()

	if err != nil {
		log.Fatalf("write table flush error %s, can not store!!!", err.Error())

		s.w.Close()

		return err
	}

	s.rm.Lock()
	s.rs = append(s.rs, r)
	s.rm.Unlock()

	err = s.w.StoreLog(l)

	return err
}

func (s *FileStore) PurgeExpired(n int64) error {
	s.rm.Lock()

	var purges []*tableReader

	t := uint32(time.Now().Unix() - int64(n))

	for i, r := range s.rs {
		if r.lastTime > t {
			purges = append([]*tableReader{}, s.rs[0:i]...)
			n := copy(s.rs, s.rs[i:])
			s.rs = s.rs[0:n]
			break
		}
	}

	s.rm.Unlock()

	s.purgeTableReaders(purges)

	return nil
}

func (s *FileStore) Sync() error {
	return s.w.Sync()
}

func (s *FileStore) Clear() error {
	s.wm.Lock()
	s.rm.Lock()

	defer func() {
		s.rm.Unlock()
		s.wm.Unlock()
	}()

	s.w.Close()

	for i := range s.rs {
		s.rs[i].Close()
	}

	s.rs = tableReaders{}

	if err := os.RemoveAll(s.base); err != nil {
		return err
	}

	if err := os.MkdirAll(s.base, 0755); err != nil {
		return err
	}

	s.w = newTableWriter(s.base, 1, s.cfg.Replication.MaxLogFileSize, s.cfg.Replication.UseMmap)

	return nil
}

func (s *FileStore) Close() error {
	close(s.quit)

	s.wm.Lock()
	s.rm.Lock()

	if r, err := s.w.Flush(); err != nil {
		if err != errNilHandler {
			log.Errorf("close err: %s", err.Error())
		}
	} else {
		r.Close()
		s.w.Close()
	}

	for i := range s.rs {
		s.rs[i].Close()
	}

	s.rs = tableReaders{}

	s.rm.Unlock()
	s.wm.Unlock()

	return nil
}

func (s *FileStore) checkTableReaders() {
	t := time.NewTicker(60 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			s.rm.Lock()

			for _, r := range s.rs {
				if !r.Keepalived() {
					r.Close()
				}
			}

			purges := []*tableReader{}
			maxNum := s.cfg.Replication.MaxLogFileNum
			num := len(s.rs)
			if num > maxNum {
				purges = s.rs[:num-maxNum]
				s.rs = s.rs[num-maxNum:]
			}

			s.rm.Unlock()

			s.purgeTableReaders(purges)

		case <-s.quit:
			return
		}
	}
}

func (s *FileStore) purgeTableReaders(purges []*tableReader) {
	for _, r := range purges {
		dataName := fmtTableDataName(r.base, r.index)
		metaName := fmtTableMetaName(r.base, r.index)
		r.Close()
		if err := os.Remove(dataName); err != nil {
			log.Errorf("purge table data %s err: %s", dataName, err.Error())
		}
		if err := os.Remove(metaName); err != nil {
			log.Errorf("purge table meta %s err: %s", metaName, err.Error())
		}
	}
}

func (s *FileStore) load() error {
	fs, err := ioutil.ReadDir(s.base)
	if err != nil {
		return err
	}

	s.rs = make(tableReaders, 0, len(fs))

	var r *tableReader
	var index int64
	for _, f := range fs {
		if _, err := fmt.Sscanf(f.Name(), "%08d.data", &index); err == nil {
			if r, err = newTableReader(s.base, index, s.cfg.Replication.UseMmap); err != nil {
				log.Errorf("load table %s err: %s", f.Name(), err.Error())
			} else {
				s.rs = append(s.rs, r)
			}
		}
	}

	if err := s.rs.check(); err != nil {
		return err
	}

	return nil
}

type tableReaders []*tableReader

func (ts tableReaders) Len() int {
	return len(ts)
}

func (ts tableReaders) Swap(i, j int) {
	ts[i], ts[j] = ts[j], ts[i]
}

func (ts tableReaders) Less(i, j int) bool {
	return ts[i].first < ts[j].first
}

func (ts tableReaders) Search(id uint64) *tableReader {
	i, j := 0, len(ts)-1

	for i <= j {
		h := i + (j-i)/2

		if ts[h].first <= id && id <= ts[h].last {
			return ts[h]
		} else if ts[h].last < id {
			i = h + 1
		} else {
			j = h - 1
		}
	}

	return nil
}

func (ts tableReaders) check() error {
	if len(ts) == 0 {
		return nil
	}

	sort.Sort(ts)

	first := ts[0].first
	last := ts[0].last
	index := ts[0].index

	if first == 0 || first > last {
		return fmt.Errorf("invalid log in table %s", ts[0])
	}

	for i := 1; i < len(ts); i++ {
		if ts[i].first <= last {
			return fmt.Errorf("invalid first log id %d in table %s", ts[i].first, ts[i])
		}

		if ts[i].index <= index {
			return fmt.Errorf("invalid index %d in table %s", ts[i].index, ts[i])
		}

		first = ts[i].first
		last = ts[i].last
		index = ts[i].index
	}
	return nil
}
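tableReaders.Search above leans on the invariant enforced by check(): tables hold disjoint, strictly ascending [first, last] log-ID ranges, so a log ID can be located by a plain binary search over those ranges. A self-contained sketch under that assumption; the span type is a hypothetical stand-in for *tableReader:

package main

import "fmt"

type span struct{ first, last uint64 }

// search returns the index of the span containing id, or -1.
func search(ts []span, id uint64) int {
	i, j := 0, len(ts)-1
	for i <= j {
		h := i + (j-i)/2
		switch {
		case ts[h].first <= id && id <= ts[h].last:
			return h
		case ts[h].last < id:
			i = h + 1
		default:
			j = h - 1
		}
	}
	return -1
}

func main() {
	ts := []span{{1, 10}, {11, 25}, {26, 40}}
	fmt.Println(search(ts, 17)) // 1
	fmt.Println(search(ts, 99)) // -1
}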
571
vendor/github.com/siddontang/ledisdb/rpl/file_table.go
generated
vendored
@ -1,571 +0,0 @@
package rpl

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"path"
	"sync"
	"time"

	"github.com/siddontang/go/log"
	"github.com/siddontang/go/sync2"
)

var (
	magic             = []byte("\x1c\x1d\xb8\x88\xff\x9e\x45\x55\x40\xf0\x4c\xda\xe0\xce\x47\xde\x65\x48\x71\x17")
	errTableNeedFlush = errors.New("write table need flush")
	errNilHandler     = errors.New("nil write handler")
)

const tableReaderKeepaliveInterval int64 = 30

func fmtTableDataName(base string, index int64) string {
	return path.Join(base, fmt.Sprintf("%08d.data", index))
}

func fmtTableMetaName(base string, index int64) string {
	return path.Join(base, fmt.Sprintf("%08d.meta", index))
}

type tableReader struct {
	sync.Mutex

	base  string
	index int64

	data readFile
	meta readFile

	first uint64
	last  uint64

	lastTime uint32

	lastReadTime sync2.AtomicInt64

	useMmap bool
}

func newTableReader(base string, index int64, useMmap bool) (*tableReader, error) {
	if index <= 0 {
		return nil, fmt.Errorf("invalid index %d", index)
	}
	t := new(tableReader)
	t.base = base
	t.index = index

	t.useMmap = useMmap

	var err error

	if err = t.check(); err != nil {
		log.Errorf("check %d error: %s, try to repair", t.index, err.Error())

		if err = t.repair(); err != nil {
			log.Errorf("repair %d error: %s", t.index, err.Error())
			return nil, err
		}
	}

	t.close()

	return t, nil
}

func (t *tableReader) String() string {
	return fmt.Sprintf("%d", t.index)
}

func (t *tableReader) Close() {
	t.Lock()

	t.close()

	t.Unlock()
}

func (t *tableReader) close() {
	if t.data != nil {
		t.data.Close()
		t.data = nil
	}

	if t.meta != nil {
		t.meta.Close()
		t.meta = nil
	}
}

func (t *tableReader) Keepalived() bool {
	l := t.lastReadTime.Get()
	if l > 0 && time.Now().Unix()-l > tableReaderKeepaliveInterval {
		return false
	}

	return true
}

func (t *tableReader) getLogPos(index int) (uint32, error) {
	var buf [4]byte
	if _, err := t.meta.ReadAt(buf[0:4], int64(index)*4); err != nil {
		return 0, err
	}

	return binary.BigEndian.Uint32(buf[0:4]), nil
}

func (t *tableReader) checkData() error {
	var err error
	//check uses raw file mode
	if t.data, err = newReadFile(false, fmtTableDataName(t.base, t.index)); err != nil {
		return err
	}

	if t.data.Size() < len(magic) {
		return fmt.Errorf("data file %s size %d too short", t.data.Name(), t.data.Size())
	}

	buf := make([]byte, len(magic))
	if _, err := t.data.ReadAt(buf, int64(t.data.Size()-len(magic))); err != nil {
		return err
	}

	if !bytes.Equal(magic, buf) {
		return fmt.Errorf("data file %s invalid magic data %q", t.data.Name(), buf)
	}

	return nil
}

func (t *tableReader) checkMeta() error {
	var err error
	//check uses raw file mode
	if t.meta, err = newReadFile(false, fmtTableMetaName(t.base, t.index)); err != nil {
		return err
	}

	if t.meta.Size()%4 != 0 || t.meta.Size() == 0 {
		return fmt.Errorf("meta file %s invalid offset len %d, must be a non-zero multiple of 4", t.meta.Name(), t.meta.Size())
	}

	return nil
}

func (t *tableReader) check() error {
	var err error

	if err := t.checkMeta(); err != nil {
		return err
	}

	if err := t.checkData(); err != nil {
		return err
	}

	firstLogPos, _ := t.getLogPos(0)
	lastLogPos, _ := t.getLogPos(t.meta.Size()/4 - 1)

	if firstLogPos != 0 {
		return fmt.Errorf("invalid first log pos %d, must be 0", firstLogPos)
	}

	var l Log
	if _, err = t.decodeLogHead(&l, t.data, int64(firstLogPos)); err != nil {
		return fmt.Errorf("decode first log err %s", err.Error())
	}

	t.first = l.ID
	var n int64
	if n, err = t.decodeLogHead(&l, t.data, int64(lastLogPos)); err != nil {
		return fmt.Errorf("decode last log err %s", err.Error())
	} else if n+int64(len(magic)) != int64(t.data.Size()) {
		return fmt.Errorf("extra log data at offset %d", n)
	}

	t.last = l.ID
	t.lastTime = l.CreateTime

	if t.first > t.last {
		return fmt.Errorf("invalid log table first %d > last %d", t.first, t.last)
	} else if (t.last - t.first + 1) != uint64(t.meta.Size()/4) {
		return fmt.Errorf("invalid log table, first %d, last %d, and log num %d", t.first, t.last, t.meta.Size()/4)
	}

	return nil
}

func (t *tableReader) repair() error {
	t.close()

	var err error
	var data writeFile
	var meta writeFile

	//repair uses raw file mode
	if data, err = newWriteFile(false, fmtTableDataName(t.base, t.index), 0); err != nil {
		return err
	}
	data.SetOffset(int64(data.Size()))

	if meta, err = newWriteFile(false, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)); err != nil {
		return err
	}

	var l Log
	var pos int64 = 0
	var nextPos int64 = 0
	b := make([]byte, 4)

	t.first = 0
	t.last = 0

	for {
		nextPos, err = t.decodeLogHead(&l, data, pos)
		if err != nil {
			//on error, all logs from pos may be lost
			log.Errorf("%s may lost logs from %d", data.Name(), pos)
			break
		}

		if l.ID == 0 {
			log.Errorf("%s may lost logs from %d, invalid log 0", data.Name(), pos)
			break
		}

		if t.first == 0 {
			t.first = l.ID
		}

		if t.last == 0 {
			t.last = l.ID
		} else if l.ID <= t.last {
			log.Errorf("%s may lost logs from %d, invalid logid %d", data.Name(), pos, l.ID)
			break
		}

		t.last = l.ID
		t.lastTime = l.CreateTime

		binary.BigEndian.PutUint32(b, uint32(pos))
		meta.Write(b)

		pos = nextPos
	}

	var e error
	if err := meta.Close(); err != nil {
		e = err
	}

	data.SetOffset(pos)

	if _, err = data.Write(magic); err != nil {
		log.Errorf("write magic error %s", err.Error())
	}

	if err = data.Close(); err != nil {
		return err
	}

	return e
}

func (t *tableReader) decodeLogHead(l *Log, r io.ReaderAt, pos int64) (int64, error) {
	dataLen, err := l.DecodeHeadAt(r, pos)
	if err != nil {
		return 0, err
	}

	return pos + int64(l.HeadSize()) + int64(dataLen), nil
}

func (t *tableReader) GetLog(id uint64, l *Log) error {
	if id < t.first || id > t.last {
		return ErrLogNotFound
	}

	t.lastReadTime.Set(time.Now().Unix())

	t.Lock()

	if err := t.openTable(); err != nil {
		t.close()
		t.Unlock()
		return err
	}
	t.Unlock()

	pos, err := t.getLogPos(int(id - t.first))
	if err != nil {
		return err
	}

	if err := l.DecodeAt(t.data, int64(pos)); err != nil {
		return err
	} else if l.ID != id {
		return fmt.Errorf("invalid log id %d != %d", l.ID, id)
	}

	return nil
}

func (t *tableReader) openTable() error {
	var err error
	if t.data == nil {
		if t.data, err = newReadFile(t.useMmap, fmtTableDataName(t.base, t.index)); err != nil {
			return err
		}
	}

	if t.meta == nil {
		if t.meta, err = newReadFile(t.useMmap, fmtTableMetaName(t.base, t.index)); err != nil {
			return err
		}
	}

	return nil
}

type tableWriter struct {
	sync.RWMutex

	data writeFile
	meta writeFile

	base  string
	index int64

	first    uint64
	last     uint64
	lastTime uint32

	maxLogSize int64

	closed bool

	syncType int

	posBuf []byte

	useMmap bool
}

func newTableWriter(base string, index int64, maxLogSize int64, useMmap bool) *tableWriter {
	if index <= 0 {
		panic(fmt.Errorf("invalid index %d", index))
	}

	t := new(tableWriter)

	t.base = base
	t.index = index

	t.maxLogSize = maxLogSize

	t.closed = false

	t.posBuf = make([]byte, 4)

	t.useMmap = useMmap

	return t
}

func (t *tableWriter) String() string {
	return fmt.Sprintf("%d", t.index)
}

func (t *tableWriter) SetMaxLogSize(s int64) {
	t.maxLogSize = s
}

func (t *tableWriter) SetSyncType(tp int) {
	t.syncType = tp
}

func (t *tableWriter) close() {
	if t.meta != nil {
		if err := t.meta.Close(); err != nil {
			log.Fatalf("close log meta error %s", err.Error())
		}
		t.meta = nil
	}

	if t.data != nil {
		if _, err := t.data.Write(magic); err != nil {
			log.Fatalf("write magic error %s", err.Error())
		}

		if err := t.data.Close(); err != nil {
			log.Fatalf("close log data error %s", err.Error())
		}
		t.data = nil
	}
}

func (t *tableWriter) Close() {
	t.Lock()
	t.closed = true

	t.close()
	t.Unlock()
}

func (t *tableWriter) First() uint64 {
	t.Lock()
	id := t.first
	t.Unlock()
	return id
}

func (t *tableWriter) Last() uint64 {
	t.Lock()
	id := t.last
	t.Unlock()
	return id
}

func (t *tableWriter) Flush() (*tableReader, error) {
	t.Lock()

	if t.data == nil || t.meta == nil {
		t.Unlock()
		return nil, errNilHandler
	}

	tr := new(tableReader)
	tr.base = t.base
	tr.index = t.index

	tr.first = t.first
	tr.last = t.last
	tr.lastTime = t.lastTime
	tr.useMmap = t.useMmap

	t.close()

	t.first = 0
	t.last = 0
	t.index = t.index + 1

	t.Unlock()

	return tr, nil
}

func (t *tableWriter) StoreLog(l *Log) error {
	t.Lock()
	err := t.storeLog(l)
	t.Unlock()

	return err
}

func (t *tableWriter) openFile() error {
	var err error
	if t.data == nil {
		if t.data, err = newWriteFile(t.useMmap, fmtTableDataName(t.base, t.index), t.maxLogSize+t.maxLogSize/10+int64(len(magic))); err != nil {
			return err
		}
	}

	if t.meta == nil {
		if t.meta, err = newWriteFile(t.useMmap, fmtTableMetaName(t.base, t.index), int64(defaultLogNumInFile*4)); err != nil {
			return err
		}
	}
	return err
}

func (t *tableWriter) storeLog(l *Log) error {
	if l.ID == 0 {
		return ErrStoreLogID
	}

	if t.closed {
		return fmt.Errorf("table writer is closed")
	}

	if t.last > 0 && l.ID != t.last+1 {
		return ErrStoreLogID
	}

	if t.data != nil && t.data.Offset() > t.maxLogSize {
		return errTableNeedFlush
	}

	var err error
	if err = t.openFile(); err != nil {
		return err
	}

	offsetPos := t.data.Offset()
	if err = l.Encode(t.data); err != nil {
		return err
	}

	binary.BigEndian.PutUint32(t.posBuf, uint32(offsetPos))
	if _, err = t.meta.Write(t.posBuf); err != nil {
		return err
	}

	if t.first == 0 {
		t.first = l.ID
	}

	t.last = l.ID
	t.lastTime = l.CreateTime

	if t.syncType == 2 {
		if err := t.data.Sync(); err != nil {
			log.Errorf("sync table error %s", err.Error())
		}
	}

	return nil
}

func (t *tableWriter) GetLog(id uint64, l *Log) error {
	t.RLock()
	defer t.RUnlock()

	if id < t.first || id > t.last {
		return ErrLogNotFound
	}

	var buf [4]byte
	if _, err := t.meta.ReadAt(buf[0:4], int64((id-t.first)*4)); err != nil {
		return err
	}

	offset := binary.BigEndian.Uint32(buf[0:4])

	if err := l.DecodeAt(t.data, int64(offset)); err != nil {
		return err
	} else if l.ID != id {
		return fmt.Errorf("invalid log id %d != %d", id, l.ID)
	}

	return nil
}

func (t *tableWriter) Sync() error {
	t.Lock()

	var err error
	if t.data != nil {
		err = t.data.Sync()
		t.Unlock()
		return err
	}

	if t.meta != nil {
		err = t.meta.Sync()
	}

	t.Unlock()

	return err
}
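The meta file that tableReader.getLogPos above reads is nothing more than a flat array of big-endian uint32 offsets into the data file, one entry per log, so the position of log id lives at byte offset (id-first)*4. A runnable sketch of that lookup, with bytes.Reader standing in for the real readFile:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Build a tiny meta file holding the data-file offsets of 3 logs.
	// binary.Write to a bytes.Buffer cannot fail, so the error is dropped.
	var meta bytes.Buffer
	for _, off := range []uint32{0, 29, 71} {
		binary.Write(&meta, binary.BigEndian, off)
	}

	r := bytes.NewReader(meta.Bytes())
	var buf [4]byte
	index := 2 // third log in the table, i.e. id == first+2
	if _, err := r.ReadAt(buf[:], int64(index)*4); err != nil {
		panic(err)
	}
	fmt.Println(binary.BigEndian.Uint32(buf[:])) // 71
}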
225
vendor/github.com/siddontang/ledisdb/rpl/goleveldb_store.go
generated
vendored
@ -1,225 +0,0 @@
package rpl

import (
	"bytes"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/siddontang/go/num"
	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

type GoLevelDBStore struct {
	LogStore

	m  sync.Mutex
	db *store.DB

	cfg *config.Config

	first uint64
	last  uint64

	buf bytes.Buffer
}

func (s *GoLevelDBStore) FirstID() (uint64, error) {
	s.m.Lock()
	id, err := s.firstID()
	s.m.Unlock()

	return id, err
}

func (s *GoLevelDBStore) LastID() (uint64, error) {
	s.m.Lock()
	id, err := s.lastID()
	s.m.Unlock()

	return id, err
}

func (s *GoLevelDBStore) firstID() (uint64, error) {
	if s.first != InvalidLogID {
		return s.first, nil
	}

	it := s.db.NewIterator()
	defer it.Close()

	it.SeekToFirst()

	if it.Valid() {
		s.first = num.BytesToUint64(it.RawKey())
	}

	return s.first, nil
}

func (s *GoLevelDBStore) lastID() (uint64, error) {
	if s.last != InvalidLogID {
		return s.last, nil
	}

	it := s.db.NewIterator()
	defer it.Close()

	it.SeekToLast()

	if it.Valid() {
		s.last = num.BytesToUint64(it.RawKey())
	}

	return s.last, nil
}

func (s *GoLevelDBStore) GetLog(id uint64, log *Log) error {
	v, err := s.db.Get(num.Uint64ToBytes(id))
	if err != nil {
		return err
	} else if v == nil {
		return ErrLogNotFound
	} else {
		return log.Decode(bytes.NewBuffer(v))
	}
}

func (s *GoLevelDBStore) StoreLog(log *Log) error {
	s.m.Lock()
	defer s.m.Unlock()

	last, err := s.lastID()
	if err != nil {
		return err
	}

	s.last = InvalidLogID

	s.buf.Reset()

	if log.ID != last+1 {
		return ErrStoreLogID
	}

	last = log.ID
	key := num.Uint64ToBytes(log.ID)

	if err := log.Encode(&s.buf); err != nil {
		return err
	}

	if err = s.db.Put(key, s.buf.Bytes()); err != nil {
		return err
	}

	s.last = last
	return nil
}

func (s *GoLevelDBStore) PurgeExpired(n int64) error {
	if n <= 0 {
		return fmt.Errorf("invalid expired time %d", n)
	}

	t := uint32(time.Now().Unix() - int64(n))

	s.m.Lock()
	defer s.m.Unlock()

	s.reset()

	it := s.db.NewIterator()
	it.SeekToFirst()

	w := s.db.NewWriteBatch()
	defer w.Rollback()

	l := new(Log)
	for ; it.Valid(); it.Next() {
		v := it.RawValue()

		if err := l.Unmarshal(v); err != nil {
			return err
		} else if l.CreateTime > t {
			break
		} else {
			w.Delete(it.RawKey())
		}
	}

	if err := w.Commit(); err != nil {
		return err
	}

	return nil
}

func (s *GoLevelDBStore) Sync() error {
	//no other way for sync, so ignore here
	return nil
}

func (s *GoLevelDBStore) reset() {
	s.first = InvalidLogID
	s.last = InvalidLogID
}

func (s *GoLevelDBStore) Clear() error {
	s.m.Lock()
	defer s.m.Unlock()

	if s.db != nil {
		s.db.Close()
	}

	s.reset()
	os.RemoveAll(s.cfg.DBPath)

	return s.open()
}

func (s *GoLevelDBStore) Close() error {
	s.m.Lock()
	defer s.m.Unlock()

	if s.db == nil {
		return nil
	}

	err := s.db.Close()
	s.db = nil
	return err
}

func (s *GoLevelDBStore) open() error {
	var err error

	s.first = InvalidLogID
	s.last = InvalidLogID

	s.db, err = store.Open(s.cfg)
	return err
}

func NewGoLevelDBStore(base string, syncLog int) (*GoLevelDBStore, error) {
	cfg := config.NewConfigDefault()
	cfg.DBName = "goleveldb"
	cfg.DBPath = base
	cfg.LevelDB.BlockSize = 16 * 1024 * 1024
	cfg.LevelDB.CacheSize = 64 * 1024 * 1024
	cfg.LevelDB.WriteBufferSize = 64 * 1024 * 1024
	cfg.LevelDB.Compression = false
	cfg.DBSyncCommit = syncLog

	s := new(GoLevelDBStore)
	s.cfg = cfg

	if err := s.open(); err != nil {
		return nil, err
	}

	return s, nil
}
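GoLevelDBStore keys every log with num.Uint64ToBytes(id); the firstID/lastID lookups above only work because the big-endian encoding makes lexicographic byte order match numeric order, so SeekToFirst and SeekToLast land on the smallest and largest IDs. A quick self-contained check of that property:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// be encodes an ID the same way as the big-endian key encoding above.
func be(id uint64) []byte {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], id)
	return b[:]
}

func main() {
	fmt.Println(bytes.Compare(be(9), be(10)))    // -1: 9 sorts before 10
	fmt.Println(bytes.Compare(be(255), be(256))) // -1 even across byte boundaries
}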
167
vendor/github.com/siddontang/ledisdb/rpl/log.go
generated
vendored
@ -1,167 +0,0 @@
package rpl

import (
	"bytes"
	"encoding/binary"
	"io"
	"sync"
)

const LogHeadSize = 17

type Log struct {
	ID          uint64
	CreateTime  uint32
	Compression uint8

	Data []byte
}

func (l *Log) HeadSize() int {
	return LogHeadSize
}

func (l *Log) Size() int {
	return l.HeadSize() + len(l.Data)
}

func (l *Log) Marshal() ([]byte, error) {
	buf := bytes.NewBuffer(make([]byte, l.Size()))
	buf.Reset()

	if err := l.Encode(buf); err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

func (l *Log) Unmarshal(b []byte) error {
	buf := bytes.NewBuffer(b)

	return l.Decode(buf)
}

var headPool = sync.Pool{
	New: func() interface{} { return make([]byte, LogHeadSize) },
}

func (l *Log) Encode(w io.Writer) error {
	b := headPool.Get().([]byte)
	pos := 0

	binary.BigEndian.PutUint64(b[pos:], l.ID)
	pos += 8
	binary.BigEndian.PutUint32(b[pos:], uint32(l.CreateTime))
	pos += 4
	b[pos] = l.Compression
	pos++
	binary.BigEndian.PutUint32(b[pos:], uint32(len(l.Data)))

	n, err := w.Write(b)
	headPool.Put(b)

	if err != nil {
		return err
	} else if n != LogHeadSize {
		return io.ErrShortWrite
	}

	if n, err = w.Write(l.Data); err != nil {
		return err
	} else if n != len(l.Data) {
		return io.ErrShortWrite
	}
	return nil
}

func (l *Log) Decode(r io.Reader) error {
	length, err := l.DecodeHead(r)
	if err != nil {
		return err
	}

	l.growData(int(length))

	if _, err := io.ReadFull(r, l.Data); err != nil {
		return err
	}

	return nil
}

func (l *Log) DecodeHead(r io.Reader) (uint32, error) {
	buf := headPool.Get().([]byte)

	if _, err := io.ReadFull(r, buf); err != nil {
		headPool.Put(buf)
		return 0, err
	}

	length := l.decodeHeadBuf(buf)

	headPool.Put(buf)

	return length, nil
}

func (l *Log) DecodeAt(r io.ReaderAt, pos int64) error {
	length, err := l.DecodeHeadAt(r, pos)
	if err != nil {
		return err
	}

	l.growData(int(length))
	var n int
	n, err = r.ReadAt(l.Data, pos+int64(LogHeadSize))
	if err == io.EOF && n == len(l.Data) {
		err = nil
	}

	return err
}

func (l *Log) growData(length int) {
	l.Data = l.Data[0:0]

	if cap(l.Data) >= length {
		l.Data = l.Data[0:length]
	} else {
		l.Data = make([]byte, length)
	}
}

func (l *Log) DecodeHeadAt(r io.ReaderAt, pos int64) (uint32, error) {
	buf := headPool.Get().([]byte)

	n, err := r.ReadAt(buf, pos)
	if err != nil && err != io.EOF {
		headPool.Put(buf)

		return 0, err
	}

	length := l.decodeHeadBuf(buf)
	headPool.Put(buf)

	if err == io.EOF && (length != 0 || n != len(buf)) {
		return 0, err
	}

	return length, nil
}

func (l *Log) decodeHeadBuf(buf []byte) uint32 {
	pos := 0
	l.ID = binary.BigEndian.Uint64(buf[pos:])
	pos += 8

	l.CreateTime = binary.BigEndian.Uint32(buf[pos:])
	pos += 4

	l.Compression = uint8(buf[pos])
	pos++

	length := binary.BigEndian.Uint32(buf[pos:])
	return length
}
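The 17-byte head that Encode and decodeHeadBuf above agree on packs ID (8 bytes), CreateTime (4), Compression (1) and the data length (4), all big-endian. A self-contained round-trip of that layout, with illustrative values:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	head := make([]byte, 17)
	binary.BigEndian.PutUint64(head[0:], 42)         // ID
	binary.BigEndian.PutUint32(head[8:], 1700000000) // CreateTime
	head[12] = 1                                     // Compression
	binary.BigEndian.PutUint32(head[13:], 512)       // len(Data)

	// Decode the same fields back, mirroring decodeHeadBuf.
	fmt.Println(binary.BigEndian.Uint64(head[0:8]),
		binary.BigEndian.Uint32(head[8:12]),
		head[12],
		binary.BigEndian.Uint32(head[13:17])) // 42 1700000000 1 512
}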
336
vendor/github.com/siddontang/ledisdb/rpl/rpl.go
generated
vendored
@ -1,336 +0,0 @@
package rpl

import (
	"encoding/binary"
	"os"
	"path"
	"sync"
	"time"

	"github.com/siddontang/go/log"
	"github.com/siddontang/go/snappy"
	"github.com/siddontang/ledisdb/config"
)

type Stat struct {
	FirstID  uint64
	LastID   uint64
	CommitID uint64
}

type Replication struct {
	m sync.Mutex

	cfg *config.Config

	s LogStore

	commitID  uint64
	commitLog *os.File

	quit chan struct{}

	wg sync.WaitGroup

	nc chan struct{}

	ncm sync.Mutex
}

func NewReplication(cfg *config.Config) (*Replication, error) {
	if len(cfg.Replication.Path) == 0 {
		cfg.Replication.Path = path.Join(cfg.DataDir, "rpl")
	}

	base := cfg.Replication.Path

	r := new(Replication)

	r.quit = make(chan struct{})
	r.nc = make(chan struct{})

	r.cfg = cfg

	var err error

	switch cfg.Replication.StoreName {
	case "goleveldb":
		if r.s, err = NewGoLevelDBStore(path.Join(base, "wal"), cfg.Replication.SyncLog); err != nil {
			return nil, err
		}
	default:
		if r.s, err = NewFileStore(path.Join(base, "ldb"), cfg); err != nil {
			return nil, err
		}
	}

	if r.commitLog, err = os.OpenFile(path.Join(base, "commit.log"), os.O_RDWR|os.O_CREATE, 0644); err != nil {
		return nil, err
	}

	if s, _ := r.commitLog.Stat(); s.Size() == 0 {
		r.commitID = 0
	} else if err = binary.Read(r.commitLog, binary.BigEndian, &r.commitID); err != nil {
		return nil, err
	}

	log.Infof("starting replication with commit ID %d", r.commitID)

	r.wg.Add(1)
	go r.run()

	return r, nil
}

func (r *Replication) Close() error {
	close(r.quit)

	r.wg.Wait()

	r.m.Lock()
	defer r.m.Unlock()

	log.Infof("closing replication with commit ID %d", r.commitID)

	if r.s != nil {
		r.s.Close()
		r.s = nil
	}

	if err := r.updateCommitID(r.commitID, true); err != nil {
		log.Errorf("update commit id err %s", err.Error())
	}

	if r.commitLog != nil {
		r.commitLog.Close()
		r.commitLog = nil
	}

	return nil
}

func (r *Replication) Log(data []byte) (*Log, error) {
	if r.cfg.Replication.Compression {
		//todo optimize
		var err error
		if data, err = snappy.Encode(nil, data); err != nil {
			return nil, err
		}
	}

	r.m.Lock()

	lastID, err := r.s.LastID()
	if err != nil {
		r.m.Unlock()
		return nil, err
	}

	commitId := r.commitID
	if lastID < commitId {
		lastID = commitId
	} else if lastID > commitId {
		r.m.Unlock()
		return nil, ErrCommitIDBehind
	}

	l := new(Log)
	l.ID = lastID + 1
	l.CreateTime = uint32(time.Now().Unix())

	if r.cfg.Replication.Compression {
		l.Compression = 1
	} else {
		l.Compression = 0
	}

	l.Data = data

	if err = r.s.StoreLog(l); err != nil {
		r.m.Unlock()
		return nil, err
	}

	r.m.Unlock()

	r.ncm.Lock()
	close(r.nc)
	r.nc = make(chan struct{})
	r.ncm.Unlock()

	return l, nil
}

func (r *Replication) WaitLog() <-chan struct{} {
	r.ncm.Lock()
	ch := r.nc
	r.ncm.Unlock()
	return ch
}

func (r *Replication) StoreLog(log *Log) error {
	r.m.Lock()
	err := r.s.StoreLog(log)
	r.m.Unlock()

	return err
}

func (r *Replication) FirstLogID() (uint64, error) {
	r.m.Lock()
	id, err := r.s.FirstID()
	r.m.Unlock()

	return id, err
}

func (r *Replication) LastLogID() (uint64, error) {
	r.m.Lock()
	id, err := r.s.LastID()
	r.m.Unlock()
	return id, err
}

func (r *Replication) LastCommitID() (uint64, error) {
	r.m.Lock()
	id := r.commitID
	r.m.Unlock()
	return id, nil
}

func (r *Replication) UpdateCommitID(id uint64) error {
	r.m.Lock()
	err := r.updateCommitID(id, r.cfg.Replication.SyncLog == 2)
	r.m.Unlock()

	return err
}

func (r *Replication) Stat() (*Stat, error) {
	r.m.Lock()
	defer r.m.Unlock()

	s := &Stat{}
	var err error

	if s.FirstID, err = r.s.FirstID(); err != nil {
		return nil, err
	}

	if s.LastID, err = r.s.LastID(); err != nil {
		return nil, err
	}

	s.CommitID = r.commitID
	return s, nil
}

func (r *Replication) updateCommitID(id uint64, force bool) error {
	if force {
		if _, err := r.commitLog.Seek(0, os.SEEK_SET); err != nil {
			return err
		}

		if err := binary.Write(r.commitLog, binary.BigEndian, id); err != nil {
			return err
		}
	}

	r.commitID = id

	return nil
}

func (r *Replication) CommitIDBehind() (bool, error) {
	r.m.Lock()

	id, err := r.s.LastID()
	if err != nil {
		r.m.Unlock()
		return false, err
	}

	behind := id > r.commitID
	r.m.Unlock()

	return behind, nil
}

func (r *Replication) GetLog(id uint64, log *Log) error {
	return r.s.GetLog(id, log)
}

func (r *Replication) NextNeedCommitLog(log *Log) error {
	r.m.Lock()
	defer r.m.Unlock()

	id, err := r.s.LastID()
	if err != nil {
		return err
	}

	if id <= r.commitID {
		return ErrNoBehindLog
	}

	return r.s.GetLog(r.commitID+1, log)
}

func (r *Replication) Clear() error {
	return r.ClearWithCommitID(0)
}

func (r *Replication) ClearWithCommitID(id uint64) error {
	r.m.Lock()
	defer r.m.Unlock()

	if err := r.s.Clear(); err != nil {
		return err
	}

	return r.updateCommitID(id, true)
}

func (r *Replication) run() {
	defer r.wg.Done()

	syncTc := time.NewTicker(1 * time.Second)
	purgeTc := time.NewTicker(1 * time.Hour)

	for {
		select {
		case <-purgeTc.C:
			n := (r.cfg.Replication.ExpiredLogDays * 24 * 3600)
			r.m.Lock()
			err := r.s.PurgeExpired(int64(n))
			r.m.Unlock()
			if err != nil {
				log.Errorf("purge expired log error %s", err.Error())
			}
		case <-syncTc.C:
			if r.cfg.Replication.SyncLog == 1 {
				r.m.Lock()
				err := r.s.Sync()
				r.m.Unlock()
				if err != nil {
					log.Errorf("sync store error %s", err.Error())
				}
			}
			if r.cfg.Replication.SyncLog != 2 {
				//sync the commit id every second
				r.m.Lock()
				err := r.updateCommitID(r.commitID, true)
				r.m.Unlock()

				if err != nil {
					log.Errorf("sync commitid error %s", err.Error())
				}
			}
		case <-r.quit:
			syncTc.Stop()
			purgeTc.Stop()
			return
		}
	}
}
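updateCommitID above persists the commit ID by seeking to offset 0 and rewriting a single big-endian uint64 in place, so commit.log never grows past 8 bytes. A sketch of the same pattern against a throwaway temp file (the file name is illustrative only):

package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "commit.log")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	for _, id := range []uint64{1, 2, 3} {
		if _, err := f.Seek(0, 0); err != nil { // always rewrite from the start
			panic(err)
		}
		if err := binary.Write(f, binary.BigEndian, id); err != nil {
			panic(err)
		}
	}

	if _, err := f.Seek(0, 0); err != nil {
		panic(err)
	}
	var got uint64
	if err := binary.Read(f, binary.BigEndian, &got); err != nil {
		panic(err)
	}
	fmt.Println(got) // 3: only the latest commit ID survives
}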
36
vendor/github.com/siddontang/ledisdb/rpl/store.go
generated
vendored
@ -1,36 +0,0 @@
package rpl

import (
	"errors"
)

const (
	InvalidLogID uint64 = 0
)

var (
	ErrLogNotFound    = errors.New("log not found")
	ErrStoreLogID     = errors.New("log id is less")
	ErrNoBehindLog    = errors.New("no behind commit log")
	ErrCommitIDBehind = errors.New("commit id is behind last log id")
)

type LogStore interface {
	GetLog(id uint64, log *Log) error

	FirstID() (uint64, error)
	LastID() (uint64, error)

	// if log id is less than current last id, return error
	StoreLog(log *Log) error

	// Delete logs before n seconds
	PurgeExpired(n int64) error

	Sync() error

	// Clear all logs
	Clear() error

	Close() error
}
169
vendor/github.com/siddontang/ledisdb/store/db.go
generated
vendored
@ -1,169 +0,0 @@
package store

import (
	"sync"
	"time"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store/driver"
)

type DB struct {
	db   driver.IDB
	name string

	st *Stat

	cfg *config.Config

	lastCommit time.Time

	m sync.Mutex
}

func (db *DB) Close() error {
	return db.db.Close()
}

func (db *DB) String() string {
	return db.name
}

func (db *DB) NewIterator() *Iterator {
	db.st.IterNum.Add(1)

	it := new(Iterator)
	it.it = db.db.NewIterator()
	it.st = db.st

	return it
}

func (db *DB) Get(key []byte) ([]byte, error) {
	t := time.Now()
	v, err := db.db.Get(key)
	db.st.statGet(v, err)
	db.st.GetTotalTime.Add(time.Now().Sub(t))
	return v, err
}

func (db *DB) Put(key []byte, value []byte) error {
	db.st.PutNum.Add(1)

	if db.needSyncCommit() {
		return db.db.SyncPut(key, value)
	} else {
		return db.db.Put(key, value)
	}
}

func (db *DB) Delete(key []byte) error {
	db.st.DeleteNum.Add(1)

	if db.needSyncCommit() {
		return db.db.SyncDelete(key)
	} else {
		return db.db.Delete(key)
	}
}

func (db *DB) NewWriteBatch() *WriteBatch {
	db.st.BatchNum.Add(1)
	wb := new(WriteBatch)
	wb.wb = db.db.NewWriteBatch()
	wb.st = db.st
	wb.db = db
	return wb
}

func (db *DB) NewSnapshot() (*Snapshot, error) {
	db.st.SnapshotNum.Add(1)

	var err error
	s := &Snapshot{}
	if s.ISnapshot, err = db.db.NewSnapshot(); err != nil {
		return nil, err
	}
	s.st = db.st

	return s, nil
}

func (db *DB) Compact() error {
	db.st.CompactNum.Add(1)

	t := time.Now()
	err := db.db.Compact()

	db.st.CompactTotalTime.Add(time.Now().Sub(t))

	return err
}

func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
	return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}

func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator {
	return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1})
}

//count < 0 means unlimited.
//
//offset must be >= 0; if < 0, nothing is returned.
func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
	return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}

//count < 0 means unlimited.
//
//offset must be >= 0; if < 0, nothing is returned.
func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator {
	return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count})
}

func (db *DB) Stat() *Stat {
	return db.st
}

func (db *DB) needSyncCommit() bool {
	if db.cfg.DBSyncCommit == 0 {
		return false
	} else if db.cfg.DBSyncCommit == 2 {
		return true
	} else {
		n := time.Now()
		need := false
		db.m.Lock()

		if n.Sub(db.lastCommit) > time.Second {
			need = true
		}
		db.lastCommit = n

		db.m.Unlock()
		return need
	}
}

func (db *DB) GetSlice(key []byte) (Slice, error) {
	if d, ok := db.db.(driver.ISliceGeter); ok {
		t := time.Now()
		v, err := d.GetSlice(key)
		db.st.statGet(v, err)
		db.st.GetTotalTime.Add(time.Now().Sub(t))
		return v, err
	} else {
		v, err := db.Get(key)
		if err != nil {
			return nil, err
		} else if v == nil {
			return nil, nil
		} else {
			return driver.GoSlice(v), nil
		}
	}
}
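needSyncCommit above encodes the DBSyncCommit policy: 0 never fsyncs, 2 always does, and 1 fsyncs at most once per second by tracking the time of the last commit. A single-goroutine sketch of that decision; the syncPolicy type is hypothetical, and the real method also takes a mutex:

package main

import (
	"fmt"
	"time"
)

type syncPolicy struct {
	mode       int // 0 = never, 1 = at most once per second, 2 = always
	lastCommit time.Time
}

func (p *syncPolicy) needSync() bool {
	switch p.mode {
	case 0:
		return false
	case 2:
		return true
	default:
		now := time.Now()
		need := now.Sub(p.lastCommit) > time.Second
		p.lastCommit = now // updated on every call, as in needSyncCommit
		return need
	}
}

func main() {
	p := &syncPolicy{mode: 1}
	fmt.Println(p.needSync()) // true: the zero lastCommit is long in the past
	fmt.Println(p.needSync()) // false: within the same second
}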
57
vendor/github.com/siddontang/ledisdb/store/driver/driver.go
generated
vendored
@ -1,57 +0,0 @@
package driver

type IDB interface {
	Close() error

	Get(key []byte) ([]byte, error)

	Put(key []byte, value []byte) error
	Delete(key []byte) error

	SyncPut(key []byte, value []byte) error
	SyncDelete(key []byte) error

	NewIterator() IIterator

	NewWriteBatch() IWriteBatch

	NewSnapshot() (ISnapshot, error)

	Compact() error
}

type ISnapshot interface {
	Get(key []byte) ([]byte, error)
	NewIterator() IIterator
	Close()
}

type IIterator interface {
	Close() error

	First()
	Last()
	Seek(key []byte)

	Next()
	Prev()

	Valid() bool

	Key() []byte
	Value() []byte
}

type IWriteBatch interface {
	Put(key []byte, value []byte)
	Delete(key []byte)
	Commit() error
	SyncCommit() error
	Rollback() error
	Data() []byte
	Close()
}

type ISliceGeter interface {
	GetSlice(key []byte) (ISlice, error)
}
21
vendor/github.com/siddontang/ledisdb/store/driver/slice.go
generated
vendored
@ -1,21 +0,0 @@
package driver

type ISlice interface {
	Data() []byte
	Size() int
	Free()
}

type GoSlice []byte

func (s GoSlice) Data() []byte {
	return []byte(s)
}

func (s GoSlice) Size() int {
	return len(s)
}

func (s GoSlice) Free() {

}
46
vendor/github.com/siddontang/ledisdb/store/driver/store.go
generated
vendored
@ -1,46 +0,0 @@
package driver

import (
	"fmt"

	"github.com/siddontang/ledisdb/config"
)

type Store interface {
	String() string
	Open(path string, cfg *config.Config) (IDB, error)
	Repair(path string, cfg *config.Config) error
}

var dbs = map[string]Store{}

func Register(s Store) {
	name := s.String()
	if _, ok := dbs[name]; ok {
		panic(fmt.Errorf("store %s is registered", s))
	}

	dbs[name] = s
}

func ListStores() []string {
	s := []string{}
	for k := range dbs {
		s = append(s, k)
	}

	return s
}

func GetStore(cfg *config.Config) (Store, error) {
	if len(cfg.DBName) == 0 {
		cfg.DBName = config.DefaultDBName
	}

	s, ok := dbs[cfg.DBName]
	if !ok {
		return nil, fmt.Errorf("store %s is not registered", cfg.DBName)
	}

	return s, nil
}
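Register and GetStore above implement the usual driver-registry pattern: each backend registers itself by name, normally from an init function, and duplicate or unknown names fail loudly. A self-contained miniature of the same pattern; the backend type is a hypothetical stand-in for Store:

package main

import "fmt"

type backend interface{ String() string }

type memBackend struct{}

func (memBackend) String() string { return "memory" }

var backends = map[string]backend{}

// register panics on duplicate names, like driver.Register above.
func register(b backend) {
	if _, ok := backends[b.String()]; ok {
		panic(fmt.Errorf("store %s is registered", b))
	}
	backends[b.String()] = b
}

// get returns an error for unknown names, like driver.GetStore above.
func get(name string) (backend, error) {
	b, ok := backends[name]
	if !ok {
		return nil, fmt.Errorf("store %s is not registered", name)
	}
	return b, nil
}

func main() {
	register(memBackend{})
	b, err := get("memory")
	fmt.Println(b, err) // memory <nil>
}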
39
vendor/github.com/siddontang/ledisdb/store/goleveldb/batch.go
generated
vendored
@ -1,39 +0,0 @@
package goleveldb

import (
	"github.com/syndtr/goleveldb/leveldb"
)

type WriteBatch struct {
	db     *DB
	wbatch *leveldb.Batch
}

func (w *WriteBatch) Put(key, value []byte) {
	w.wbatch.Put(key, value)
}

func (w *WriteBatch) Delete(key []byte) {
	w.wbatch.Delete(key)
}

func (w *WriteBatch) Commit() error {
	return w.db.db.Write(w.wbatch, nil)
}

func (w *WriteBatch) SyncCommit() error {
	return w.db.db.Write(w.wbatch, w.db.syncOpts)
}

func (w *WriteBatch) Rollback() error {
	w.wbatch.Reset()
	return nil
}

func (w *WriteBatch) Close() {
	w.wbatch.Reset()
}

func (w *WriteBatch) Data() []byte {
	return w.wbatch.Dump()
}
4
vendor/github.com/siddontang/ledisdb/store/goleveldb/const.go
generated
vendored
@ -1,4 +0,0 @@
package goleveldb

const DBName = "goleveldb"
const MemDBName = "memory"
204
vendor/github.com/siddontang/ledisdb/store/goleveldb/db.go
generated
vendored
@ -1,204 +0,0 @@
|
||||
package goleveldb
|
||||
|
||||
import (
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/cache"
|
||||
"github.com/syndtr/goleveldb/leveldb/filter"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/syndtr/goleveldb/leveldb/storage"
|
||||
"github.com/syndtr/goleveldb/leveldb/util"
|
||||
|
||||
"github.com/siddontang/ledisdb/config"
|
||||
"github.com/siddontang/ledisdb/store/driver"
|
||||
|
||||
"os"
|
||||
)
|
||||
|
||||
const defaultFilterBits int = 10
|
||||
|
||||
type Store struct {
|
||||
}
|
||||
|
||||
func (s Store) String() string {
|
||||
return DBName
|
||||
}
|
||||
|
||||
type MemStore struct {
|
||||
}
|
||||
|
||||
func (s MemStore) String() string {
|
||||
return MemDBName
|
||||
}
|
||||
|
||||
type DB struct {
|
||||
path string
|
||||
|
||||
cfg *config.LevelDBConfig
|
||||
|
||||
db *leveldb.DB
|
||||
|
||||
opts *opt.Options
|
||||
|
||||
    iteratorOpts *opt.ReadOptions

    syncOpts *opt.WriteOptions

    cache cache.Cache

    filter filter.Filter
}

func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
    if err := os.MkdirAll(path, 0755); err != nil {
        return nil, err
    }

    db := new(DB)
    db.path = path
    db.cfg = &cfg.LevelDB

    db.initOpts()

    var err error
    db.db, err = leveldb.OpenFile(db.path, db.opts)

    if err != nil {
        return nil, err
    }

    return db, nil
}

func (s Store) Repair(path string, cfg *config.Config) error {
    db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB))
    if err != nil {
        return err
    }

    db.Close()
    return nil
}

func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) {
    db := new(DB)
    db.path = path
    db.cfg = &cfg.LevelDB

    db.initOpts()

    var err error
    db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts)
    if err != nil {
        return nil, err
    }

    return db, nil
}

func (s MemStore) Repair(path string, cfg *config.Config) error {
    return nil
}

func (db *DB) initOpts() {
    db.opts = newOptions(db.cfg)

    db.iteratorOpts = &opt.ReadOptions{}
    db.iteratorOpts.DontFillCache = true

    db.syncOpts = &opt.WriteOptions{}
    db.syncOpts.Sync = true
}

func newOptions(cfg *config.LevelDBConfig) *opt.Options {
    opts := &opt.Options{}
    opts.ErrorIfMissing = false

    opts.BlockCacheCapacity = cfg.CacheSize

    //we must use bloomfilter
    opts.Filter = filter.NewBloomFilter(defaultFilterBits)

    if !cfg.Compression {
        opts.Compression = opt.NoCompression
    } else {
        opts.Compression = opt.SnappyCompression
    }

    opts.BlockSize = cfg.BlockSize
    opts.WriteBuffer = cfg.WriteBufferSize
    opts.OpenFilesCacheCapacity = cfg.MaxOpenFiles

    //here we use default value, later add config support
    opts.CompactionTableSize = 32 * 1024 * 1024
    opts.WriteL0SlowdownTrigger = 16
    opts.WriteL0PauseTrigger = 64

    return opts
}

func (db *DB) Close() error {
    return db.db.Close()
}

func (db *DB) Put(key, value []byte) error {
    return db.db.Put(key, value, nil)
}

func (db *DB) Get(key []byte) ([]byte, error) {
    v, err := db.db.Get(key, nil)
    if err == leveldb.ErrNotFound {
        return nil, nil
    }
    return v, nil
}

func (db *DB) Delete(key []byte) error {
    return db.db.Delete(key, nil)
}

func (db *DB) SyncPut(key []byte, value []byte) error {
    return db.db.Put(key, value, db.syncOpts)
}

func (db *DB) SyncDelete(key []byte) error {
    return db.db.Delete(key, db.syncOpts)
}

func (db *DB) NewWriteBatch() driver.IWriteBatch {
    wb := &WriteBatch{
        db:     db,
        wbatch: new(leveldb.Batch),
    }
    return wb
}

func (db *DB) NewIterator() driver.IIterator {
    it := &Iterator{
        db.db.NewIterator(nil, db.iteratorOpts),
    }

    return it
}

func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
    snapshot, err := db.db.GetSnapshot()
    if err != nil {
        return nil, err
    }

    s := &Snapshot{
        db:  db,
        snp: snapshot,
    }

    return s, nil
}

func (db *DB) Compact() error {
    return db.db.CompactRange(util.Range{nil, nil})
}

func init() {
    driver.Register(Store{})
    driver.Register(MemStore{})
}
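A brief usage sketch, not part of the vendored diff: the driver above distinguishes buffered writes from synced ones. Put goes through goleveldb's default write options, while SyncPut uses db.syncOpts (Sync = true), trading write throughput for durability. It assumes a *DB already opened through Store{}.Open above.

func putThenRead(db *DB) ([]byte, error) {
    if err := db.Put([]byte("k"), []byte("v")); err != nil { // buffered write
        return nil, err
    }
    if err := db.SyncPut([]byte("k2"), []byte("v2")); err != nil { // fsync'd write
        return nil, err
    }
    return db.Get([]byte("k")) // note: a missing key yields (nil, nil), not an error
}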

49
vendor/github.com/siddontang/ledisdb/store/goleveldb/iterator.go
generated
vendored
@@ -1,49 +0,0 @@
package goleveldb

import (
    "github.com/syndtr/goleveldb/leveldb/iterator"
)

type Iterator struct {
    it iterator.Iterator
}

func (it *Iterator) Key() []byte {
    return it.it.Key()
}

func (it *Iterator) Value() []byte {
    return it.it.Value()
}

func (it *Iterator) Close() error {
    if it.it != nil {
        it.it.Release()
        it.it = nil
    }
    return nil
}

func (it *Iterator) Valid() bool {
    return it.it.Valid()
}

func (it *Iterator) Next() {
    it.it.Next()
}

func (it *Iterator) Prev() {
    it.it.Prev()
}

func (it *Iterator) First() {
    it.it.First()
}

func (it *Iterator) Last() {
    it.it.Last()
}

func (it *Iterator) Seek(key []byte) {
    it.it.Seek(key)
}

26
vendor/github.com/siddontang/ledisdb/store/goleveldb/snapshot.go
generated
vendored
@@ -1,26 +0,0 @@
package goleveldb

import (
    "github.com/siddontang/ledisdb/store/driver"
    "github.com/syndtr/goleveldb/leveldb"
)

type Snapshot struct {
    db  *DB
    snp *leveldb.Snapshot
}

func (s *Snapshot) Get(key []byte) ([]byte, error) {
    return s.snp.Get(key, s.db.iteratorOpts)
}

func (s *Snapshot) NewIterator() driver.IIterator {
    it := &Iterator{
        s.snp.NewIterator(nil, s.db.iteratorOpts),
    }
    return it
}

func (s *Snapshot) Close() {
    s.snp.Release()
}

334
vendor/github.com/siddontang/ledisdb/store/iterator.go
generated
vendored
@@ -1,334 +0,0 @@
package store

import (
    "bytes"

    "github.com/siddontang/ledisdb/store/driver"
)

const (
    IteratorForward  uint8 = 0
    IteratorBackward uint8 = 1
)

const (
    RangeClose uint8 = 0x00
    RangeLOpen uint8 = 0x01
    RangeROpen uint8 = 0x10
    RangeOpen  uint8 = 0x11
)

// Min must be less than or equal to Max.
//
// range type:
//
//    close: [min, max]
//    open:  (min, max)
//    lopen: (min, max]
//    ropen: [min, max)
type Range struct {
    Min []byte
    Max []byte

    Type uint8
}

type Limit struct {
    Offset int
    Count  int
}

type Iterator struct {
    it driver.IIterator
    st *Stat
}

// Returns a copy of the key.
func (it *Iterator) Key() []byte {
    k := it.it.Key()
    if k == nil {
        return nil
    }

    return append([]byte{}, k...)
}

// Returns a copy of the value.
func (it *Iterator) Value() []byte {
    v := it.it.Value()
    if v == nil {
        return nil
    }

    return append([]byte{}, v...)
}

// Returns a reference to the key.
// Be careful: it is invalidated by the next iteration.
func (it *Iterator) RawKey() []byte {
    return it.it.Key()
}

// Returns a reference to the value.
// Be careful: it is invalidated by the next iteration.
func (it *Iterator) RawValue() []byte {
    return it.it.Value()
}

// Copies the key into b; if b is nil or too small, a new slice is returned.
func (it *Iterator) BufKey(b []byte) []byte {
    k := it.RawKey()
    if k == nil {
        return nil
    }
    if b == nil {
        b = []byte{}
    }

    b = b[0:0]
    return append(b, k...)
}

// Copies the value into b; if b is nil or too small, a new slice is returned.
func (it *Iterator) BufValue(b []byte) []byte {
    v := it.RawValue()
    if v == nil {
        return nil
    }

    if b == nil {
        b = []byte{}
    }

    b = b[0:0]
    return append(b, v...)
}

func (it *Iterator) Close() {
    if it.it != nil {
        it.st.IterCloseNum.Add(1)
        it.it.Close()
        it.it = nil
    }
}

func (it *Iterator) Valid() bool {
    return it.it.Valid()
}

func (it *Iterator) Next() {
    it.st.IterSeekNum.Add(1)
    it.it.Next()
}

func (it *Iterator) Prev() {
    it.st.IterSeekNum.Add(1)
    it.it.Prev()
}

func (it *Iterator) SeekToFirst() {
    it.st.IterSeekNum.Add(1)
    it.it.First()
}

func (it *Iterator) SeekToLast() {
    it.st.IterSeekNum.Add(1)
    it.it.Last()
}

func (it *Iterator) Seek(key []byte) {
    it.st.IterSeekNum.Add(1)
    it.it.Seek(key)
}

// Finds the value by key; returns nil if not found.
func (it *Iterator) Find(key []byte) []byte {
    it.Seek(key)
    if it.Valid() {
        k := it.RawKey()
        if k == nil {
            return nil
        } else if bytes.Equal(k, key) {
            return it.Value()
        }
    }

    return nil
}

// Finds the value by key; returns nil if not found, else a reference to the value.
// Be careful: it is invalidated by the next iteration.
func (it *Iterator) RawFind(key []byte) []byte {
    it.Seek(key)
    if it.Valid() {
        k := it.RawKey()
        if k == nil {
            return nil
        } else if bytes.Equal(k, key) {
            return it.RawValue()
        }
    }

    return nil
}

type RangeLimitIterator struct {
    it *Iterator

    r *Range
    l *Limit

    step int

    //0 for IteratorForward, 1 for IteratorBackward
    direction uint8
}

func (it *RangeLimitIterator) Key() []byte {
    return it.it.Key()
}

func (it *RangeLimitIterator) Value() []byte {
    return it.it.Value()
}

func (it *RangeLimitIterator) RawKey() []byte {
    return it.it.RawKey()
}

func (it *RangeLimitIterator) RawValue() []byte {
    return it.it.RawValue()
}

func (it *RangeLimitIterator) BufKey(b []byte) []byte {
    return it.it.BufKey(b)
}

func (it *RangeLimitIterator) BufValue(b []byte) []byte {
    return it.it.BufValue(b)
}

func (it *RangeLimitIterator) Valid() bool {
    if it.l.Offset < 0 {
        return false
    } else if !it.it.Valid() {
        return false
    } else if it.l.Count >= 0 && it.step >= it.l.Count {
        return false
    }

    if it.direction == IteratorForward {
        if it.r.Max != nil {
            r := bytes.Compare(it.it.RawKey(), it.r.Max)
            if it.r.Type&RangeROpen > 0 {
                return !(r >= 0)
            } else {
                return !(r > 0)
            }
        }
    } else {
        if it.r.Min != nil {
            r := bytes.Compare(it.it.RawKey(), it.r.Min)
            if it.r.Type&RangeLOpen > 0 {
                return !(r <= 0)
            } else {
                return !(r < 0)
            }
        }
    }

    return true
}

func (it *RangeLimitIterator) Next() {
    it.step++

    if it.direction == IteratorForward {
        it.it.Next()
    } else {
        it.it.Prev()
    }
}

func (it *RangeLimitIterator) Close() {
    it.it.Close()
}

func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
    return rangeLimitIterator(i, r, l, IteratorForward)
}

func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator {
    return rangeLimitIterator(i, r, l, IteratorBackward)
}

func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
    return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward)
}

func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator {
    return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward)
}

func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator {
    it := new(RangeLimitIterator)

    it.it = i

    it.r = r
    it.l = l
    it.direction = direction

    it.step = 0

    if l.Offset < 0 {
        return it
    }

    if direction == IteratorForward {
        if r.Min == nil {
            it.it.SeekToFirst()
        } else {
            it.it.Seek(r.Min)

            if r.Type&RangeLOpen > 0 {
                if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) {
                    it.it.Next()
                }
            }
        }
    } else {
        if r.Max == nil {
            it.it.SeekToLast()
        } else {
            it.it.Seek(r.Max)

            if !it.it.Valid() {
                it.it.SeekToLast()
            } else {
                if !bytes.Equal(it.it.RawKey(), r.Max) {
                    it.it.Prev()
                }
            }

            if r.Type&RangeROpen > 0 {
                if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) {
                    it.it.Prev()
                }
            }
        }
    }

    for i := 0; i < l.Offset; i++ {
        if it.it.Valid() {
            if it.direction == IteratorForward {
                it.it.Next()
            } else {
                it.it.Prev()
            }
        }
    }

    return it
}
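A short usage sketch, not part of the vendored diff: how Range, Limit, and NewRangeLimitIterator above combine to scan a bounded key range. It assumes the surrounding store package's DB type exposes NewIterator() *Iterator (as in ledisdb's store/db.go, which is not part of this hunk).

func scanRangeExample(db *DB) {
    // [min, max) — right-open range; skip the first 2 keys, stop after 10.
    r := &Range{Min: []byte("a"), Max: []byte("z"), Type: RangeROpen}
    l := &Limit{Offset: 2, Count: 10}

    it := NewRangeLimitIterator(db.NewIterator(), r, l)
    defer it.Close()

    for ; it.Valid(); it.Next() {
        _ = it.Key()   // Key/Value return copies; RawKey/RawValue avoid the copy
        _ = it.Value()
    }
}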

99
vendor/github.com/siddontang/ledisdb/store/leveldb/batch.go
generated
vendored
@@ -1,99 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
// #include "leveldb_ext.h"
import "C"

import (
    "unsafe"

    "github.com/syndtr/goleveldb/leveldb"
)

type WriteBatch struct {
    db     *DB
    wbatch *C.leveldb_writebatch_t
}

func newWriteBatch(db *DB) *WriteBatch {
    w := new(WriteBatch)
    w.db = db
    w.wbatch = C.leveldb_writebatch_create()

    return w
}

func (w *WriteBatch) Close() {
    if w.wbatch != nil {
        C.leveldb_writebatch_destroy(w.wbatch)
        w.wbatch = nil
    }
}

func (w *WriteBatch) Put(key, value []byte) {
    var k, v *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }
    if len(value) != 0 {
        v = (*C.char)(unsafe.Pointer(&value[0]))
    }

    lenk := len(key)
    lenv := len(value)

    C.leveldb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}

func (w *WriteBatch) Delete(key []byte) {
    C.leveldb_writebatch_delete(w.wbatch,
        (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}

func (w *WriteBatch) Commit() error {
    return w.commit(w.db.writeOpts)
}

func (w *WriteBatch) SyncCommit() error {
    return w.commit(w.db.syncOpts)
}

func (w *WriteBatch) Rollback() error {
    C.leveldb_writebatch_clear(w.wbatch)

    return nil
}

func (w *WriteBatch) commit(wb *WriteOptions) error {
    var errStr *C.char
    C.leveldb_write(w.db.db, wb.Opt, w.wbatch, &errStr)
    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

//export leveldb_writebatch_iterate_put
func leveldb_writebatch_iterate_put(p unsafe.Pointer, k *C.char, klen C.size_t, v *C.char, vlen C.size_t) {
    b := (*leveldb.Batch)(p)
    key := slice(unsafe.Pointer(k), int(klen))
    value := slice(unsafe.Pointer(v), int(vlen))
    b.Put(key, value)
}

//export leveldb_writebatch_iterate_delete
func leveldb_writebatch_iterate_delete(p unsafe.Pointer, k *C.char, klen C.size_t) {
    b := (*leveldb.Batch)(p)
    key := slice(unsafe.Pointer(k), int(klen))
    b.Delete(key)
}

func (w *WriteBatch) Data() []byte {
    gbatch := leveldb.Batch{}
    C.leveldb_writebatch_iterate_ext(w.wbatch,
        unsafe.Pointer(&gbatch))
    return gbatch.Dump()
}
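A minimal sketch, not part of the diff: batching mutations through the cgo WriteBatch above. It assumes `wb` came from DB.NewWriteBatch (defined in this package's db.go later in this diff), which wraps newWriteBatch and sets a finalizer.

func batchExample(wb *WriteBatch) error {
    wb.Put([]byte("k1"), []byte("v1"))
    wb.Delete([]byte("k2"))

    if err := wb.Commit(); err != nil { // leveldb_write via w.db.writeOpts
        wb.Rollback() // clears the pending batch
        return err
    }
    return nil
}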

20
vendor/github.com/siddontang/ledisdb/store/leveldb/cache.go
generated
vendored
@@ -1,20 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include <stdint.h>
// #include "leveldb/c.h"
import "C"

type Cache struct {
    Cache *C.leveldb_cache_t
}

func NewLRUCache(capacity int) *Cache {
    return &Cache{C.leveldb_cache_create_lru(C.size_t(capacity))}
}

func (c *Cache) Close() {
    C.leveldb_cache_destroy(c.Cache)
}

3
vendor/github.com/siddontang/ledisdb/store/leveldb/const.go
generated
vendored
@@ -1,3 +0,0 @@
package leveldb

const DBName = "leveldb"

314
vendor/github.com/siddontang/ledisdb/store/leveldb/db.go
generated
vendored
@@ -1,314 +0,0 @@
// +build leveldb

// Package leveldb is a wrapper for c++ leveldb
package leveldb

/*
#cgo LDFLAGS: -lleveldb
#include <leveldb/c.h>
#include "leveldb_ext.h"
*/
import "C"

import (
    "os"
    "runtime"
    "unsafe"

    "github.com/siddontang/ledisdb/config"
    "github.com/siddontang/ledisdb/store/driver"
)

const defaultFilterBits int = 10

type Store struct {
}

func (s Store) String() string {
    return DBName
}

func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
    if err := os.MkdirAll(path, 0755); err != nil {
        return nil, err
    }

    db := new(DB)
    db.path = path
    db.cfg = &cfg.LevelDB

    if err := db.open(); err != nil {
        return nil, err
    }

    return db, nil
}

func (s Store) Repair(path string, cfg *config.Config) error {
    db := new(DB)
    db.cfg = &cfg.LevelDB
    db.path = path

    err := db.open()
    defer db.Close()

    //open ok, do not need repair
    if err == nil {
        return nil
    }

    var errStr *C.char
    ldbname := C.CString(path)
    defer C.leveldb_free(unsafe.Pointer(ldbname))

    C.leveldb_repair_db(db.opts.Opt, ldbname, &errStr)
    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

type DB struct {
    path string

    cfg *config.LevelDBConfig

    db *C.leveldb_t

    opts *Options

    //for default read and write options
    readOpts     *ReadOptions
    writeOpts    *WriteOptions
    iteratorOpts *ReadOptions

    syncOpts *WriteOptions

    cache *Cache

    filter *FilterPolicy
}

func (db *DB) open() error {
    db.initOptions(db.cfg)

    var errStr *C.char
    ldbname := C.CString(db.path)
    defer C.leveldb_free(unsafe.Pointer(ldbname))

    db.db = C.leveldb_open(db.opts.Opt, ldbname, &errStr)
    if errStr != nil {
        db.db = nil
        return saveError(errStr)
    }
    return nil
}

func (db *DB) initOptions(cfg *config.LevelDBConfig) {
    opts := NewOptions()

    opts.SetCreateIfMissing(true)

    db.cache = NewLRUCache(cfg.CacheSize)
    opts.SetCache(db.cache)

    //we must use bloomfilter
    db.filter = NewBloomFilter(defaultFilterBits)
    opts.SetFilterPolicy(db.filter)

    if !cfg.Compression {
        opts.SetCompression(NoCompression)
    } else {
        opts.SetCompression(SnappyCompression)
    }

    opts.SetBlockSize(cfg.BlockSize)

    opts.SetWriteBufferSize(cfg.WriteBufferSize)

    opts.SetMaxOpenFiles(cfg.MaxOpenFiles)

    opts.SetMaxFileSize(cfg.MaxFileSize)

    db.opts = opts

    db.readOpts = NewReadOptions()
    db.writeOpts = NewWriteOptions()

    db.syncOpts = NewWriteOptions()
    db.syncOpts.SetSync(true)

    db.iteratorOpts = NewReadOptions()
    db.iteratorOpts.SetFillCache(false)
}

func (db *DB) Close() error {
    if db.db != nil {
        C.leveldb_close(db.db)
        db.db = nil
    }

    db.opts.Close()

    if db.cache != nil {
        db.cache.Close()
    }

    if db.filter != nil {
        db.filter.Close()
    }

    db.readOpts.Close()
    db.writeOpts.Close()
    db.iteratorOpts.Close()

    return nil
}

func (db *DB) Put(key, value []byte) error {
    return db.put(db.writeOpts, key, value)
}

func (db *DB) Get(key []byte) ([]byte, error) {
    return db.get(db.readOpts, key)
}

func (db *DB) Delete(key []byte) error {
    return db.delete(db.writeOpts, key)
}

func (db *DB) SyncPut(key []byte, value []byte) error {
    return db.put(db.syncOpts, key, value)
}

func (db *DB) SyncDelete(key []byte) error {
    return db.delete(db.syncOpts, key)
}

func (db *DB) NewWriteBatch() driver.IWriteBatch {
    wb := newWriteBatch(db)

    runtime.SetFinalizer(wb, func(w *WriteBatch) {
        w.Close()
    })

    return wb
}

func (db *DB) NewIterator() driver.IIterator {
    it := new(Iterator)

    it.it = C.leveldb_create_iterator(db.db, db.iteratorOpts.Opt)

    return it
}

func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
    snap := &Snapshot{
        db:           db,
        snap:         C.leveldb_create_snapshot(db.db),
        readOpts:     NewReadOptions(),
        iteratorOpts: NewReadOptions(),
    }
    snap.readOpts.SetSnapshot(snap)
    snap.iteratorOpts.SetSnapshot(snap)
    snap.iteratorOpts.SetFillCache(false)

    return snap, nil
}

func (db *DB) put(wo *WriteOptions, key, value []byte) error {
    var errStr *C.char
    var k, v *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }
    if len(value) != 0 {
        v = (*C.char)(unsafe.Pointer(&value[0]))
    }

    lenk := len(key)
    lenv := len(value)
    C.leveldb_put(
        db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)

    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
    var errStr *C.char
    var vallen C.size_t
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    value := C.leveldb_get(
        db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)

    if errStr != nil {
        return nil, saveError(errStr)
    }

    if value == nil {
        return nil, nil
    }

    defer C.leveldb_free(unsafe.Pointer(value))

    return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}

func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) {
    var errStr *C.char
    var vallen C.size_t
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    value := C.leveldb_get(
        db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)

    if errStr != nil {
        return nil, saveError(errStr)
    }

    if value == nil {
        return nil, nil
    }

    return NewCSlice(unsafe.Pointer(value), int(vallen)), nil
}

func (db *DB) delete(wo *WriteOptions, key []byte) error {
    var errStr *C.char
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    C.leveldb_delete(
        db.db, wo.Opt, k, C.size_t(len(key)), &errStr)

    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

func (db *DB) Compact() error {
    C.leveldb_compact_range(db.db, nil, 0, nil, 0)
    return nil
}

func (db *DB) GetSlice(key []byte) (driver.ISlice, error) {
    return db.getSlice(db.readOpts, key)
}

func init() {
    driver.Register(Store{})
}

21
vendor/github.com/siddontang/ledisdb/store/leveldb/filterpolicy.go
generated
vendored
@@ -1,21 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "leveldb/c.h"
import "C"

type FilterPolicy struct {
    Policy *C.leveldb_filterpolicy_t
}

func NewBloomFilter(bitsPerKey int) *FilterPolicy {
    policy := C.leveldb_filterpolicy_create_bloom(C.int(bitsPerKey))
    return &FilterPolicy{policy}
}

func (fp *FilterPolicy) Close() {
    C.leveldb_filterpolicy_destroy(fp.Policy)
}

70
vendor/github.com/siddontang/ledisdb/store/leveldb/iterator.go
generated
vendored
@@ -1,70 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "leveldb/c.h"
// #include "leveldb_ext.h"
import "C"

import (
    "unsafe"
)

type Iterator struct {
    it      *C.leveldb_iterator_t
    isValid C.uchar
}

func (it *Iterator) Key() []byte {
    var klen C.size_t
    kdata := C.leveldb_iter_key(it.it, &klen)
    if kdata == nil {
        return nil
    }

    return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}

func (it *Iterator) Value() []byte {
    var vlen C.size_t
    vdata := C.leveldb_iter_value(it.it, &vlen)
    if vdata == nil {
        return nil
    }

    return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}

func (it *Iterator) Close() error {
    if it.it != nil {
        C.leveldb_iter_destroy(it.it)
        it.it = nil
    }
    return nil
}

func (it *Iterator) Valid() bool {
    return ucharToBool(it.isValid)
}

func (it *Iterator) Next() {
    it.isValid = C.leveldb_iter_next_ext(it.it)
}

func (it *Iterator) Prev() {
    it.isValid = C.leveldb_iter_prev_ext(it.it)
}

func (it *Iterator) First() {
    it.isValid = C.leveldb_iter_seek_to_first_ext(it.it)
}

func (it *Iterator) Last() {
    it.isValid = C.leveldb_iter_seek_to_last_ext(it.it)
}

func (it *Iterator) Seek(key []byte) {
    it.isValid = C.leveldb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}

95
vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.cc
generated
vendored
@@ -1,95 +0,0 @@
// +build leveldb

#include "leveldb_ext.h"

#include <stdlib.h>
//#include <string>

//#include "leveldb/db.h"

//using namespace leveldb;

extern "C" {

// static bool SaveError(char** errptr, const Status& s) {
//   assert(errptr != NULL);
//   if (s.ok()) {
//     return false;
//   } else if (*errptr == NULL) {
//     *errptr = strdup(s.ToString().c_str());
//   } else {
//     free(*errptr);
//     *errptr = strdup(s.ToString().c_str());
//   }
//   return true;
// }

// void* leveldb_get_ext(
//     leveldb_t* db,
//     const leveldb_readoptions_t* options,
//     const char* key, size_t keylen,
//     char** valptr,
//     size_t* vallen,
//     char** errptr) {

//   std::string *tmp = new(std::string);

//   //very tricky, maybe changed with c++ leveldb upgrade
//   Status s = (*(DB**)db)->Get(*(ReadOptions*)options, Slice(key, keylen), tmp);

//   if (s.ok()) {
//     *valptr = (char*)tmp->data();
//     *vallen = tmp->size();
//   } else {
//     delete(tmp);
//     tmp = NULL;
//     *valptr = NULL;
//     *vallen = 0;
//     if (!s.IsNotFound()) {
//       SaveError(errptr, s);
//     }
//   }
//   return tmp;
// }

// void leveldb_get_free_ext(void* context) {
//   std::string* s = (std::string*)context;

//   delete(s);
// }


unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t* iter) {
    leveldb_iter_seek_to_first(iter);
    return leveldb_iter_valid(iter);
}

unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t* iter) {
    leveldb_iter_seek_to_last(iter);
    return leveldb_iter_valid(iter);
}

unsigned char leveldb_iter_seek_ext(leveldb_iterator_t* iter, const char* k, size_t klen) {
    leveldb_iter_seek(iter, k, klen);
    return leveldb_iter_valid(iter);
}

unsigned char leveldb_iter_next_ext(leveldb_iterator_t* iter) {
    leveldb_iter_next(iter);
    return leveldb_iter_valid(iter);
}

unsigned char leveldb_iter_prev_ext(leveldb_iterator_t* iter) {
    leveldb_iter_prev(iter);
    return leveldb_iter_valid(iter);
}

extern void leveldb_writebatch_iterate_put(void*, const char* k, size_t klen, const char* v, size_t vlen);
extern void leveldb_writebatch_iterate_delete(void*, const char* k, size_t klen);

void leveldb_writebatch_iterate_ext(leveldb_writebatch_t* w, void *p) {
    leveldb_writebatch_iterate(w, p,
        leveldb_writebatch_iterate_put, leveldb_writebatch_iterate_delete);
}

}

41
vendor/github.com/siddontang/ledisdb/store/leveldb/leveldb_ext.h
generated
vendored
@@ -1,41 +0,0 @@
// +build leveldb

#ifndef LEVELDB_EXT_H
#define LEVELDB_EXT_H

#ifdef __cplusplus
extern "C" {
#endif

#include "leveldb/c.h"


// /* Returns NULL if not found. Otherwise stores the value in **valptr.
//    Stores the length of the value in *vallen.
//    Returns a context that must be freed later. */
// extern void* leveldb_get_ext(
//     leveldb_t* db,
//     const leveldb_readoptions_t* options,
//     const char* key, size_t keylen,
//     char** valptr,
//     size_t* vallen,
//     char** errptr);

// // Frees the context returned by leveldb_get_ext.
// extern void leveldb_get_free_ext(void* context);


// The iterator functions below behave like the plain leveldb iterator
// functions but also return the iterator's validity to the caller.
extern unsigned char leveldb_iter_seek_to_first_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_seek_to_last_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_seek_ext(leveldb_iterator_t*, const char* k, size_t klen);
extern unsigned char leveldb_iter_next_ext(leveldb_iterator_t*);
extern unsigned char leveldb_iter_prev_ext(leveldb_iterator_t*);

extern void leveldb_writebatch_iterate_ext(leveldb_writebatch_t*, void* p);

#ifdef __cplusplus
}
#endif

#endif

7
vendor/github.com/siddontang/ledisdb/store/leveldb/levigo-license
generated
vendored
@@ -1,7 +0,0 @@
Copyright (c) 2012 Jeffrey M Hodges

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

126
vendor/github.com/siddontang/ledisdb/store/leveldb/options.go
generated
vendored
@@ -1,126 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"

type CompressionOpt int

const (
    NoCompression     = CompressionOpt(0)
    SnappyCompression = CompressionOpt(1)
)

type Options struct {
    Opt *C.leveldb_options_t
}

type ReadOptions struct {
    Opt *C.leveldb_readoptions_t
}

type WriteOptions struct {
    Opt *C.leveldb_writeoptions_t
}

func NewOptions() *Options {
    opt := C.leveldb_options_create()
    return &Options{opt}
}

func NewReadOptions() *ReadOptions {
    opt := C.leveldb_readoptions_create()
    return &ReadOptions{opt}
}

func NewWriteOptions() *WriteOptions {
    opt := C.leveldb_writeoptions_create()
    return &WriteOptions{opt}
}

func (o *Options) Close() {
    C.leveldb_options_destroy(o.Opt)
}

func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) {
    C.leveldb_options_set_comparator(o.Opt, cmp)
}

func (o *Options) SetErrorIfExists(error_if_exists bool) {
    eie := boolToUchar(error_if_exists)
    C.leveldb_options_set_error_if_exists(o.Opt, eie)
}

func (o *Options) SetCache(cache *Cache) {
    C.leveldb_options_set_cache(o.Opt, cache.Cache)
}

func (o *Options) SetWriteBufferSize(s int) {
    C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}

func (o *Options) SetParanoidChecks(pc bool) {
    C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}

func (o *Options) SetMaxOpenFiles(n int) {
    C.leveldb_options_set_max_open_files(o.Opt, C.int(n))
}

func (o *Options) SetMaxFileSize(n int) {
    C.leveldb_options_set_max_file_size(o.Opt, C.size_t(n))
}

func (o *Options) SetBlockSize(s int) {
    C.leveldb_options_set_block_size(o.Opt, C.size_t(s))
}

func (o *Options) SetBlockRestartInterval(n int) {
    C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n))
}

func (o *Options) SetCompression(t CompressionOpt) {
    C.leveldb_options_set_compression(o.Opt, C.int(t))
}

func (o *Options) SetCreateIfMissing(b bool) {
    C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}

func (o *Options) SetFilterPolicy(fp *FilterPolicy) {
    var policy *C.leveldb_filterpolicy_t
    if fp != nil {
        policy = fp.Policy
    }
    C.leveldb_options_set_filter_policy(o.Opt, policy)
}

func (ro *ReadOptions) Close() {
    C.leveldb_readoptions_destroy(ro.Opt)
}

func (ro *ReadOptions) SetVerifyChecksums(b bool) {
    C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}

func (ro *ReadOptions) SetFillCache(b bool) {
    C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}

func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
    var s *C.leveldb_snapshot_t
    if snap != nil {
        s = snap.snap
    }
    C.leveldb_readoptions_set_snapshot(ro.Opt, s)
}

func (wo *WriteOptions) Close() {
    C.leveldb_writeoptions_destroy(wo.Opt)
}

func (wo *WriteOptions) SetSync(b bool) {
    C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}

40
vendor/github.com/siddontang/ledisdb/store/leveldb/slice.go
generated
vendored
@@ -1,40 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"

import (
    "reflect"
    "unsafe"
)

type CSlice struct {
    data unsafe.Pointer
    size int
}

func NewCSlice(p unsafe.Pointer, n int) *CSlice {
    return &CSlice{p, n}
}

func (s *CSlice) Data() []byte {
    var value []byte

    sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
    sH.Cap = int(s.size)
    sH.Len = int(s.size)
    sH.Data = uintptr(s.data)

    return value
}

func (s *CSlice) Size() int {
    return int(s.size)
}

func (s *CSlice) Free() {
    C.leveldb_free(s.data)
}

39
vendor/github.com/siddontang/ledisdb/store/leveldb/snapshot.go
generated
vendored
@@ -1,39 +0,0 @@
// +build leveldb

package leveldb

// #cgo LDFLAGS: -lleveldb
// #include "leveldb/c.h"
import "C"

import (
    "github.com/siddontang/ledisdb/store/driver"
)

type Snapshot struct {
    db           *DB
    snap         *C.leveldb_snapshot_t
    readOpts     *ReadOptions
    iteratorOpts *ReadOptions
}

func (s *Snapshot) Get(key []byte) ([]byte, error) {
    return s.db.get(s.readOpts, key)
}

func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
    return s.db.getSlice(s.readOpts, key)
}

func (s *Snapshot) NewIterator() driver.IIterator {
    it := new(Iterator)
    it.it = C.leveldb_create_iterator(s.db.db, s.db.iteratorOpts.Opt)
    return it
}

func (s *Snapshot) Close() {
    C.leveldb_release_snapshot(s.db.db, s.snap)
    s.iteratorOpts.Close()
    s.readOpts.Close()
}
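A usage sketch, not part of the diff: a snapshot pins a consistent view of the database, so later writes are invisible through it, and Close must be called to release the C-side snapshot plus its read options. The type assertion is only for illustration, since NewSnapshot returns the driver.ISnapshot interface.

func snapshotExample(db *DB) ([]byte, error) {
    isnap, err := db.NewSnapshot()
    if err != nil {
        return nil, err
    }
    snap := isnap.(*Snapshot)
    defer snap.Close()

    db.Put([]byte("k"), []byte("new")) // not visible through snap
    return snap.Get([]byte("k"))       // still reads the pinned version
}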

45
vendor/github.com/siddontang/ledisdb/store/leveldb/util.go
generated
vendored
@@ -1,45 +0,0 @@
// +build leveldb

package leveldb

// #include "leveldb/c.h"
import "C"

import (
    "fmt"
    "reflect"
    "unsafe"
)

func boolToUchar(b bool) C.uchar {
    uc := C.uchar(0)
    if b {
        uc = C.uchar(1)
    }
    return uc
}

func ucharToBool(uc C.uchar) bool {
    if uc == C.uchar(0) {
        return false
    }
    return true
}

func saveError(errStr *C.char) error {
    if errStr != nil {
        gs := C.GoString(errStr)
        C.leveldb_free(unsafe.Pointer(errStr))
        return fmt.Errorf(gs)
    }
    return nil
}

func slice(p unsafe.Pointer, n int) []byte {
    var b []byte
    pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
    pbyte.Data = uintptr(p)
    pbyte.Len = n
    pbyte.Cap = n
    return b
}

83
vendor/github.com/siddontang/ledisdb/store/rocksdb/batch.go
generated
vendored
@@ -1,83 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"

import (
    "unsafe"
)

type WriteBatch struct {
    db       *DB
    wbatch   *C.rocksdb_writebatch_t
    commitOk bool
}

func (w *WriteBatch) Close() {
    if w.wbatch != nil {
        C.rocksdb_writebatch_destroy(w.wbatch)
        w.wbatch = nil
    }
}

func (w *WriteBatch) Put(key, value []byte) {
    w.commitOk = false

    var k, v *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }
    if len(value) != 0 {
        v = (*C.char)(unsafe.Pointer(&value[0]))
    }

    lenk := len(key)
    lenv := len(value)

    C.rocksdb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv))
}

func (w *WriteBatch) Delete(key []byte) {
    w.commitOk = false

    C.rocksdb_writebatch_delete(w.wbatch,
        (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}

func (w *WriteBatch) Commit() error {
    return w.commit(w.db.writeOpts)
}

func (w *WriteBatch) SyncCommit() error {
    return w.commit(w.db.syncOpts)
}

func (w *WriteBatch) Rollback() error {
    if !w.commitOk {
        C.rocksdb_writebatch_clear(w.wbatch)
    }
    return nil
}

func (w *WriteBatch) commit(wb *WriteOptions) error {
    w.commitOk = true

    var errStr *C.char
    C.rocksdb_write_ext(w.db.db, wb.Opt, w.wbatch, &errStr)
    if errStr != nil {
        w.commitOk = false
        return saveError(errStr)
    }
    return nil
}

func (w *WriteBatch) Data() []byte {
    var vallen C.size_t
    value := C.rocksdb_writebatch_data(w.wbatch, &vallen)

    return slice(unsafe.Pointer(value), int(vallen))
}
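A sketch, not part of the diff: note how the commitOk flag changes Rollback's meaning compared to the leveldb batch earlier in this diff. After a successful commit, Rollback is a no-op; only a failed (or never attempted) commit clears the pending writes.

func rollbackSemantics(wb *WriteBatch) {
    wb.Put([]byte("a"), []byte("1")) // Put/Delete reset commitOk to false

    if err := wb.Commit(); err != nil {
        wb.Rollback() // commitOk is false here, so the batch is cleared
        return
    }

    wb.Rollback() // commitOk is true: this leaves the committed batch alone
}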

20
vendor/github.com/siddontang/ledisdb/store/rocksdb/cache.go
generated
vendored
@@ -1,20 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdint.h>
// #include "rocksdb/c.h"
import "C"

type Cache struct {
    Cache *C.rocksdb_cache_t
}

func NewLRUCache(capacity int) *Cache {
    return &Cache{C.rocksdb_cache_create_lru(C.size_t(capacity))}
}

func (c *Cache) Close() {
    C.rocksdb_cache_destroy(c.Cache)
}

3
vendor/github.com/siddontang/ledisdb/store/rocksdb/const.go
generated
vendored
@@ -1,3 +0,0 @@
package rocksdb

const DBName = "rocksdb"

342
vendor/github.com/siddontang/ledisdb/store/rocksdb/db.go
generated
vendored
@@ -1,342 +0,0 @@
// +build rocksdb

// Package rocksdb is a wrapper for c++ rocksdb
package rocksdb

/*
#cgo LDFLAGS: -lrocksdb
#include <rocksdb/c.h>
#include <stdlib.h>
#include "rocksdb_ext.h"
*/
import "C"

import (
    "os"
    "runtime"
    "unsafe"

    "github.com/siddontang/ledisdb/config"
    "github.com/siddontang/ledisdb/store/driver"
)

const defaultFilterBits int = 10

type Store struct {
}

func (s Store) String() string {
    return DBName
}

func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) {
    if err := os.MkdirAll(path, 0755); err != nil {
        return nil, err
    }

    db := new(DB)
    db.path = path
    db.cfg = &cfg.RocksDB

    if err := db.open(); err != nil {
        return nil, err
    }

    return db, nil
}

func (s Store) Repair(path string, cfg *config.Config) error {
    db := new(DB)
    db.path = path
    db.cfg = &cfg.RocksDB

    err := db.open()
    defer db.Close()

    //open ok, do not need repair
    if err == nil {
        return nil
    }

    var errStr *C.char
    ldbname := C.CString(path)
    defer C.free(unsafe.Pointer(ldbname))

    C.rocksdb_repair_db(db.opts.Opt, ldbname, &errStr)
    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

type DB struct {
    path string

    cfg *config.RocksDBConfig

    db *C.rocksdb_t

    env *Env

    opts      *Options
    blockOpts *BlockBasedTableOptions

    //for default read and write options
    readOpts     *ReadOptions
    writeOpts    *WriteOptions
    iteratorOpts *ReadOptions

    syncOpts *WriteOptions

    cache *Cache

    filter *FilterPolicy
}

func (db *DB) open() error {
    db.initOptions(db.cfg)

    var errStr *C.char
    ldbname := C.CString(db.path)
    defer C.free(unsafe.Pointer(ldbname))

    db.db = C.rocksdb_open(db.opts.Opt, ldbname, &errStr)
    if errStr != nil {
        db.db = nil
        return saveError(errStr)
    }
    return nil
}

func (db *DB) initOptions(cfg *config.RocksDBConfig) {
    opts := NewOptions()
    blockOpts := NewBlockBasedTableOptions()

    opts.SetCreateIfMissing(true)

    db.env = NewDefaultEnv()
    db.env.SetBackgroundThreads(cfg.BackgroundThreads)
    db.env.SetHighPriorityBackgroundThreads(cfg.HighPriorityBackgroundThreads)
    opts.SetEnv(db.env)

    db.cache = NewLRUCache(cfg.CacheSize)
    blockOpts.SetCache(db.cache)

    //we must use bloomfilter
    db.filter = NewBloomFilter(defaultFilterBits)
    blockOpts.SetFilterPolicy(db.filter)
    blockOpts.SetBlockSize(cfg.BlockSize)
    opts.SetBlockBasedTableFactory(blockOpts)

    opts.SetCompression(CompressionOpt(cfg.Compression))
    opts.SetWriteBufferSize(cfg.WriteBufferSize)
    opts.SetMaxOpenFiles(cfg.MaxOpenFiles)
    opts.SetMaxBackgroundCompactions(cfg.MaxBackgroundCompactions)
    opts.SetMaxBackgroundFlushes(cfg.MaxBackgroundFlushes)
    opts.SetLevel0FileNumCompactionTrigger(cfg.Level0FileNumCompactionTrigger)
    opts.SetLevel0SlowdownWritesTrigger(cfg.Level0SlowdownWritesTrigger)
    opts.SetLevel0StopWritesTrigger(cfg.Level0StopWritesTrigger)
    opts.SetTargetFileSizeBase(cfg.TargetFileSizeBase)
    opts.SetTargetFileSizeMultiplier(cfg.TargetFileSizeMultiplier)
    opts.SetMaxBytesForLevelBase(cfg.MaxBytesForLevelBase)
    opts.SetMaxBytesForLevelMultiplier(cfg.MaxBytesForLevelMultiplier)
    opts.SetMinWriteBufferNumberToMerge(cfg.MinWriteBufferNumberToMerge)
    opts.DisableAutoCompactions(cfg.DisableAutoCompactions)
    opts.EnableStatistics(cfg.EnableStatistics)
    opts.UseFsync(cfg.UseFsync)
    opts.SetStatsDumpPeriodSec(cfg.StatsDumpPeriodSec)
    opts.SetMaxManifestFileSize(cfg.MaxManifestFileSize)

    db.opts = opts
    db.blockOpts = blockOpts

    db.readOpts = NewReadOptions()
    db.writeOpts = NewWriteOptions()
    db.writeOpts.DisableWAL(cfg.DisableWAL)

    db.syncOpts = NewWriteOptions()
    db.syncOpts.SetSync(true)
    db.syncOpts.DisableWAL(cfg.DisableWAL)

    db.iteratorOpts = NewReadOptions()
    db.iteratorOpts.SetFillCache(false)
}

func (db *DB) Close() error {
    if db.db != nil {
        C.rocksdb_close(db.db)
        db.db = nil
    }

    if db.filter != nil {
        db.filter.Close()
    }

    if db.cache != nil {
        db.cache.Close()
    }

    if db.env != nil {
        db.env.Close()
    }

    //db.blockOpts.Close()

    db.opts.Close()

    db.readOpts.Close()
    db.writeOpts.Close()
    db.iteratorOpts.Close()

    return nil
}

func (db *DB) Put(key, value []byte) error {
    return db.put(db.writeOpts, key, value)
}

func (db *DB) Get(key []byte) ([]byte, error) {
    return db.get(db.readOpts, key)
}

func (db *DB) Delete(key []byte) error {
    return db.delete(db.writeOpts, key)
}

func (db *DB) SyncPut(key []byte, value []byte) error {
    return db.put(db.syncOpts, key, value)
}

func (db *DB) SyncDelete(key []byte) error {
    return db.delete(db.syncOpts, key)
}

func (db *DB) NewWriteBatch() driver.IWriteBatch {
    wb := &WriteBatch{
        db:     db,
        wbatch: C.rocksdb_writebatch_create(),
    }

    runtime.SetFinalizer(wb, func(w *WriteBatch) {
        w.Close()
    })

    return wb
}

func (db *DB) NewIterator() driver.IIterator {
    it := new(Iterator)

    it.it = C.rocksdb_create_iterator(db.db, db.iteratorOpts.Opt)

    return it
}

func (db *DB) NewSnapshot() (driver.ISnapshot, error) {
    snap := &Snapshot{
        db:           db,
        snap:         C.rocksdb_create_snapshot(db.db),
        readOpts:     NewReadOptions(),
        iteratorOpts: NewReadOptions(),
    }
    snap.readOpts.SetSnapshot(snap)
    snap.iteratorOpts.SetSnapshot(snap)
    snap.iteratorOpts.SetFillCache(false)

    return snap, nil
}

func (db *DB) put(wo *WriteOptions, key, value []byte) error {
    var errStr *C.char
    var k, v *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }
    if len(value) != 0 {
        v = (*C.char)(unsafe.Pointer(&value[0]))
    }

    lenk := len(key)
    lenv := len(value)
    C.rocksdb_put(
        db.db, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)

    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

func (db *DB) get(ro *ReadOptions, key []byte) ([]byte, error) {
    var errStr *C.char
    var vallen C.size_t
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    value := C.rocksdb_get(
        db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)

    if errStr != nil {
        return nil, saveError(errStr)
    }

    if value == nil {
        return nil, nil
    }

    defer C.free(unsafe.Pointer(value))
    return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}

func (db *DB) getSlice(ro *ReadOptions, key []byte) (driver.ISlice, error) {
    var errStr *C.char
    var vallen C.size_t
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    value := C.rocksdb_get(
        db.db, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)

    if errStr != nil {
        return nil, saveError(errStr)
    }

    if value == nil {
        return nil, nil
    }

    return NewCSlice(unsafe.Pointer(value), int(vallen)), nil
}

func (db *DB) delete(wo *WriteOptions, key []byte) error {
    var errStr *C.char
    var k *C.char
    if len(key) != 0 {
        k = (*C.char)(unsafe.Pointer(&key[0]))
    }

    C.rocksdb_delete(
        db.db, wo.Opt, k, C.size_t(len(key)), &errStr)

    if errStr != nil {
        return saveError(errStr)
    }
    return nil
}

func (db *DB) Compact() error {
    C.rocksdb_compact_range(db.db, nil, 0, nil, 0)
    return nil
}

func (db *DB) GetSlice(key []byte) (driver.ISlice, error) {
    return db.getSlice(db.readOpts, key)
}

func init() {
    driver.Register(Store{})
}

27
vendor/github.com/siddontang/ledisdb/store/rocksdb/env.go
generated
vendored
@@ -1,27 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"

type Env struct {
    Env *C.rocksdb_env_t
}

func NewDefaultEnv() *Env {
    return &Env{C.rocksdb_create_default_env()}
}

func (env *Env) SetHighPriorityBackgroundThreads(n int) {
    C.rocksdb_env_set_high_priority_background_threads(env.Env, C.int(n))
}

func (env *Env) SetBackgroundThreads(n int) {
    C.rocksdb_env_set_background_threads(env.Env, C.int(n))
}

func (env *Env) Close() {
    C.rocksdb_env_destroy(env.Env)
}

21
vendor/github.com/siddontang/ledisdb/store/rocksdb/filterpolicy.go
generated
vendored
@@ -1,21 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"

type FilterPolicy struct {
    Policy *C.rocksdb_filterpolicy_t
}

func NewBloomFilter(bitsPerKey int) *FilterPolicy {
    policy := C.rocksdb_filterpolicy_create_bloom(C.int(bitsPerKey))
    return &FilterPolicy{policy}
}

func (fp *FilterPolicy) Close() {
    C.rocksdb_filterpolicy_destroy(fp.Policy)
}

70
vendor/github.com/siddontang/ledisdb/store/rocksdb/iterator.go
generated
vendored
@@ -1,70 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
// #include "rocksdb_ext.h"
import "C"

import (
    "unsafe"
)

type Iterator struct {
    it      *C.rocksdb_iterator_t
    isValid C.uchar
}

func (it *Iterator) Key() []byte {
    var klen C.size_t
    kdata := C.rocksdb_iter_key(it.it, &klen)
    if kdata == nil {
        return nil
    }

    return slice(unsafe.Pointer(kdata), int(C.int(klen)))
}

func (it *Iterator) Value() []byte {
    var vlen C.size_t
    vdata := C.rocksdb_iter_value(it.it, &vlen)
    if vdata == nil {
        return nil
    }

    return slice(unsafe.Pointer(vdata), int(C.int(vlen)))
}

func (it *Iterator) Close() error {
    if it.it != nil {
        C.rocksdb_iter_destroy(it.it)
        it.it = nil
    }
    return nil
}

func (it *Iterator) Valid() bool {
    return ucharToBool(it.isValid)
}

func (it *Iterator) Next() {
    it.isValid = C.rocksdb_iter_next_ext(it.it)
}

func (it *Iterator) Prev() {
    it.isValid = C.rocksdb_iter_prev_ext(it.it)
}

func (it *Iterator) First() {
    it.isValid = C.rocksdb_iter_seek_to_first_ext(it.it)
}

func (it *Iterator) Last() {
    it.isValid = C.rocksdb_iter_seek_to_last_ext(it.it)
}

func (it *Iterator) Seek(key []byte) {
    it.isValid = C.rocksdb_iter_seek_ext(it.it, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key)))
}
229
vendor/github.com/siddontang/ledisdb/store/rocksdb/options.go
generated
vendored
@ -1,229 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"

type CompressionOpt int

const (
	NoCompression     = CompressionOpt(0)
	SnappyCompression = CompressionOpt(1)
	ZlibCompression   = CompressionOpt(2)
	Bz2Compression    = CompressionOpt(3)
	Lz4Compression    = CompressionOpt(4)
	Lz4hcCompression  = CompressionOpt(5)
)

type Options struct {
	Opt *C.rocksdb_options_t
}

type ReadOptions struct {
	Opt *C.rocksdb_readoptions_t
}

type WriteOptions struct {
	Opt *C.rocksdb_writeoptions_t
}

type BlockBasedTableOptions struct {
	Opt *C.rocksdb_block_based_table_options_t
}

func NewOptions() *Options {
	opt := C.rocksdb_options_create()
	return &Options{opt}
}

func NewReadOptions() *ReadOptions {
	opt := C.rocksdb_readoptions_create()
	return &ReadOptions{opt}
}

func NewWriteOptions() *WriteOptions {
	opt := C.rocksdb_writeoptions_create()
	return &WriteOptions{opt}
}

func NewBlockBasedTableOptions() *BlockBasedTableOptions {
	opt := C.rocksdb_block_based_options_create()
	return &BlockBasedTableOptions{opt}
}

func (o *Options) Close() {
	C.rocksdb_options_destroy(o.Opt)
}

func (o *Options) IncreaseParallelism(n int) {
	C.rocksdb_options_increase_parallelism(o.Opt, C.int(n))
}

func (o *Options) OptimizeLevelStyleCompaction(n int) {
	C.rocksdb_options_optimize_level_style_compaction(o.Opt, C.uint64_t(n))
}

func (o *Options) SetComparator(cmp *C.rocksdb_comparator_t) {
	C.rocksdb_options_set_comparator(o.Opt, cmp)
}

func (o *Options) SetErrorIfExists(error_if_exists bool) {
	eie := boolToUchar(error_if_exists)
	C.rocksdb_options_set_error_if_exists(o.Opt, eie)
}

func (o *Options) SetEnv(env *Env) {
	C.rocksdb_options_set_env(o.Opt, env.Env)
}

func (o *Options) SetWriteBufferSize(s int) {
	C.rocksdb_options_set_write_buffer_size(o.Opt, C.size_t(s))
}

func (o *Options) SetParanoidChecks(pc bool) {
	C.rocksdb_options_set_paranoid_checks(o.Opt, boolToUchar(pc))
}

func (o *Options) SetMaxOpenFiles(n int) {
	C.rocksdb_options_set_max_open_files(o.Opt, C.int(n))
}

func (o *Options) SetCompression(t CompressionOpt) {
	C.rocksdb_options_set_compression(o.Opt, C.int(t))
}

func (o *Options) SetCreateIfMissing(b bool) {
	C.rocksdb_options_set_create_if_missing(o.Opt, boolToUchar(b))
}

func (o *Options) SetMaxWriteBufferNumber(n int) {
	C.rocksdb_options_set_max_write_buffer_number(o.Opt, C.int(n))
}

func (o *Options) SetMaxBackgroundCompactions(n int) {
	C.rocksdb_options_set_max_background_compactions(o.Opt, C.int(n))
}

func (o *Options) SetMaxBackgroundFlushes(n int) {
	C.rocksdb_options_set_max_background_flushes(o.Opt, C.int(n))
}

func (o *Options) SetNumLevels(n int) {
	C.rocksdb_options_set_num_levels(o.Opt, C.int(n))
}

func (o *Options) SetLevel0FileNumCompactionTrigger(n int) {
	C.rocksdb_options_set_level0_file_num_compaction_trigger(o.Opt, C.int(n))
}

func (o *Options) SetLevel0SlowdownWritesTrigger(n int) {
	C.rocksdb_options_set_level0_slowdown_writes_trigger(o.Opt, C.int(n))
}

func (o *Options) SetLevel0StopWritesTrigger(n int) {
	C.rocksdb_options_set_level0_stop_writes_trigger(o.Opt, C.int(n))
}

func (o *Options) SetTargetFileSizeBase(n int) {
	C.rocksdb_options_set_target_file_size_base(o.Opt, C.uint64_t(uint64(n)))
}

func (o *Options) SetTargetFileSizeMultiplier(n int) {
	C.rocksdb_options_set_target_file_size_multiplier(o.Opt, C.int(n))
}

func (o *Options) SetMaxBytesForLevelBase(n int) {
	C.rocksdb_options_set_max_bytes_for_level_base(o.Opt, C.uint64_t(uint64(n)))
}

func (o *Options) SetMaxBytesForLevelMultiplier(n int) {
	C.rocksdb_options_set_max_bytes_for_level_multiplier(o.Opt, C.double(n))
}

func (o *Options) SetBlockBasedTableFactory(opt *BlockBasedTableOptions) {
	C.rocksdb_options_set_block_based_table_factory(o.Opt, opt.Opt)
}

func (o *Options) SetMinWriteBufferNumberToMerge(n int) {
	C.rocksdb_options_set_min_write_buffer_number_to_merge(o.Opt, C.int(n))
}

func (o *Options) DisableAutoCompactions(b bool) {
	C.rocksdb_options_set_disable_auto_compactions(o.Opt, boolToInt(b))
}

func (o *Options) UseFsync(b bool) {
	C.rocksdb_options_set_use_fsync(o.Opt, boolToInt(b))
}

func (o *Options) EnableStatistics(b bool) {
	if b {
		C.rocksdb_options_enable_statistics(o.Opt)
	}
}

func (o *Options) SetStatsDumpPeriodSec(n int) {
	C.rocksdb_options_set_stats_dump_period_sec(o.Opt, C.uint(n))
}

func (o *Options) SetMaxManifestFileSize(n int) {
	C.rocksdb_options_set_max_manifest_file_size(o.Opt, C.size_t(n))
}

func (o *BlockBasedTableOptions) Close() {
	C.rocksdb_block_based_options_destroy(o.Opt)
}

func (o *BlockBasedTableOptions) SetFilterPolicy(fp *FilterPolicy) {
	var policy *C.rocksdb_filterpolicy_t
	if fp != nil {
		policy = fp.Policy
	}
	C.rocksdb_block_based_options_set_filter_policy(o.Opt, policy)
}

func (o *BlockBasedTableOptions) SetBlockSize(s int) {
	C.rocksdb_block_based_options_set_block_size(o.Opt, C.size_t(s))
}

func (o *BlockBasedTableOptions) SetBlockRestartInterval(n int) {
	C.rocksdb_block_based_options_set_block_restart_interval(o.Opt, C.int(n))
}

func (o *BlockBasedTableOptions) SetCache(cache *Cache) {
	C.rocksdb_block_based_options_set_block_cache(o.Opt, cache.Cache)
}

func (ro *ReadOptions) Close() {
	C.rocksdb_readoptions_destroy(ro.Opt)
}

func (ro *ReadOptions) SetVerifyChecksums(b bool) {
	C.rocksdb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b))
}

func (ro *ReadOptions) SetFillCache(b bool) {
	C.rocksdb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b))
}

func (ro *ReadOptions) SetSnapshot(snap *Snapshot) {
	var s *C.rocksdb_snapshot_t
	if snap != nil {
		s = snap.snap
	}
	C.rocksdb_readoptions_set_snapshot(ro.Opt, s)
}

func (wo *WriteOptions) Close() {
	C.rocksdb_writeoptions_destroy(wo.Opt)
}

func (wo *WriteOptions) SetSync(b bool) {
	C.rocksdb_writeoptions_set_sync(wo.Opt, boolToUchar(b))
}

func (wo *WriteOptions) DisableWAL(b bool) {
	C.rocksdb_writeoptions_disable_WAL(wo.Opt, boolToInt(b))
}
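These setters are thin cgo shims: each method converts its Go argument and forwards it to the matching rocksdb_options_set_* C call. A minimal sketch of how they compose before opening a database, assuming it compiles inside this package with the rocksdb build tag and a system rocksdb install; tuneOptions and the tuning values are illustrative, not part of the driver:

//go:build rocksdb

package rocksdb

// tuneOptions is a hypothetical helper showing how the wrappers above
// are typically chained before the options are handed to rocksdb_open.
func tuneOptions() *Options {
	opt := NewOptions()
	opt.SetCreateIfMissing(true) // create the DB on first open
	opt.SetCompression(SnappyCompression)
	opt.SetWriteBufferSize(64 * 1024 * 1024) // 64MB memtable
	opt.SetMaxOpenFiles(1024)

	bbt := NewBlockBasedTableOptions()
	bbt.SetBlockSize(16 * 1024) // 16KB data blocks
	opt.SetBlockBasedTableFactory(bbt)
	return opt
}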
44
vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.cc
generated
vendored
@ -1,44 +0,0 @@
// +build rocksdb

#include "rocksdb_ext.h"

#include <stdlib.h>
#include <string>

extern "C" {

unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t* iter) {
	rocksdb_iter_seek_to_first(iter);
	return rocksdb_iter_valid(iter);
}

unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t* iter) {
	rocksdb_iter_seek_to_last(iter);
	return rocksdb_iter_valid(iter);
}

unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t* iter, const char* k, size_t klen) {
	rocksdb_iter_seek(iter, k, klen);
	return rocksdb_iter_valid(iter);
}

unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t* iter) {
	rocksdb_iter_next(iter);
	return rocksdb_iter_valid(iter);
}

unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t* iter) {
	rocksdb_iter_prev(iter);
	return rocksdb_iter_valid(iter);
}

void rocksdb_write_ext(rocksdb_t* db,
	const rocksdb_writeoptions_t* options,
	rocksdb_writebatch_t* batch, char** errptr) {
	rocksdb_write(db, options, batch, errptr);
	if (*errptr == NULL) {
		rocksdb_writebatch_clear(batch);
	}
}

}
24
vendor/github.com/siddontang/ledisdb/store/rocksdb/rocksdb_ext.h
generated
vendored
@ -1,24 +0,0 @@
// +build rocksdb

#ifndef ROCKSDB_EXT_H
#define ROCKSDB_EXT_H

#ifdef __cplusplus
extern "C" {
#endif

#include "rocksdb/c.h"

// The iterator functions below behave like the corresponding rocksdb
// iterator calls, but also return the iterator's valid status, so the Go
// side gets the seek and the validity check in a single cgo crossing.
extern unsigned char rocksdb_iter_seek_to_first_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_to_last_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_seek_ext(rocksdb_iterator_t*, const char* k, size_t klen);
extern unsigned char rocksdb_iter_next_ext(rocksdb_iterator_t*);
extern unsigned char rocksdb_iter_prev_ext(rocksdb_iterator_t*);
extern void rocksdb_write_ext(rocksdb_t* db, const rocksdb_writeoptions_t* options, rocksdb_writebatch_t* batch, char** errptr);

#ifdef __cplusplus
}
#endif

#endif
41
vendor/github.com/siddontang/ledisdb/store/rocksdb/slice.go
generated
vendored
@ -1,41 +0,0 @@
//+build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include <rocksdb/c.h>
// #include <stdlib.h>
import "C"

import (
	"reflect"
	"unsafe"
)

type CSlice struct {
	data unsafe.Pointer
	size int
}

func NewCSlice(p unsafe.Pointer, n int) *CSlice {
	return &CSlice{p, n}
}

func (s *CSlice) Data() []byte {
	var value []byte

	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap = int(s.size)
	sH.Len = int(s.size)
	sH.Data = uintptr(s.data)

	return value
}

func (s *CSlice) Size() int {
	return int(s.size)
}

func (s *CSlice) Free() {
	C.free(s.data)
}
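CSlice.Data aliases C-owned memory as a Go []byte without copying, by writing the pointer, length, and capacity straight into the slice header. reflect.SliceHeader is deprecated in current Go; a sketch of the same zero-copy view with unsafe.Slice (Go 1.17+), using a Go-allocated buffer to stand in for the C pointer:

package main

import (
	"fmt"
	"unsafe"
)

// asBytes views n bytes at p as a []byte without copying, the same trick
// CSlice.Data performs with reflect.SliceHeader.
func asBytes(p unsafe.Pointer, n int) []byte {
	return unsafe.Slice((*byte)(p), n)
}

func main() {
	backing := []byte("hello")
	view := asBytes(unsafe.Pointer(&backing[0]), len(backing))
	view[0] = 'H'                // writes through to the backing array: zero copy
	fmt.Println(string(backing)) // Hello
}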
39
vendor/github.com/siddontang/ledisdb/store/rocksdb/snapshot.go
generated
vendored
@ -1,39 +0,0 @@
// +build rocksdb

package rocksdb

// #cgo LDFLAGS: -lrocksdb
// #include "rocksdb/c.h"
import "C"

import (
	"github.com/siddontang/ledisdb/store/driver"
)

type Snapshot struct {
	db           *DB
	snap         *C.rocksdb_snapshot_t
	readOpts     *ReadOptions
	iteratorOpts *ReadOptions
}

func (s *Snapshot) Get(key []byte) ([]byte, error) {
	return s.db.get(s.readOpts, key)
}

func (s *Snapshot) GetSlice(key []byte) (driver.ISlice, error) {
	return s.db.getSlice(s.readOpts, key)
}

func (s *Snapshot) NewIterator() driver.IIterator {
	it := new(Iterator)
	// Use the snapshot's own iterator options (which pin the snapshot),
	// not the db's; the db's options would iterate the live state.
	it.it = C.rocksdb_create_iterator(s.db.db, s.iteratorOpts.Opt)
	return it
}

func (s *Snapshot) Close() {
	C.rocksdb_release_snapshot(s.db.db, s.snap)
	s.iteratorOpts.Close()
	s.readOpts.Close()
}
54
vendor/github.com/siddontang/ledisdb/store/rocksdb/util.go
generated
vendored
@ -1,54 +0,0 @@
// +build rocksdb

package rocksdb

// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

func boolToUchar(b bool) C.uchar {
	uc := C.uchar(0)
	if b {
		uc = C.uchar(1)
	}
	return uc
}

func ucharToBool(uc C.uchar) bool {
	if uc == C.uchar(0) {
		return false
	}
	return true
}

func boolToInt(b bool) C.int {
	uc := C.int(0)
	if b {
		uc = C.int(1)
	}
	return uc
}

func saveError(errStr *C.char) error {
	if errStr != nil {
		gs := C.GoString(errStr)
		C.free(unsafe.Pointer(errStr))
		return fmt.Errorf(gs)
	}
	return nil
}

func slice(p unsafe.Pointer, n int) []byte {
	var b []byte
	pbyte := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pbyte.Data = uintptr(p)
	pbyte.Len = n
	pbyte.Cap = n
	return b
}
9
vendor/github.com/siddontang/ledisdb/store/slice.go
generated
vendored
@ -1,9 +0,0 @@
package store

import (
	"github.com/siddontang/ledisdb/store/driver"
)

type Slice interface {
	driver.ISlice
}
48
vendor/github.com/siddontang/ledisdb/store/snapshot.go
generated
vendored
@ -1,48 +0,0 @@
package store

import (
	"github.com/siddontang/ledisdb/store/driver"
)

type Snapshot struct {
	driver.ISnapshot
	st *Stat
}

func (s *Snapshot) NewIterator() *Iterator {
	it := new(Iterator)
	it.it = s.ISnapshot.NewIterator()
	it.st = s.st

	s.st.IterNum.Add(1)

	return it
}

func (s *Snapshot) Get(key []byte) ([]byte, error) {
	v, err := s.ISnapshot.Get(key)
	s.st.statGet(v, err)
	return v, err
}

func (s *Snapshot) GetSlice(key []byte) (Slice, error) {
	if d, ok := s.ISnapshot.(driver.ISliceGeter); ok {
		v, err := d.GetSlice(key)
		s.st.statGet(v, err)
		return v, err
	} else {
		v, err := s.Get(key)
		if err != nil {
			return nil, err
		} else if v == nil {
			return nil, nil
		} else {
			return driver.GoSlice(v), nil
		}
	}
}

func (s *Snapshot) Close() {
	s.st.SnapshotCloseNum.Add(1)
	s.ISnapshot.Close()
}
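GetSlice shows the optional-interface pattern: it probes whether the wrapped driver also implements driver.ISliceGeter, and falls back to a plain Get (wrapped in a GoSlice) when it does not. A self-contained sketch of the same pattern, with hypothetical Reader/FastReader interfaces standing in for the driver types:

package main

import "fmt"

// Reader is the mandatory interface every backend implements.
type Reader interface {
	Get(key string) string
}

// FastReader is an optional upgrade a backend may also implement.
type FastReader interface {
	GetFast(key string) string
}

// get prefers the fast path when the backend supports it, mirroring
// how Snapshot.GetSlice probes for driver.ISliceGeter above.
func get(r Reader, key string) string {
	if f, ok := r.(FastReader); ok {
		return f.GetFast(key)
	}
	return r.Get(key) // portable fallback
}

type plain struct{}

func (plain) Get(key string) string { return "slow:" + key }

func main() {
	fmt.Println(get(plain{}, "k")) // slow:k — fallback path taken
}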
37
vendor/github.com/siddontang/ledisdb/store/stat.go
generated
vendored
@ -1,37 +0,0 @@
package store

import (
	"github.com/siddontang/go/sync2"
)

type Stat struct {
	GetNum               sync2.AtomicInt64
	GetMissingNum        sync2.AtomicInt64
	GetTotalTime         sync2.AtomicDuration
	PutNum               sync2.AtomicInt64
	DeleteNum            sync2.AtomicInt64
	IterNum              sync2.AtomicInt64
	IterSeekNum          sync2.AtomicInt64
	IterCloseNum         sync2.AtomicInt64
	SnapshotNum          sync2.AtomicInt64
	SnapshotCloseNum     sync2.AtomicInt64
	BatchNum             sync2.AtomicInt64
	BatchCommitNum       sync2.AtomicInt64
	BatchCommitTotalTime sync2.AtomicDuration
	TxNum                sync2.AtomicInt64
	TxCommitNum          sync2.AtomicInt64
	TxCloseNum           sync2.AtomicInt64
	CompactNum           sync2.AtomicInt64
	CompactTotalTime     sync2.AtomicDuration
}

func (st *Stat) statGet(v interface{}, err error) {
	st.GetNum.Add(1)
	if v == nil && err == nil {
		st.GetMissingNum.Add(1)
	}
}

func (st *Stat) Reset() {
	*st = Stat{}
}
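These counters are lock-free atomics bumped from every read and write path. A sketch of the same bookkeeping using the standard library's atomic types (Go 1.19+) instead of sync2. Note a Go subtlety in statGet as written: a nil []byte passed as interface{} compares non-nil, so the miss counter only fires when the interface value itself is nil; the typed signature below sidesteps that:

package main

import (
	"fmt"
	"sync/atomic"
)

// counters mirrors the Stat pattern with stdlib atomics.
type counters struct {
	getNum        atomic.Int64
	getMissingNum atomic.Int64
}

// statGet takes a concrete []byte, so a nil slice is seen as a miss.
func (c *counters) statGet(v []byte, err error) {
	c.getNum.Add(1)
	if v == nil && err == nil {
		c.getMissingNum.Add(1)
	}
}

func main() {
	var c counters
	c.statGet(nil, nil)
	fmt.Println(c.getNum.Load(), c.getMissingNum.Load()) // 1 1
}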
62
vendor/github.com/siddontang/ledisdb/store/store.go
generated
vendored
@ -1,62 +0,0 @@
package store

import (
	"fmt"
	"os"
	"path"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store/driver"

	_ "github.com/siddontang/ledisdb/store/goleveldb"
	_ "github.com/siddontang/ledisdb/store/leveldb"
	_ "github.com/siddontang/ledisdb/store/rocksdb"
)

func getStorePath(cfg *config.Config) string {
	if len(cfg.DBPath) > 0 {
		return cfg.DBPath
	} else {
		return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName))
	}
}

func Open(cfg *config.Config) (*DB, error) {
	s, err := driver.GetStore(cfg)
	if err != nil {
		return nil, err
	}

	path := getStorePath(cfg)

	if err := os.MkdirAll(path, 0755); err != nil {
		return nil, err
	}

	idb, err := s.Open(path, cfg)
	if err != nil {
		return nil, err
	}

	db := new(DB)
	db.db = idb
	db.name = s.String()
	db.st = &Stat{}
	db.cfg = cfg

	return db, nil
}

func Repair(cfg *config.Config) error {
	s, err := driver.GetStore(cfg)
	if err != nil {
		return err
	}

	path := getStorePath(cfg)

	return s.Repair(path, cfg)
}

func init() {
}
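Open wires everything together: the blank driver imports register each backend at init time, driver.GetStore resolves one by cfg.DBName, and the files land in DBPath or <DataDir>/<DBName>_data. A hedged usage sketch; NewConfigDefault and db.Close are assumed from the ledisdb config package and the store's DB type (defined in db.go, not shown here):

package main

import (
	"log"

	"github.com/siddontang/ledisdb/config"
	"github.com/siddontang/ledisdb/store"
)

func main() {
	// NewConfigDefault is assumed from the ledisdb config package;
	// DBName selects which registered driver Open resolves.
	cfg := config.NewConfigDefault()
	cfg.DBName = "goleveldb" // pure-Go backend, no cgo needed
	cfg.DataDir = "/tmp/ledis-demo"

	db, err := store.Open(cfg) // creates <DataDir>/goleveldb_data
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close() // Close is assumed from the DB type in db.go
}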
136
vendor/github.com/siddontang/ledisdb/store/writebatch.go
generated
vendored
@ -1,136 +0,0 @@
package store

import (
	"time"

	"github.com/siddontang/ledisdb/store/driver"
	"github.com/syndtr/goleveldb/leveldb"
)

type WriteBatch struct {
	wb driver.IWriteBatch
	st *Stat

	putNum    int64
	deleteNum int64
	db        *DB

	data *BatchData
}

func (wb *WriteBatch) Close() {
	wb.wb.Close()
}

func (wb *WriteBatch) Put(key []byte, value []byte) {
	wb.putNum++
	wb.wb.Put(key, value)
}

func (wb *WriteBatch) Delete(key []byte) {
	wb.deleteNum++
	wb.wb.Delete(key)
}

func (wb *WriteBatch) Commit() error {
	wb.st.BatchCommitNum.Add(1)
	wb.st.PutNum.Add(wb.putNum)
	wb.st.DeleteNum.Add(wb.deleteNum)
	wb.putNum = 0
	wb.deleteNum = 0

	var err error
	t := time.Now()
	if wb.db == nil || !wb.db.needSyncCommit() {
		err = wb.wb.Commit()
	} else {
		err = wb.wb.SyncCommit()
	}

	wb.st.BatchCommitTotalTime.Add(time.Now().Sub(t))

	return err
}

func (wb *WriteBatch) Rollback() error {
	wb.putNum = 0
	wb.deleteNum = 0

	return wb.wb.Rollback()
}

// The returned BatchData becomes undefined after Commit or Rollback.
func (wb *WriteBatch) BatchData() *BatchData {
	data := wb.wb.Data()
	if wb.data == nil {
		wb.data = new(BatchData)
	}

	wb.data.Load(data)
	return wb.data
}

func (wb *WriteBatch) Data() []byte {
	b := wb.BatchData()
	return b.Data()
}

/*
	See the leveldb batch data format for more information.
*/

type BatchData struct {
	leveldb.Batch
}

func NewBatchData(data []byte) (*BatchData, error) {
	b := new(BatchData)

	if err := b.Load(data); err != nil {
		return nil, err
	}

	return b, nil
}

func (d *BatchData) Data() []byte {
	return d.Dump()
}

func (d *BatchData) Reset() {
	d.Batch.Reset()
}

type BatchDataReplay interface {
	Put(key, value []byte)
	Delete(key []byte)
}

type BatchItem struct {
	Key   []byte
	Value []byte
}

type batchItems []BatchItem

func (bs *batchItems) Put(key, value []byte) {
	*bs = append(*bs, BatchItem{key, value})
}

func (bs *batchItems) Delete(key []byte) {
	*bs = append(*bs, BatchItem{key, nil})
}

func (d *BatchData) Replay(r BatchDataReplay) error {
	return d.Batch.Replay(r)
}

func (d *BatchData) Items() ([]BatchItem, error) {
	is := make(batchItems, 0, d.Len())

	if err := d.Replay(&is); err != nil {
		return nil, err
	}

	return []BatchItem(is), nil
}
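BatchData piggybacks on goleveldb's serialized batch format: Dump serializes, Load parses, and Replay walks the operations back out, which is how Items flattens a batch into BatchItem records. A standalone sketch of that round trip against goleveldb's leveldb.Batch, with a local collector playing the role of the unexported batchItems:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// ops collects replayed operations, like the unexported batchItems above.
type ops []string

func (o *ops) Put(key, value []byte) { *o = append(*o, fmt.Sprintf("put %s=%s", key, value)) }
func (o *ops) Delete(key []byte)     { *o = append(*o, fmt.Sprintf("del %s", key)) }

func main() {
	var b leveldb.Batch
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k2"))

	// Dump/Load is the serialized round trip behind WriteBatch.Data
	// and NewBatchData.
	var clone leveldb.Batch
	if err := clone.Load(b.Dump()); err != nil {
		panic(err)
	}

	var o ops
	if err := clone.Replay(&o); err != nil {
		panic(err)
	}
	fmt.Println(o) // [put k1=v1 del k2]
}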
21
vendor/github.com/siddontang/rdb/LICENSE
generated
vendored
@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 siddontang

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
128
vendor/github.com/siddontang/rdb/decode.go
generated
vendored
@ -1,128 +0,0 @@
package rdb

// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.

import (
	"fmt"

	"github.com/cupcake/rdb"
	"github.com/cupcake/rdb/nopdecoder"
)

func DecodeDump(p []byte) (interface{}, error) {
	d := &decoder{}
	if err := rdb.DecodeDump(p, 0, nil, 0, d); err != nil {
		return nil, err
	}
	return d.obj, d.err
}

type decoder struct {
	nopdecoder.NopDecoder
	obj interface{}
	err error
}

func (d *decoder) initObject(obj interface{}) {
	if d.err != nil {
		return
	}
	if d.obj != nil {
		d.err = fmt.Errorf("invalid object, init again")
	} else {
		d.obj = obj
	}
}

func (d *decoder) Set(key, value []byte, expiry int64) {
	d.initObject(String(value))
}

func (d *decoder) StartHash(key []byte, length, expiry int64) {
	d.initObject(Hash(nil))
}

func (d *decoder) Hset(key, field, value []byte) {
	if d.err != nil {
		return
	}
	switch h := d.obj.(type) {
	default:
		d.err = fmt.Errorf("invalid object, not a hashmap")
	case Hash:
		v := struct {
			Field, Value []byte
		}{
			field,
			value,
		}
		d.obj = append(h, v)
	}
}

func (d *decoder) StartSet(key []byte, cardinality, expiry int64) {
	d.initObject(Set(nil))
}

func (d *decoder) Sadd(key, member []byte) {
	if d.err != nil {
		return
	}
	switch s := d.obj.(type) {
	default:
		d.err = fmt.Errorf("invalid object, not a set")
	case Set:
		d.obj = append(s, member)
	}
}

func (d *decoder) StartList(key []byte, length, expiry int64) {
	d.initObject(List(nil))
}

func (d *decoder) Rpush(key, value []byte) {
	if d.err != nil {
		return
	}
	switch l := d.obj.(type) {
	default:
		d.err = fmt.Errorf("invalid object, not a list")
	case List:
		d.obj = append(l, value)
	}
}

func (d *decoder) StartZSet(key []byte, cardinality, expiry int64) {
	d.initObject(ZSet(nil))
}

func (d *decoder) Zadd(key []byte, score float64, member []byte) {
	if d.err != nil {
		return
	}
	switch z := d.obj.(type) {
	default:
		d.err = fmt.Errorf("invalid object, not a zset")
	case ZSet:
		v := struct {
			Member []byte
			Score  float64
		}{
			member,
			score,
		}
		d.obj = append(z, v)
	}
}

type String []byte
type List [][]byte
type Hash []struct {
	Field, Value []byte
}
type Set [][]byte
type ZSet []struct {
	Member []byte
	Score  float64
}
106
vendor/github.com/siddontang/rdb/digest.go
generated
vendored
@ -1,106 +0,0 @@
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.

package rdb

import (
	"encoding/binary"
	"hash"
)

var crc64_table = [256]uint64{
	0x0000000000000000, 0x7ad870c830358979, 0xf5b0e190606b12f2, 0x8f689158505e9b8b,
	0xc038e5739841b68f, 0xbae095bba8743ff6, 0x358804e3f82aa47d, 0x4f50742bc81f2d04,
	0xab28ecb46814fe75, 0xd1f09c7c5821770c, 0x5e980d24087fec87, 0x24407dec384a65fe,
	0x6b1009c7f05548fa, 0x11c8790fc060c183, 0x9ea0e857903e5a08, 0xe478989fa00bd371,
	0x7d08ff3b88be6f81, 0x07d08ff3b88be6f8, 0x88b81eabe8d57d73, 0xf2606e63d8e0f40a,
	0xbd301a4810ffd90e, 0xc7e86a8020ca5077, 0x4880fbd87094cbfc, 0x32588b1040a14285,
	0xd620138fe0aa91f4, 0xacf86347d09f188d, 0x2390f21f80c18306, 0x594882d7b0f40a7f,
	0x1618f6fc78eb277b, 0x6cc0863448deae02, 0xe3a8176c18803589, 0x997067a428b5bcf0,
	0xfa11fe77117cdf02, 0x80c98ebf2149567b, 0x0fa11fe77117cdf0, 0x75796f2f41224489,
	0x3a291b04893d698d, 0x40f16bccb908e0f4, 0xcf99fa94e9567b7f, 0xb5418a5cd963f206,
	0x513912c379682177, 0x2be1620b495da80e, 0xa489f35319033385, 0xde51839b2936bafc,
	0x9101f7b0e12997f8, 0xebd98778d11c1e81, 0x64b116208142850a, 0x1e6966e8b1770c73,
	0x8719014c99c2b083, 0xfdc17184a9f739fa, 0x72a9e0dcf9a9a271, 0x08719014c99c2b08,
	0x4721e43f0183060c, 0x3df994f731b68f75, 0xb29105af61e814fe, 0xc849756751dd9d87,
	0x2c31edf8f1d64ef6, 0x56e99d30c1e3c78f, 0xd9810c6891bd5c04, 0xa3597ca0a188d57d,
	0xec09088b6997f879, 0x96d1784359a27100, 0x19b9e91b09fcea8b, 0x636199d339c963f2,
	0xdf7adabd7a6e2d6f, 0xa5a2aa754a5ba416, 0x2aca3b2d1a053f9d, 0x50124be52a30b6e4,
	0x1f423fcee22f9be0, 0x659a4f06d21a1299, 0xeaf2de5e82448912, 0x902aae96b271006b,
	0x74523609127ad31a, 0x0e8a46c1224f5a63, 0x81e2d7997211c1e8, 0xfb3aa75142244891,
	0xb46ad37a8a3b6595, 0xceb2a3b2ba0eecec, 0x41da32eaea507767, 0x3b024222da65fe1e,
	0xa2722586f2d042ee, 0xd8aa554ec2e5cb97, 0x57c2c41692bb501c, 0x2d1ab4dea28ed965,
	0x624ac0f56a91f461, 0x1892b03d5aa47d18, 0x97fa21650afae693, 0xed2251ad3acf6fea,
	0x095ac9329ac4bc9b, 0x7382b9faaaf135e2, 0xfcea28a2faafae69, 0x8632586aca9a2710,
	0xc9622c4102850a14, 0xb3ba5c8932b0836d, 0x3cd2cdd162ee18e6, 0x460abd1952db919f,
	0x256b24ca6b12f26d, 0x5fb354025b277b14, 0xd0dbc55a0b79e09f, 0xaa03b5923b4c69e6,
	0xe553c1b9f35344e2, 0x9f8bb171c366cd9b, 0x10e3202993385610, 0x6a3b50e1a30ddf69,
	0x8e43c87e03060c18, 0xf49bb8b633338561, 0x7bf329ee636d1eea, 0x012b592653589793,
	0x4e7b2d0d9b47ba97, 0x34a35dc5ab7233ee, 0xbbcbcc9dfb2ca865, 0xc113bc55cb19211c,
	0x5863dbf1e3ac9dec, 0x22bbab39d3991495, 0xadd33a6183c78f1e, 0xd70b4aa9b3f20667,
	0x985b3e827bed2b63, 0xe2834e4a4bd8a21a, 0x6debdf121b863991, 0x1733afda2bb3b0e8,
	0xf34b37458bb86399, 0x8993478dbb8deae0, 0x06fbd6d5ebd3716b, 0x7c23a61ddbe6f812,
	0x3373d23613f9d516, 0x49aba2fe23cc5c6f, 0xc6c333a67392c7e4, 0xbc1b436e43a74e9d,
	0x95ac9329ac4bc9b5, 0xef74e3e19c7e40cc, 0x601c72b9cc20db47, 0x1ac40271fc15523e,
	0x5594765a340a7f3a, 0x2f4c0692043ff643, 0xa02497ca54616dc8, 0xdafce7026454e4b1,
	0x3e847f9dc45f37c0, 0x445c0f55f46abeb9, 0xcb349e0da4342532, 0xb1eceec59401ac4b,
	0xfebc9aee5c1e814f, 0x8464ea266c2b0836, 0x0b0c7b7e3c7593bd, 0x71d40bb60c401ac4,
	0xe8a46c1224f5a634, 0x927c1cda14c02f4d, 0x1d148d82449eb4c6, 0x67ccfd4a74ab3dbf,
	0x289c8961bcb410bb, 0x5244f9a98c8199c2, 0xdd2c68f1dcdf0249, 0xa7f41839ecea8b30,
	0x438c80a64ce15841, 0x3954f06e7cd4d138, 0xb63c61362c8a4ab3, 0xcce411fe1cbfc3ca,
	0x83b465d5d4a0eece, 0xf96c151de49567b7, 0x76048445b4cbfc3c, 0x0cdcf48d84fe7545,
	0x6fbd6d5ebd3716b7, 0x15651d968d029fce, 0x9a0d8ccedd5c0445, 0xe0d5fc06ed698d3c,
	0xaf85882d2576a038, 0xd55df8e515432941, 0x5a3569bd451db2ca, 0x20ed197575283bb3,
	0xc49581ead523e8c2, 0xbe4df122e51661bb, 0x3125607ab548fa30, 0x4bfd10b2857d7349,
	0x04ad64994d625e4d, 0x7e7514517d57d734, 0xf11d85092d094cbf, 0x8bc5f5c11d3cc5c6,
	0x12b5926535897936, 0x686de2ad05bcf04f, 0xe70573f555e26bc4, 0x9ddd033d65d7e2bd,
	0xd28d7716adc8cfb9, 0xa85507de9dfd46c0, 0x273d9686cda3dd4b, 0x5de5e64efd965432,
	0xb99d7ed15d9d8743, 0xc3450e196da80e3a, 0x4c2d9f413df695b1, 0x36f5ef890dc31cc8,
	0x79a59ba2c5dc31cc, 0x037deb6af5e9b8b5, 0x8c157a32a5b7233e, 0xf6cd0afa9582aa47,
	0x4ad64994d625e4da, 0x300e395ce6106da3, 0xbf66a804b64ef628, 0xc5bed8cc867b7f51,
	0x8aeeace74e645255, 0xf036dc2f7e51db2c, 0x7f5e4d772e0f40a7, 0x05863dbf1e3ac9de,
	0xe1fea520be311aaf, 0x9b26d5e88e0493d6, 0x144e44b0de5a085d, 0x6e963478ee6f8124,
	0x21c640532670ac20, 0x5b1e309b16452559, 0xd476a1c3461bbed2, 0xaeaed10b762e37ab,
	0x37deb6af5e9b8b5b, 0x4d06c6676eae0222, 0xc26e573f3ef099a9, 0xb8b627f70ec510d0,
	0xf7e653dcc6da3dd4, 0x8d3e2314f6efb4ad, 0x0256b24ca6b12f26, 0x788ec2849684a65f,
	0x9cf65a1b368f752e, 0xe62e2ad306bafc57, 0x6946bb8b56e467dc, 0x139ecb4366d1eea5,
	0x5ccebf68aecec3a1, 0x2616cfa09efb4ad8, 0xa97e5ef8cea5d153, 0xd3a62e30fe90582a,
	0xb0c7b7e3c7593bd8, 0xca1fc72bf76cb2a1, 0x45775673a732292a, 0x3faf26bb9707a053,
	0x70ff52905f188d57, 0x0a2722586f2d042e, 0x854fb3003f739fa5, 0xff97c3c80f4616dc,
	0x1bef5b57af4dc5ad, 0x61372b9f9f784cd4, 0xee5fbac7cf26d75f, 0x9487ca0fff135e26,
	0xdbd7be24370c7322, 0xa10fceec0739fa5b, 0x2e675fb4576761d0, 0x54bf2f7c6752e8a9,
	0xcdcf48d84fe75459, 0xb71738107fd2dd20, 0x387fa9482f8c46ab, 0x42a7d9801fb9cfd2,
	0x0df7adabd7a6e2d6, 0x772fdd63e7936baf, 0xf8474c3bb7cdf024, 0x829f3cf387f8795d,
	0x66e7a46c27f3aa2c, 0x1c3fd4a417c62355, 0x935745fc4798b8de, 0xe98f353477ad31a7,
	0xa6df411fbfb21ca3, 0xdc0731d78f8795da, 0x536fa08fdfd90e51, 0x29b7d047efec8728}

type digest struct {
	crc uint64
}

func (d *digest) update(p []byte) {
	for _, b := range p {
		d.crc = crc64_table[byte(d.crc)^b] ^ (d.crc >> 8)
	}
}

func newDigest() hash.Hash64 {
	d := &digest{}
	return d
}

func (d *digest) Write(p []byte) (int, error) {
	d.update(p)
	return len(p), nil
}

func (d *digest) Sum(in []byte) []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, d.crc)
	return append(in, buf...)
}

func (d *digest) Sum64() uint64  { return d.crc }
func (d *digest) BlockSize() int { return 1 }
func (d *digest) Size() int      { return 8 }
func (d *digest) Reset() 	 { d.crc = 0 }
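This is the table-driven, byte-at-a-time CRC-64 that Redis uses for RDB checksums; update folds each byte in with crc = table[byte(crc)^b] ^ (crc >> 8). A small in-package sketch (hypothetical, not part of the vendored file) exercising it with the test vector quoted in Redis's crc64.c:

package rdb

import "fmt"

// checkDigest exercises the unexported digest above. Redis's crc64.c
// quotes crc64("123456789") == e9c6d914c4b8d9ca as its test vector.
func checkDigest() {
	h := newDigest()
	h.Write([]byte("123456789"))
	fmt.Printf("%016x\n", h.Sum64())
}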
52
vendor/github.com/siddontang/rdb/encode.go
generated
vendored
@ -1,52 +0,0 @@
package rdb

import (
	"bytes"
	"fmt"

	"github.com/cupcake/rdb"
)

func Dump(obj interface{}) ([]byte, error) {
	var buf bytes.Buffer

	e := rdb.NewEncoder(&buf)

	switch v := obj.(type) {
	case String:
		e.EncodeType(rdb.TypeString)
		e.EncodeString(v)
	case Hash:
		e.EncodeType(rdb.TypeHash)
		e.EncodeLength(uint32(len(v)))

		for i := 0; i < len(v); i++ {
			e.EncodeString(v[i].Field)
			e.EncodeString(v[i].Value)
		}
	case List:
		e.EncodeType(rdb.TypeList)
		e.EncodeLength(uint32(len(v)))
		for i := 0; i < len(v); i++ {
			e.EncodeString(v[i])
		}
	case Set:
		e.EncodeType(rdb.TypeSet)
		e.EncodeLength(uint32(len(v)))
		for i := 0; i < len(v); i++ {
			e.EncodeString(v[i])
		}
	case ZSet:
		e.EncodeType(rdb.TypeZSet)
		e.EncodeLength(uint32(len(v)))
		for i := 0; i < len(v); i++ {
			e.EncodeString(v[i].Member)
			e.EncodeFloat(v[i].Score)
		}
	default:
		return nil, fmt.Errorf("invalid dump type %T", obj)
	}

	e.EncodeDumpFooter()

	return buf.Bytes(), nil
}
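Dump and DecodeDump are inverses: one walks a typed value into the Redis DUMP wire format (type byte, payload, version and CRC footer), the other replays that payload through the decoder callbacks in decode.go. A round-trip sketch using the two entry points above:

package main

import (
	"fmt"

	"github.com/siddontang/rdb"
)

func main() {
	// Serialize a value in Redis DUMP format, then decode it back.
	p, err := rdb.Dump(rdb.String("hello"))
	if err != nil {
		panic(err)
	}

	obj, err := rdb.DecodeDump(p)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", obj.(rdb.String)) // hello
}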
112
vendor/github.com/siddontang/rdb/loader.go
generated
vendored
@ -1,112 +0,0 @@
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.

package rdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash"
	"io"
	"strconv"
)

type Loader struct {
	*rdbReader
	crc hash.Hash64
	db  uint32
}

func NewLoader(r io.Reader) *Loader {
	l := &Loader{}
	l.crc = newDigest()
	l.rdbReader = newRdbReader(io.TeeReader(r, l.crc))
	return l
}

func (l *Loader) LoadHeader() error {
	header := make([]byte, 9)
	if err := l.readFull(header); err != nil {
		return err
	}
	if !bytes.Equal(header[:5], []byte("REDIS")) {
		return fmt.Errorf("verify magic string, invalid file format")
	}
	if version, err := strconv.ParseInt(string(header[5:]), 10, 64); err != nil {
		return err
	} else if version <= 0 || version > Version {
		return fmt.Errorf("verify version, invalid RDB version number %d", version)
	}
	return nil
}

func (l *Loader) LoadChecksum() error {
	crc1 := l.crc.Sum64()
	if crc2, err := l.readUint64(); err != nil {
		return err
	} else if crc1 != crc2 {
		return fmt.Errorf("checksum validation failed")
	}
	return nil
}

type Entry struct {
	DB       uint32
	Key      []byte
	ValDump  []byte
	ExpireAt uint64
}

func (l *Loader) LoadEntry() (entry *Entry, err error) {
	var expireat uint64
	for {
		var otype byte
		if otype, err = l.readByte(); err != nil {
			return
		}
		switch otype {
		case rdbFlagExpiryMS:
			if expireat, err = l.readUint64(); err != nil {
				return
			}
		case rdbFlagExpiry:
			var sec uint32
			if sec, err = l.readUint32(); err != nil {
				return
			}
			expireat = uint64(sec) * 1000
		case rdbFlagSelectDB:
			if l.db, err = l.readLength(); err != nil {
				return
			}
		case rdbFlagEOF:
			return
		default:
			var key, obj []byte
			if key, err = l.readString(); err != nil {
				return
			}
			if obj, err = l.readObject(otype); err != nil {
				return
			}
			entry = &Entry{}
			entry.DB = l.db
			entry.Key = key
			entry.ValDump = createValDump(otype, obj)
			entry.ExpireAt = expireat
			return
		}
	}
}

func createValDump(otype byte, obj []byte) []byte {
	var b bytes.Buffer
	c := newDigest()
	w := io.MultiWriter(&b, c)
	w.Write([]byte{otype})
	w.Write(obj)
	binary.Write(w, binary.LittleEndian, uint16(Version))
	binary.Write(w, binary.LittleEndian, c.Sum64())
	return b.Bytes()
}
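A Loader wraps the stream in a TeeReader so every byte read also feeds the CRC digest, which is what lets LoadChecksum compare the running checksum against the file's trailing 8 bytes. A usage sketch; the file name is illustrative. LoadEntry returns a nil entry and nil error when it reaches the EOF flag:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/siddontang/rdb"
)

func main() {
	f, err := os.Open("dump.rdb") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	l := rdb.NewLoader(f)
	if err := l.LoadHeader(); err != nil {
		log.Fatal(err)
	}

	// A nil entry with a nil error marks the EOF flag.
	for {
		e, err := l.LoadEntry()
		if err != nil {
			log.Fatal(err)
		}
		if e == nil {
			break
		}
		fmt.Printf("db=%d key=%q expireat=%d\n", e.DB, e.Key, e.ExpireAt)
	}

	if err := l.LoadChecksum(); err != nil {
		log.Fatal(err)
	}
}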
332
vendor/github.com/siddontang/rdb/reader.go
generated
vendored
@ -1,332 +0,0 @@
// Copyright 2014 Wandoujia Inc. All Rights Reserved.
// Licensed under the MIT (MIT-LICENSE.txt) license.

package rdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"math"
	"strconv"
)

const (
	Version = 6
)

const (
	rdbTypeString = 0
	rdbTypeList   = 1
	rdbTypeSet    = 2
	rdbTypeZSet   = 3
	rdbTypeHash   = 4

	rdbTypeHashZipmap  = 9
	rdbTypeListZiplist = 10
	rdbTypeSetIntset   = 11
	rdbTypeZSetZiplist = 12
	rdbTypeHashZiplist = 13

	rdbFlagExpiryMS = 0xfc
	rdbFlagExpiry   = 0xfd
	rdbFlagSelectDB = 0xfe
	rdbFlagEOF      = 0xff
)

const (
	rdb6bitLen  = 0
	rdb14bitLen = 1
	rdb32bitLen = 2
	rdbEncVal   = 3

	rdbEncInt8  = 0
	rdbEncInt16 = 1
	rdbEncInt32 = 2
	rdbEncLZF   = 3

	rdbZiplist6bitlenString  = 0
	rdbZiplist14bitlenString = 1
	rdbZiplist32bitlenString = 2

	rdbZiplistInt16 = 0xc0
	rdbZiplistInt32 = 0xd0
	rdbZiplistInt64 = 0xe0
	rdbZiplistInt24 = 0xf0
	rdbZiplistInt8  = 0xfe
	rdbZiplistInt4  = 15
)

type rdbReader struct {
	raw   io.Reader
	buf   [8]byte
	nread int64
}

func newRdbReader(r io.Reader) *rdbReader {
	return &rdbReader{raw: r}
}

func (r *rdbReader) Read(p []byte) (int, error) {
	n, err := r.raw.Read(p)
	r.nread += int64(n)
	return n, err
}

func (r *rdbReader) offset() int64 {
	return r.nread
}

func (r *rdbReader) readObject(otype byte) ([]byte, error) {
	var b bytes.Buffer
	r = newRdbReader(io.TeeReader(r, &b))
	switch otype {
	default:
		return nil, fmt.Errorf("unknown object-type %02x", otype)
	case rdbTypeHashZipmap:
		fallthrough
	case rdbTypeListZiplist:
		fallthrough
	case rdbTypeSetIntset:
		fallthrough
	case rdbTypeZSetZiplist:
		fallthrough
	case rdbTypeHashZiplist:
		fallthrough
	case rdbTypeString:
		if _, err := r.readString(); err != nil {
			return nil, err
		}
	case rdbTypeList, rdbTypeSet:
		if n, err := r.readLength(); err != nil {
			return nil, err
		} else {
			for i := 0; i < int(n); i++ {
				if _, err := r.readString(); err != nil {
					return nil, err
				}
			}
		}
	case rdbTypeZSet:
		if n, err := r.readLength(); err != nil {
			return nil, err
		} else {
			for i := 0; i < int(n); i++ {
				if _, err := r.readString(); err != nil {
					return nil, err
				}
				if _, err := r.readFloat(); err != nil {
					return nil, err
				}
			}
		}
	case rdbTypeHash:
		if n, err := r.readLength(); err != nil {
			return nil, err
		} else {
			for i := 0; i < int(n); i++ {
				if _, err := r.readString(); err != nil {
					return nil, err
				}
				if _, err := r.readString(); err != nil {
					return nil, err
				}
			}
		}
	}
	return b.Bytes(), nil
}

func (r *rdbReader) readString() ([]byte, error) {
	length, encoded, err := r.readEncodedLength()
	if err != nil {
		return nil, err
	}
	if !encoded {
		return r.readBytes(int(length))
	}
	switch t := uint8(length); t {
	default:
		return nil, fmt.Errorf("invalid encoded-string %02x", t)
	case rdbEncInt8:
		i, err := r.readInt8()
		return []byte(strconv.FormatInt(int64(i), 10)), err
	case rdbEncInt16:
		i, err := r.readInt16()
		return []byte(strconv.FormatInt(int64(i), 10)), err
	case rdbEncInt32:
		i, err := r.readInt32()
		return []byte(strconv.FormatInt(int64(i), 10)), err
	case rdbEncLZF:
		var inlen, outlen uint32
		if inlen, err = r.readLength(); err != nil {
			return nil, err
		}
		if outlen, err = r.readLength(); err != nil {
			return nil, err
		}
		if in, err := r.readBytes(int(inlen)); err != nil {
			return nil, err
		} else {
			return lzfDecompress(in, int(outlen))
		}
	}
}

func (r *rdbReader) readEncodedLength() (length uint32, encoded bool, err error) {
	var u uint8
	if u, err = r.readUint8(); err != nil {
		return
	}
	length = uint32(u & 0x3f)
	switch u >> 6 {
	case rdb6bitLen:
	case rdb14bitLen:
		u, err = r.readUint8()
		length = (length << 8) + uint32(u)
	case rdbEncVal:
		encoded = true
	default:
		length, err = r.readUint32BigEndian()
	}
	return
}

func (r *rdbReader) readLength() (uint32, error) {
	length, encoded, err := r.readEncodedLength()
	if err == nil && encoded {
		err = fmt.Errorf("encoded-length")
	}
	return length, err
}

func (r *rdbReader) readFloat() (float64, error) {
	u, err := r.readUint8()
	if err != nil {
		return 0, err
	}
	switch u {
	case 253:
		return math.NaN(), nil
	case 254:
		return math.Inf(0), nil
	case 255:
		return math.Inf(-1), nil
	default:
		if b, err := r.readBytes(int(u)); err != nil {
			return 0, err
		} else {
			v, err := strconv.ParseFloat(string(b), 64)
			return v, err
		}
	}
}

func (r *rdbReader) readByte() (byte, error) {
	b := r.buf[:1]
	_, err := r.Read(b)
	return b[0], err
}

func (r *rdbReader) readFull(p []byte) error {
	_, err := io.ReadFull(r, p)
	return err
}

func (r *rdbReader) readBytes(n int) ([]byte, error) {
	p := make([]byte, n)
	return p, r.readFull(p)
}

func (r *rdbReader) readUint8() (uint8, error) {
	b, err := r.readByte()
	return uint8(b), err
}

func (r *rdbReader) readUint16() (uint16, error) {
	b := r.buf[:2]
	err := r.readFull(b)
	return binary.LittleEndian.Uint16(b), err
}

func (r *rdbReader) readUint32() (uint32, error) {
	b := r.buf[:4]
	err := r.readFull(b)
	return binary.LittleEndian.Uint32(b), err
}

func (r *rdbReader) readUint64() (uint64, error) {
	b := r.buf[:8]
	err := r.readFull(b)
	return binary.LittleEndian.Uint64(b), err
}

func (r *rdbReader) readUint32BigEndian() (uint32, error) {
	b := r.buf[:4]
	err := r.readFull(b)
	return binary.BigEndian.Uint32(b), err
}

func (r *rdbReader) readInt8() (int8, error) {
	u, err := r.readUint8()
	return int8(u), err
}

func (r *rdbReader) readInt16() (int16, error) {
	u, err := r.readUint16()
	return int16(u), err
}

func (r *rdbReader) readInt32() (int32, error) {
	u, err := r.readUint32()
	return int32(u), err
}

func (r *rdbReader) readInt64() (int64, error) {
	u, err := r.readUint64()
	return int64(u), err
}

func (r *rdbReader) readInt32BigEndian() (int32, error) {
	u, err := r.readUint32BigEndian()
	return int32(u), err
}

func lzfDecompress(in []byte, outlen int) (out []byte, err error) {
	defer func() {
		if x := recover(); x != nil {
			err = fmt.Errorf("decompress exception: %v", x)
		}
	}()
	out = make([]byte, outlen)
	i, o := 0, 0
	for i < len(in) {
		ctrl := int(in[i])
		i++
		if ctrl < 32 {
			for x := 0; x <= ctrl; x++ {
				out[o] = in[i]
				i++
				o++
			}
		} else {
			length := ctrl >> 5
			if length == 7 {
				length = length + int(in[i])
				i++
			}
			ref := o - ((ctrl & 0x1f) << 8) - int(in[i]) - 1
			i++
			for x := 0; x <= length+1; x++ {
				out[o] = out[ref]
				ref++
				o++
			}
		}
	}
	if o != outlen {
		return nil, fmt.Errorf("decompress length is %d != expected %d", o, outlen)
	}
	return out, nil
}
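The length encoding packs the mode into the top two bits of the first byte: 00 is a 6-bit length, 01 a 14-bit length (the low 6 bits plus the next byte), 10 a big-endian 32-bit length, and 11 flags a special string encoding (the integer and LZF cases handled in readString). A small in-package sketch (hypothetical, not part of the vendored file) decoding a 14-bit length:

package rdb

import (
	"bytes"
	"fmt"
)

// decodeLen shows readEncodedLength on a 14-bit length: 0x41 0x02 has
// top bits 01 (rdb14bitLen), so length = (0x01 << 8) + 0x02 = 258.
func decodeLen() {
	r := newRdbReader(bytes.NewReader([]byte{0x41, 0x02}))
	length, encoded, err := r.readEncodedLength()
	fmt.Println(length, encoded, err) // 258 false <nil>
}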