1
0
mirror of https://github.com/astaxie/beego.git synced 2025-07-02 02:30:19 +00:00

update mod

This commit is contained in:
astaxie
2018-11-09 12:37:28 +08:00
parent 9fdc1eaf3a
commit 5ea04bdfd3
548 changed files with 339257 additions and 46 deletions

View File

@ -0,0 +1,170 @@
# LedisDB configuration
# Server listen address
addr = "0.0.0.0:6380"
# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"
# Server http listen address, set empty to disable
http_addr = "0.0.0.0:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/datastore"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Default databases is 16, maximum is 10240 now.
databases = 16
# Log server command, set empty to disable
access_log = ""
# Set slaveof to enable replication from master, empty, no replication
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""
# Readonly mode. A slave server is always readonly, even when readonly = false.
# In readonly mode only replication and flushall may write.
readonly = false
# Choose which backend storage to use, now support:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"
# If not set, use data_dir/"db_name"_data
db_path = ""
# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0
# enable replication or not
use_replication = false
# set connection buffer, you can increase them appropriately
# more size, more memory used
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240
# if connection receives no data after n seconds, it may be dead, close
# 0 to disable and not check
conn_keepalive_interval = 0
# checking TTL (time to live) data every n seconds
# if you set big, the expired data may not be deleted immediately
ttl_check_interval = 1
[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432
[rocksdb]
# rocksdb has a large number of configuration options;
# only a few are listed here, more may be added later.
# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set to true: writes may be lost after a crash.
# You may set it to true if replication is enabled, since data can then be
# recovered from the replication log, but recovery is still not easy.
disable_wal = false
max_manifest_file_size = 20971520
[lmdb]
map_size = 524288000
nosync = true
[replication]
# Path to store replication information(write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""
# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but have better high availability.
sync = false
# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500
# If sync is true, wait at most min(wait_max_slave_acks, (n + 1) / 2) to promise syncing ok.
# n is slave number
# If 0, wait (n + 1) / 2 acks.
wait_max_slave_acks = 2
# store name: file, goleveldb
# change in runtime is very dangerous
store_name = "file"
# Expire write ahead logs after the given days
expired_log_days = 7
# for file store, if 0, use default 256MB, max is 1G
max_log_file_size = 0
# for file store, if 0, use default 50
max_log_file_num = 0
# for file store, use mmap for file read and write
use_mmap = true
# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0
# Compress the log or not
compression = false
[snapshot]
# Path to store snapshot dump file
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""
# Reserve newest max_num snapshot dump files
max_num = 1
[tls]
enabled = false
certificate = "test.crt"
key = "test.key"

316
vendor/github.com/siddontang/ledisdb/config/config.go generated vendored Normal file
View File

@ -0,0 +1,316 @@
package config
import (
"bytes"
"errors"
"io"
"io/ioutil"
"sync"
"fmt"
"github.com/pelletier/go-toml"
"github.com/siddontang/go/ioutil2"
)
var (
// ErrNoConfigFile is returned by (*Config).Rewrite when the config was not
// loaded from a file (FileName is empty), so there is nowhere to write back to.
ErrNoConfigFile = errors.New("Running without a config file")
)
const (
// Fallback values applied by NewConfigDefault and adjust when the
// corresponding setting is missing from the config file.
DefaultAddr string = "127.0.0.1:6380"
DefaultDBName string = "goleveldb"
DefaultDataDir string = "./var"
// Binary size units, in bytes.
KB int = 1024
MB int = KB * 1024
GB int = MB * 1024
)
// LevelDBConfig holds tunables for the leveldb and goleveldb backends
// (the [leveldb] section of the config file). Unset (non-positive) sizes
// are back-filled by adjust. Sizes are in bytes.
type LevelDBConfig struct {
Compression bool `toml:"compression"`
BlockSize int `toml:"block_size"`
WriteBufferSize int `toml:"write_buffer_size"`
CacheSize int `toml:"cache_size"`
MaxOpenFiles int `toml:"max_open_files"`
MaxFileSize int `toml:"max_file_size"`
}
// RocksDBConfig holds tunables for the rocksdb backend (the [rocksdb]
// section of the config file). Unset (non-positive) values are
// back-filled by adjust. Sizes are in bytes.
type RocksDBConfig struct {
// Compression is a numeric codec id; per the shipped config file:
// 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc.
Compression int `toml:"compression"`
BlockSize int `toml:"block_size"`
WriteBufferSize int `toml:"write_buffer_size"`
CacheSize int `toml:"cache_size"`
MaxOpenFiles int `toml:"max_open_files"`
MaxWriteBufferNum int `toml:"max_write_buffer_num"`
MinWriteBufferNumberToMerge int `toml:"min_write_buffer_number_to_merge"`
NumLevels int `toml:"num_levels"`
Level0FileNumCompactionTrigger int `toml:"level0_file_num_compaction_trigger"`
Level0SlowdownWritesTrigger int `toml:"level0_slowdown_writes_trigger"`
Level0StopWritesTrigger int `toml:"level0_stop_writes_trigger"`
TargetFileSizeBase int `toml:"target_file_size_base"`
TargetFileSizeMultiplier int `toml:"target_file_size_multiplier"`
MaxBytesForLevelBase int `toml:"max_bytes_for_level_base"`
MaxBytesForLevelMultiplier int `toml:"max_bytes_for_level_multiplier"`
DisableAutoCompactions bool `toml:"disable_auto_compactions"`
UseFsync bool `toml:"use_fsync"`
MaxBackgroundCompactions int `toml:"max_background_compactions"`
MaxBackgroundFlushes int `toml:"max_background_flushes"`
EnableStatistics bool `toml:"enable_statistics"`
StatsDumpPeriodSec int `toml:"stats_dump_period_sec"`
// NOTE(review): the tag spelling "background_theads" (sic) matches the
// key used in the shipped config files; do not correct it without also
// migrating existing configs.
BackgroundThreads int `toml:"background_theads"`
HighPriorityBackgroundThreads int `toml:"high_priority_background_threads"`
// DisableWAL is dangerous to enable: per the config-file comment,
// writes may be lost after a crash unless replication can recover them.
DisableWAL bool `toml:"disable_wal"`
MaxManifestFileSize int `toml:"max_manifest_file_size"`
}
// LMDBConfig holds tunables for the lmdb backend (the [lmdb] section).
type LMDBConfig struct {
MapSize int `toml:"map_size"` // bytes; defaults to 20*MB in NewConfigDefault
NoSync bool `toml:"nosync"`
}
// ReplicationConfig mirrors the [replication] section of the config file.
type ReplicationConfig struct {
// Path stores replication info (write-ahead log, commit log, etc.);
// per the config file, empty means data_dir/rpl.
Path string `toml:"path"`
// Sync: when true, a new log must be sent to some slaves before commit.
Sync bool `toml:"sync"`
WaitSyncTime int `toml:"wait_sync_time"` // milliseconds; only used when Sync is true
WaitMaxSlaveAcks int `toml:"wait_max_slave_acks"` // 0 means wait (n+1)/2 acks
ExpiredLogDays int `toml:"expired_log_days"`
StoreName string `toml:"store_name"` // "file" or "goleveldb"
MaxLogFileSize int64 `toml:"max_log_file_size"` // file store: 0 = default 256MB, max 1G
MaxLogFileNum int `toml:"max_log_file_num"` // file store: 0 = default 50 (see adjust)
SyncLog int `toml:"sync_log"` // 0: no sync, 1: every second, 2: every commit
Compression bool `toml:"compression"`
UseMmap bool `toml:"use_mmap"` // file store: mmap for file read/write
MasterPassword string `toml:"master_password"`
}
// SnapshotConfig mirrors the [snapshot] section of the config file.
type SnapshotConfig struct {
Path string `toml:"path"` // per the config file, empty means data_dir/snapshot
MaxNum int `toml:"max_num"` // number of newest dump files to keep
}
// TLS mirrors the [tls] section: whether TLS is enabled and the
// certificate/key file paths to use.
type TLS struct {
Enabled bool `toml:"enabled"`
Certificate string `toml:"certificate"`
Key string `toml:"key"`
}
// AuthMethod is a pluggable authentication hook: given the server config and
// the password supplied by a client, it reports whether the client may proceed.
type AuthMethod func(c *Config, password string) bool

// Config is the root server configuration, normally populated from a TOML
// file via NewConfigWithFile / NewConfigWithData. Fields tagged "-" are
// runtime-only and are never (un)marshaled.
type Config struct {
	m sync.RWMutex `toml:"-"` // guards Readonly; see GetReadonly / SetReadonly

	AuthPassword string `toml:"auth_password"`
	//AuthMethod custom authentication method
	AuthMethod AuthMethod `toml:"-"`

	// FileName is the path the config was loaded from; used by Rewrite.
	FileName string `toml:"-"`

	// Addr can be empty to assign a local address dynamically
	Addr               string `toml:"addr"`
	AddrUnixSocketPerm string `toml:"addr_unixsocketperm"`
	HttpAddr           string `toml:"http_addr"`

	SlaveOf string `toml:"slaveof"`

	// Readonly: tag fixed — it was the malformed `toml:readonly` (missing
	// quotes), which reflect.StructTag cannot parse, so the TOML "readonly"
	// key never mapped onto this field.
	Readonly bool `toml:"readonly"`

	DataDir      string `toml:"data_dir"`
	Databases    int    `toml:"databases"`
	DBName       string `toml:"db_name"`
	DBPath       string `toml:"db_path"`
	DBSyncCommit int    `toml:"db_sync_commit"`

	LevelDB LevelDBConfig `toml:"leveldb"`
	RocksDB RocksDBConfig `toml:"rocksdb"`
	LMDB    LMDBConfig    `toml:"lmdb"`

	AccessLog string `toml:"access_log"`

	UseReplication bool              `toml:"use_replication"`
	Replication    ReplicationConfig `toml:"replication"`

	Snapshot SnapshotConfig `toml:"snapshot"`

	ConnReadBufferSize    int `toml:"conn_read_buffer_size"`
	ConnWriteBufferSize   int `toml:"conn_write_buffer_size"`
	ConnKeepaliveInterval int `toml:"conn_keepalive_interval"`

	TTLCheckInterval int `toml:"ttl_check_interval"`

	//tls config
	TLS TLS `toml:"tls"`
}
// NewConfigWithFile reads fileName and parses it as a TOML configuration.
// On success the returned Config remembers fileName so that Rewrite can
// persist changes back to the same path.
func NewConfigWithFile(fileName string) (*Config, error) {
	var cfg *Config
	data, err := ioutil.ReadFile(fileName)
	if err == nil {
		cfg, err = NewConfigWithData(data)
	}
	if err != nil {
		return nil, err
	}
	cfg.FileName = fileName
	return cfg, nil
}
// NewConfigWithData builds a Config from raw TOML bytes: it starts from
// NewConfigDefault, unmarshals data over the defaults, then runs adjust
// to back-fill any remaining unset tunables.
func NewConfigWithData(data []byte) (*Config, error) {
	cfg := NewConfigDefault()

	// Error message fixed: it previously read "newConfigwithData: unmarashal".
	if err := toml.Unmarshal(data, cfg); err != nil {
		return nil, fmt.Errorf("NewConfigWithData: unmarshal: %s", err)
	}

	cfg.adjust()
	return cfg, nil
}
// NewConfigDefault returns a Config populated with sane defaults: local
// listen address, goleveldb backend under ./var, 16 databases, auth
// disabled (blank password), replication off. adjust is run before
// returning so all derived tunables are back-filled.
func NewConfigDefault() *Config {
	cfg := &Config{
		Addr:     DefaultAddr,
		HttpAddr: "",
		DataDir:  DefaultDataDir,
		DBName:   DefaultDBName,
		SlaveOf:  "",
		Readonly: false,

		// Disable auth by default by leaving the password blank.
		AuthPassword: "",

		Databases: 16, // default databases number

		AccessLog: "", // disable access log

		UseReplication: false,
	}

	cfg.LMDB = LMDBConfig{MapSize: 20 * MB, NoSync: true}

	cfg.Replication.WaitSyncTime = 500
	cfg.Replication.Compression = true
	cfg.Replication.WaitMaxSlaveAcks = 2
	cfg.Replication.SyncLog = 0
	cfg.Replication.UseMmap = true

	cfg.Snapshot.MaxNum = 1

	cfg.RocksDB.EnableStatistics = false
	cfg.RocksDB.UseFsync = false
	cfg.RocksDB.DisableAutoCompactions = false
	cfg.RocksDB.DisableWAL = false

	cfg.adjust()
	return cfg
}
// getDefault keeps s when it is positive; otherwise it falls back to the
// default d. Used by the adjust methods to back-fill unset tunables.
func getDefault(d int, s int) int {
	if s > 0 {
		return s
	}
	return d
}
// adjust back-fills every unset (non-positive) top-level tunable with its
// default and cascades into the storage-backend sections.
func (cfg *Config) adjust() {
	cfg.LevelDB.adjust()
	cfg.RocksDB.adjust()

	// field/default pairs; getDefault keeps an already-positive value.
	defaults := []struct {
		field *int
		value int
	}{
		{&cfg.Replication.ExpiredLogDays, 7},
		{&cfg.Replication.MaxLogFileNum, 50},
		{&cfg.ConnReadBufferSize, 4 * KB},
		{&cfg.ConnWriteBufferSize, 4 * KB},
		{&cfg.TTLCheckInterval, 1},
		{&cfg.Databases, 16},
	}
	for _, d := range defaults {
		*d.field = getDefault(d.value, *d.field)
	}
}
// adjust back-fills unset (non-positive) leveldb tunables with defaults.
func (cfg *LevelDBConfig) adjust() {
	defaults := []struct {
		field *int
		value int
	}{
		{&cfg.CacheSize, 4 * MB},
		{&cfg.BlockSize, 4 * KB},
		{&cfg.WriteBufferSize, 4 * MB},
		{&cfg.MaxOpenFiles, 1024},
		{&cfg.MaxFileSize, 32 * MB},
	}
	for _, d := range defaults {
		*d.field = getDefault(d.value, *d.field)
	}
}
// adjust back-fills unset (non-positive) rocksdb tunables with defaults.
// Boolean options are left as parsed; only integer knobs are defaulted.
func (cfg *RocksDBConfig) adjust() {
	defaults := []struct {
		field *int
		value int
	}{
		{&cfg.CacheSize, 4 * MB},
		{&cfg.BlockSize, 4 * KB},
		{&cfg.WriteBufferSize, 4 * MB},
		{&cfg.MaxOpenFiles, 1024},
		{&cfg.MaxWriteBufferNum, 2},
		{&cfg.MinWriteBufferNumberToMerge, 1},
		{&cfg.NumLevels, 7},
		{&cfg.Level0FileNumCompactionTrigger, 4},
		{&cfg.Level0SlowdownWritesTrigger, 16},
		{&cfg.Level0StopWritesTrigger, 64},
		{&cfg.TargetFileSizeBase, 32 * MB},
		{&cfg.TargetFileSizeMultiplier, 1},
		{&cfg.MaxBytesForLevelBase, 32 * MB},
		{&cfg.MaxBytesForLevelMultiplier, 1},
		{&cfg.MaxBackgroundCompactions, 1},
		{&cfg.MaxBackgroundFlushes, 1},
		{&cfg.StatsDumpPeriodSec, 3600},
		{&cfg.BackgroundThreads, 2},
		{&cfg.HighPriorityBackgroundThreads, 1},
		{&cfg.MaxManifestFileSize, 20 * MB},
	}
	for _, d := range defaults {
		*d.field = getDefault(d.value, *d.field)
	}
}
// Dump serializes the configuration as TOML and writes it to w.
func (cfg *Config) Dump(w io.Writer) error {
	data, err := toml.Marshal(*cfg)
	if err == nil {
		_, err = w.Write(data)
	}
	return err
}
// DumpFile atomically writes the TOML form of cfg to fileName (mode 0644).
// The config is serialized to memory first so a marshal error never leaves
// a partially written file behind.
func (cfg *Config) DumpFile(fileName string) error {
	var buf bytes.Buffer
	err := cfg.Dump(&buf)
	if err == nil {
		err = ioutil2.WriteFileAtomic(fileName, buf.Bytes(), 0644)
	}
	return err
}
// Rewrite persists the current configuration back to the file it was loaded
// from. It returns ErrNoConfigFile when no file is associated with cfg.
func (cfg *Config) Rewrite() error {
	if cfg.FileName == "" {
		return ErrNoConfigFile
	}
	return cfg.DumpFile(cfg.FileName)
}
// GetReadonly reports the readonly flag under the config's read lock.
func (cfg *Config) GetReadonly() bool {
	cfg.m.RLock()
	defer cfg.m.RUnlock()
	return cfg.Readonly
}
// SetReadonly switches readonly mode under the config's write lock.
func (cfg *Config) SetReadonly(b bool) {
	cfg.m.Lock()
	defer cfg.m.Unlock()
	cfg.Readonly = b
}

170
vendor/github.com/siddontang/ledisdb/config/config.toml generated vendored Normal file
View File

@ -0,0 +1,170 @@
# LedisDB configuration
# Server listen address
addr = "127.0.0.1:6380"
# Unix socket permissions, 755 by default.
# Ignored for tcp socket.
addr_unixsocketperm = "0770"
# Server http listen address, set empty to disable
http_addr = "127.0.0.1:11181"
# Data store path, all ledisdb's data will be saved here
data_dir = "/tmp/ledis_server"
# Set the number of databases. You can use `select dbindex` to choose a db.
# dbindex must be in [0, databases - 1].
# Default databases is 16, maximum is 10240 now.
databases = 16
# Log server command, set empty to disable
access_log = ""
# Set slaveof to enable replication from master, empty, no replication
# Any write operations except flushall and replication will be disabled in slave mode.
slaveof = ""
# Readonly mode. A slave server is always readonly, even when readonly = false.
# In readonly mode only replication and flushall may write.
readonly = false
# Choose which backend storage to use, now support:
#
# leveldb
# rocksdb
# goleveldb
# memory
#
db_name = "leveldb"
# If not set, use data_dir/"db_name"_data
db_path = ""
# Sync commit to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
db_sync_commit = 0
# enable replication or not
use_replication = false
# set connection buffer, you can increase them appropriately
# more size, more memory used
conn_read_buffer_size = 10240
conn_write_buffer_size = 10240
# if connection receives no data after n seconds, it may be dead, close
# 0 to disable and not check
conn_keepalive_interval = 0
# checking TTL (time to live) data every n seconds
# if you set big, the expired data may not be deleted immediately
ttl_check_interval = 1
[leveldb]
# for leveldb and goleveldb
compression = false
block_size = 32768
write_buffer_size = 67108864
cache_size = 524288000
max_open_files = 1024
max_file_size = 33554432
[rocksdb]
# rocksdb has a large number of configuration options;
# only a few are listed here, more may be added later.
# 0:no, 1:snappy, 2:zlib, 3:bz2, 4:lz4, 5:lz4hc
compression = 0
block_size = 65536
write_buffer_size = 134217728
cache_size = 1073741824
max_open_files = 1024
max_write_buffer_num = 6
min_write_buffer_number_to_merge = 2
num_levels = 7
level0_file_num_compaction_trigger = 8
level0_slowdown_writes_trigger = 16
level0_stop_writes_trigger = 64
target_file_size_base = 67108864
target_file_size_multiplier = 1
max_bytes_for_level_base = 536870912
max_bytes_for_level_multiplier = 8
disable_auto_compactions = false
disable_data_sync = false
use_fsync = false
background_theads = 16
high_priority_background_threads = 1
max_background_compactions = 15
max_background_flushes = 1
allow_os_buffer = true
enable_statistics = false
stats_dump_period_sec = 3600
# Dangerous to set to true: writes may be lost after a crash.
# You may set it to true if replication is enabled, since data can then be
# recovered from the replication log, but recovery is still not easy.
disable_wal = false
max_manifest_file_size = 20971520
[lmdb]
map_size = 524288000
nosync = true
[replication]
# Path to store replication information(write ahead log, commit log, etc.)
# if not set, use data_dir/rpl
path = ""
# If sync is true, the new log must be sent to some slaves, and then commit.
# It will reduce performance but have better high availability.
sync = false
# If sync is true, wait at most wait_sync_time milliseconds for a slave to sync this log
wait_sync_time = 500
# If sync is true, wait at most min(wait_max_slave_acks, (n + 1) / 2) to promise syncing ok.
# n is slave number
# If 0, wait (n + 1) / 2 acks.
wait_max_slave_acks = 2
# store name: file, goleveldb
# change in runtime is very dangerous
store_name = "file"
# Expire write ahead logs after the given days
expired_log_days = 7
# for file store, if 0, use default 256MB, max is 1G
max_log_file_size = 0
# for file store, if 0, use default 50
max_log_file_num = 0
# for file store, use mmap for file read and write
use_mmap = true
# Sync log to disk if possible
# 0: no sync
# 1: sync every second
# 2: sync every commit
sync_log = 0
# Compress the log or not
compression = false
[snapshot]
# Path to store snapshot dump file
# if not set, use data_dir/snapshot
# snapshot file name format is dmp-2006-01-02T15:04:05.999999999
path = ""
# Reserve newest max_num snapshot dump files
max_num = 1
[tls]
enabled = true
certificate = "test.crt"
key = "test.key"