beego — commit 8ef8fd2606 (mirror of https://github.com/astaxie/beego.git)
.travis.yml

@@ -36,8 +36,7 @@ install:
- go get github.com/beego/goyaml2
- go get gopkg.in/yaml.v2
- go get github.com/belogik/goes
- go get github.com/siddontang/ledisdb/config
- go get github.com/siddontang/ledisdb/ledis
- go get github.com/ledisdb/ledisdb
- go get github.com/ssdb/gossdb/ssdb
- go get github.com/cloudflare/golz4
- go get github.com/gogo/protobuf/proto
@@ -49,7 +48,7 @@ install:
- go get -u honnef.co/go/tools/cmd/staticcheck
- go get -u github.com/mdempsky/unconvert
- go get -u github.com/gordonklaus/ineffassign
- go get -u github.com/golang/lint/golint
- go get -u golang.org/x/lint/golint
- go get -u github.com/go-redis/redis
before_script:
- psql --version
README.md

@@ -33,6 +33,8 @@ Congratulations! You've just built your first **beego** app.
###### Please see [Documentation](http://beego.me/docs) for more.

###### [beego-example](https://github.com/beego-dev/beego-example)

## Features

* RESTful support
admin.go — 53 changes

@@ -24,6 +24,8 @@ import (
"text/template"
"time"

"github.com/prometheus/client_golang/prometheus/promhttp"

"github.com/astaxie/beego/grace"
"github.com/astaxie/beego/logs"
"github.com/astaxie/beego/toolbox"
@@ -55,12 +57,14 @@ func init() {
beeAdminApp = &adminApp{
routers: make(map[string]http.HandlerFunc),
}
// keep in mind that all data should be html escaped to avoid XSS attack
beeAdminApp.Route("/", adminIndex)
beeAdminApp.Route("/qps", qpsIndex)
beeAdminApp.Route("/prof", profIndex)
beeAdminApp.Route("/healthcheck", healthcheck)
beeAdminApp.Route("/task", taskStatus)
beeAdminApp.Route("/listconf", listConf)
beeAdminApp.Route("/metrics", promhttp.Handler().ServeHTTP)
FilterMonitorFunc = func(string, string, time.Duration, string, int) bool { return true }
}

@@ -105,8 +109,8 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
case "conf":
m := make(M)
list("BConfig", BConfig, m)
m["AppConfigPath"] = appConfigPath
m["AppConfigProvider"] = appConfigProvider
m["AppConfigPath"] = template.HTMLEscapeString(appConfigPath)
m["AppConfigProvider"] = template.HTMLEscapeString(appConfigProvider)
tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl))
tmpl = template.Must(tmpl.Parse(configTpl))
tmpl = template.Must(tmpl.Parse(defaultScriptsTpl))
@@ -151,8 +155,9 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
resultList := new([][]string)
for _, f := range bf {
var result = []string{
f.pattern,
utils.GetFuncName(f.filterFunc),
// void xss
template.HTMLEscapeString(f.pattern),
template.HTMLEscapeString(utils.GetFuncName(f.filterFunc)),
}
*resultList = append(*resultList, result)
}
@@ -207,8 +212,8 @@ func PrintTree() M {

printTree(resultList, t)

methods = append(methods, method)
methodsData[method] = resultList
methods = append(methods, template.HTMLEscapeString(method))
methodsData[template.HTMLEscapeString(method)] = resultList
}

content["Data"] = methodsData
@@ -227,21 +232,21 @@ func printTree(resultList *[][]string, t *Tree) {
if v, ok := l.runObject.(*ControllerInfo); ok {
if v.routerType == routerTypeBeego {
var result = []string{
v.pattern,
fmt.Sprintf("%s", v.methods),
v.controllerType.String(),
template.HTMLEscapeString(v.pattern),
template.HTMLEscapeString(fmt.Sprintf("%s", v.methods)),
template.HTMLEscapeString(v.controllerType.String()),
}
*resultList = append(*resultList, result)
} else if v.routerType == routerTypeRESTFul {
var result = []string{
v.pattern,
fmt.Sprintf("%s", v.methods),
template.HTMLEscapeString(v.pattern),
template.HTMLEscapeString(fmt.Sprintf("%s", v.methods)),
"",
}
*resultList = append(*resultList, result)
} else if v.routerType == routerTypeHandler {
var result = []string{
v.pattern,
template.HTMLEscapeString(v.pattern),
"",
"",
}
@@ -266,7 +271,7 @@ func profIndex(rw http.ResponseWriter, r *http.Request) {
result bytes.Buffer
)
toolbox.ProcessInput(command, &result)
data["Content"] = result.String()
data["Content"] = template.HTMLEscapeString(result.String())

if format == "json" && command == "gc summary" {
dataJSON, err := json.Marshal(data)
@@ -280,7 +285,7 @@ func profIndex(rw http.ResponseWriter, r *http.Request) {
return
}

data["Title"] = command
data["Title"] = template.HTMLEscapeString(command)
defaultTpl := defaultScriptsTpl
if command == "gc summary" {
defaultTpl = gcAjaxTpl
@@ -304,13 +309,13 @@ func healthcheck(rw http.ResponseWriter, _ *http.Request) {
if err := h.Check(); err != nil {
result = []string{
"error",
name,
err.Error(),
template.HTMLEscapeString(name),
template.HTMLEscapeString(err.Error()),
}
} else {
result = []string{
"success",
name,
template.HTMLEscapeString(name),
"OK",
}
}
@@ -334,11 +339,11 @@ func taskStatus(rw http.ResponseWriter, req *http.Request) {
if taskname != "" {
if t, ok := toolbox.AdminTaskList[taskname]; ok {
if err := t.Run(); err != nil {
data["Message"] = []string{"error", fmt.Sprintf("%s", err)}
data["Message"] = []string{"error", template.HTMLEscapeString(fmt.Sprintf("%s", err))}
}
data["Message"] = []string{"success", fmt.Sprintf("%s run success,Now the Status is <br>%s", taskname, t.GetStatus())}
data["Message"] = []string{"success", template.HTMLEscapeString(fmt.Sprintf("%s run success,Now the Status is <br>%s", taskname, t.GetStatus()))}
} else {
data["Message"] = []string{"warning", fmt.Sprintf("there's no task which named: %s", taskname)}
data["Message"] = []string{"warning", template.HTMLEscapeString(fmt.Sprintf("there's no task which named: %s", taskname))}
}
}

@@ -354,10 +359,10 @@ func taskStatus(rw http.ResponseWriter, req *http.Request) {
}
for tname, tk := range toolbox.AdminTaskList {
result := []string{
tname,
tk.GetSpec(),
tk.GetStatus(),
tk.GetPrev().String(),
template.HTMLEscapeString(tname),
template.HTMLEscapeString(tk.GetSpec()),
template.HTMLEscapeString(tk.GetStatus()),
template.HTMLEscapeString(tk.GetPrev().String()),
}
*resultList = append(*resultList, result)
}
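The pattern repeated throughout this file is to pass every runtime- or user-influenced string through text/template's HTMLEscapeString before it reaches the dashboard templates, so injected markup is rendered inert. A standalone sketch of the effect (the input string is only an illustration, not part of the commit):

package main

import (
	"fmt"
	"text/template"
)

func main() {
	raw := `<script>alert("xss")</script>`
	// <, >, &, ' and " are replaced by their HTML entities, so the admin
	// pages print the payload as text instead of executing it.
	fmt.Println(template.HTMLEscapeString(raw))
	// &lt;script&gt;alert(&#34;xss&#34;)&lt;/script&gt;
}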
@@ -52,6 +52,8 @@ func oldMap() M {
m["BConfig.WebConfig.DirectoryIndex"] = BConfig.WebConfig.DirectoryIndex
m["BConfig.WebConfig.StaticDir"] = BConfig.WebConfig.StaticDir
m["BConfig.WebConfig.StaticExtensionsToGzip"] = BConfig.WebConfig.StaticExtensionsToGzip
m["BConfig.WebConfig.StaticCacheFileSize"] = BConfig.WebConfig.StaticCacheFileSize
m["BConfig.WebConfig.StaticCacheFileNum"] = BConfig.WebConfig.StaticCacheFileNum
m["BConfig.WebConfig.TemplateLeft"] = BConfig.WebConfig.TemplateLeft
m["BConfig.WebConfig.TemplateRight"] = BConfig.WebConfig.TemplateRight
m["BConfig.WebConfig.ViewsPath"] = BConfig.WebConfig.ViewsPath
app.go — 9 changes

@@ -123,14 +123,13 @@ func (app *App) Run(mws ...MiddleWare) {
httpsAddr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort)
app.Server.Addr = httpsAddr
}
server := grace.NewServer(httpsAddr, app.Handlers)
server := grace.NewServer(httpsAddr, app.Server.Handler)
server.Server.ReadTimeout = app.Server.ReadTimeout
server.Server.WriteTimeout = app.Server.WriteTimeout
if BConfig.Listen.EnableMutualHTTPS {
if err := server.ListenAndServeMutualTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile, BConfig.Listen.TrustCaFile); err != nil {
logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid()))
time.Sleep(100 * time.Microsecond)
endRunning <- true
}
} else {
if BConfig.Listen.AutoTLS {
@@ -145,14 +144,14 @@ func (app *App) Run(mws ...MiddleWare) {
if err := server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil {
logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid()))
time.Sleep(100 * time.Microsecond)
endRunning <- true
}
}
endRunning <- true
}()
}
if BConfig.Listen.EnableHTTP {
go func() {
server := grace.NewServer(addr, app.Handlers)
server := grace.NewServer(addr, app.Server.Handler)
server.Server.ReadTimeout = app.Server.ReadTimeout
server.Server.WriteTimeout = app.Server.WriteTimeout
if BConfig.Listen.ListenTCP4 {
@@ -161,8 +160,8 @@ func (app *App) Run(mws ...MiddleWare) {
if err := server.ListenAndServe(); err != nil {
logs.Critical("ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid()))
time.Sleep(100 * time.Microsecond)
endRunning <- true
}
endRunning <- true
}()
}
<-endRunning
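Both graceful servers are now built from app.Server.Handler rather than app.Handlers, so the middleware chain that Run assembles from its mws arguments is no longer bypassed when BConfig.Listen.Graceful is enabled. A rough sketch of a wrapper that relies on this (the logging middleware is illustrative, not part of the commit):

package main

import (
	"net/http"

	"github.com/astaxie/beego"
	"github.com/astaxie/beego/logs"
)

func main() {
	logRequests := func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			logs.Info("incoming: %s %s", r.Method, r.URL.Path)
			next.ServeHTTP(w, r)
		})
	}
	// With app.Server.Handler passed to grace.NewServer, the wrapper runs
	// for plain and graceful listeners alike.
	beego.BeeApp.Run(logRequests)
}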
beego.go — 2 changes

@@ -23,7 +23,7 @@ import (

const (
// VERSION represent beego web framework version.
VERSION = "1.12.1"
VERSION = "1.12.2"

// DEV is for develop
DEV = "dev"
build_info.go — new file, 27 lines

@@ -0,0 +1,27 @@
// Copyright 2020 astaxie
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package beego

var (
BuildVersion string
BuildGitRevision string
BuildStatus string
BuildTag string
BuildTime string

GoVersion string

GitBranch string
)
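These variables stay empty unless they are injected at link time; a hedged example of wiring them up (the values are placeholders, not part of the commit):

go build -ldflags "-X github.com/astaxie/beego.BuildVersion=1.12.2 -X github.com/astaxie/beego.BuildGitRevision=8ef8fd2606"

The metric package introduced further down reads them when it registers its build-info gauge.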
cache/memory.go — 3 changes

@@ -218,9 +218,12 @@ func (bc *MemoryCache) vacuum() {
}
for {
<-time.After(bc.dur)
bc.RLock()
if bc.items == nil {
bc.RUnlock()
return
}
bc.RUnlock()
if keys := bc.expiredKeys(); len(keys) != 0 {
bc.clearItems(keys)
}
cache/redis/redis.go — 49 changes

@@ -55,6 +55,9 @@ type Cache struct {
key string
password string
maxIdle int

//the timeout to a value less than the redis server's timeout.
timeout time.Duration
}

// NewRedisCache create new redis cache with default collection name.
@@ -137,12 +140,12 @@ func (rc *Cache) Decr(key string) error {

// ClearAll clean all cache in redis. delete this redis collection.
func (rc *Cache) ClearAll() error {
c := rc.p.Get()
defer c.Close()
cachedKeys, err := redis.Strings(c.Do("KEYS", rc.key+":*"))
cachedKeys, err := rc.Scan(rc.key + ":*")
if err != nil {
return err
}
c := rc.p.Get()
defer c.Close()
for _, str := range cachedKeys {
if _, err = c.Do("DEL", str); err != nil {
return err
@@ -151,6 +154,35 @@ func (rc *Cache) ClearAll() error {
return err
}

// Scan scan all keys matching the pattern. a better choice than `keys`
func (rc *Cache) Scan(pattern string) (keys []string, err error) {
c := rc.p.Get()
defer c.Close()
var (
cursor uint64 = 0 // start
result []interface{}
list []string
)
for {
result, err = redis.Values(c.Do("SCAN", cursor, "MATCH", pattern, "COUNT", 1024))
if err != nil {
return
}
list, err = redis.Strings(result[1], nil)
if err != nil {
return
}
keys = append(keys, list...)
cursor, err = redis.Uint64(result[0], nil)
if err != nil {
return
}
if cursor == 0 { // over
return
}
}
}

// StartAndGC start redis cache adapter.
// config is like {"key":"collection key","conn":"connection info","dbNum":"0"}
// the cache item in redis are stored forever,
@@ -182,12 +214,21 @@ func (rc *Cache) StartAndGC(config string) error {
if _, ok := cf["maxIdle"]; !ok {
cf["maxIdle"] = "3"
}
if _, ok := cf["timeout"]; !ok {
cf["timeout"] = "180s"
}
rc.key = cf["key"]
rc.conninfo = cf["conn"]
rc.dbNum, _ = strconv.Atoi(cf["dbNum"])
rc.password = cf["password"]
rc.maxIdle, _ = strconv.Atoi(cf["maxIdle"])

if v, err := time.ParseDuration(cf["timeout"]); err == nil {
rc.timeout = v
} else {
rc.timeout = 180 * time.Second
}

rc.connectInit()

c := rc.p.Get()
@@ -221,7 +262,7 @@ func (rc *Cache) connectInit() {
// initialize a new pool
rc.p = &redis.Pool{
MaxIdle: rc.maxIdle,
IdleTimeout: 180 * time.Second,
IdleTimeout: rc.timeout,
Dial: dialFunc,
}
}
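ClearAll now walks the keyspace with the new cursor-based Scan (SCAN ... COUNT 1024) instead of a single blocking KEYS call, and the pool's idle timeout becomes configurable through the new "timeout" key. A rough usage sketch, assuming a Redis instance on localhost; the adapter name and config keys mirror StartAndGC above, the values are placeholders:

package main

import (
	"fmt"

	"github.com/astaxie/beego/cache"
	redicache "github.com/astaxie/beego/cache/redis" // importing registers the "redis" adapter
)

func main() {
	bm, err := cache.NewCache("redis", `{"key":"collection","conn":"127.0.0.1:6379","timeout":"30s"}`)
	if err != nil {
		panic(err)
	}
	// The concrete adapter type exposes Scan; the cache.Cache interface does not,
	// hence the type assertion (the test below does the same).
	keys, err := bm.(*redicache.Cache).Scan("collection:*")
	fmt.Println(len(keys), err)
}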
cache/redis/redis_test.go — 38 changes

@@ -15,6 +15,7 @@
package redis

import (
"fmt"
"testing"
"time"

@@ -104,3 +105,40 @@ func TestRedisCache(t *testing.T) {
t.Error("clear all err")
}
}

func TestCache_Scan(t *testing.T) {
timeoutDuration := 10 * time.Second
// init
bm, err := cache.NewCache("redis", `{"conn": "127.0.0.1:6379"}`)
if err != nil {
t.Error("init err")
}
// insert all
for i := 0; i < 10000; i++ {
if err = bm.Put(fmt.Sprintf("astaxie%d", i), fmt.Sprintf("author%d", i), timeoutDuration); err != nil {
t.Error("set Error", err)
}
}
// scan all for the first time
keys, err := bm.(*Cache).Scan(DefaultKey + ":*")
if err != nil {
t.Error("scan Error", err)
}
if len(keys) != 10000 {
t.Error("scan all err")
}

// clear all
if err = bm.ClearAll(); err != nil {
t.Error("clear all err")
}

// scan all for the second time
keys, err = bm.(*Cache).Scan(DefaultKey + ":*")
if err != nil {
t.Error("scan Error", err)
}
if len(keys) != 0 {
t.Error("scan all err")
}
}
config.go — 22 changes

@@ -81,6 +81,8 @@ type WebConfig struct {
DirectoryIndex bool
StaticDir map[string]string
StaticExtensionsToGzip []string
StaticCacheFileSize int
StaticCacheFileNum int
TemplateLeft string
TemplateRight string
ViewsPath string
@@ -129,6 +131,8 @@ var (
appConfigPath string
// appConfigProvider is the provider for the config, default is ini
appConfigProvider = "ini"
// WorkPath is the absolute path to project root directory
WorkPath string
)

func init() {
@@ -137,7 +141,7 @@ func init() {
if AppPath, err = filepath.Abs(filepath.Dir(os.Args[0])); err != nil {
panic(err)
}
workPath, err := os.Getwd()
WorkPath, err = os.Getwd()
if err != nil {
panic(err)
}
@@ -145,7 +149,7 @@ func init() {
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(workPath, "conf", filename)
appConfigPath = filepath.Join(WorkPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
appConfigPath = filepath.Join(AppPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
@@ -236,6 +240,8 @@ func newBConfig() *Config {
DirectoryIndex: false,
StaticDir: map[string]string{"/static": "static"},
StaticExtensionsToGzip: []string{".css", ".js"},
StaticCacheFileSize: 1024 * 100,
StaticCacheFileNum: 1000,
TemplateLeft: "{{",
TemplateRight: "}}",
ViewsPath: "views",
@@ -317,6 +323,14 @@ func assignConfig(ac config.Configer) error {
}
}

if sfs, err := ac.Int("StaticCacheFileSize"); err == nil {
BConfig.WebConfig.StaticCacheFileSize = sfs
}

if sfn, err := ac.Int("StaticCacheFileNum"); err == nil {
BConfig.WebConfig.StaticCacheFileNum = sfn
}

if lo := ac.String("LogOutputs"); lo != "" {
// if lo is not nil or empty
// means user has set his own LogOutputs
@@ -408,9 +422,9 @@ func newAppConfig(appConfigProvider, appConfigPath string) (*beegoAppConfig, err

func (b *beegoAppConfig) Set(key, val string) error {
if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil {
return err
return b.innerConfig.Set(key, val)
}
return b.innerConfig.Set(key, val)
return nil
}

func (b *beegoAppConfig) String(key string) string {
config/json.go

@@ -20,6 +20,7 @@ import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"sync"
)
@@ -94,8 +95,10 @@ func (c *JSONConfigContainer) Int(key string) (int, error) {
if val != nil {
if v, ok := val.(float64); ok {
return int(v), nil
} else if v, ok := val.(string); ok {
return strconv.Atoi(v)
}
return 0, errors.New("not int value")
return 0, errors.New("not valid value")
}
return 0, errors.New("not exist key:" + key)
}
config_test.go

@@ -115,6 +115,8 @@ func TestAssignConfig_03(t *testing.T) {
ac.Set("RunMode", "online")
ac.Set("StaticDir", "download:down download2:down2")
ac.Set("StaticExtensionsToGzip", ".css,.js,.html,.jpg,.png")
ac.Set("StaticCacheFileSize", "87456")
ac.Set("StaticCacheFileNum", "1254")
assignConfig(ac)

t.Logf("%#v", BConfig)
@@ -132,6 +134,12 @@ func TestAssignConfig_03(t *testing.T) {
if BConfig.WebConfig.StaticDir["/download2"] != "down2" {
t.FailNow()
}
if BConfig.WebConfig.StaticCacheFileSize != 87456 {
t.FailNow()
}
if BConfig.WebConfig.StaticCacheFileNum != 1254 {
t.FailNow()
}
if len(BConfig.WebConfig.StaticExtensionsToGzip) != 5 {
t.FailNow()
}
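The two new limits are picked up by assignConfig from the application configuration, so they can also be set in conf/app.conf; a hedged example using the same values the test exercises (the defaults from newBConfig are 102400 bytes and 1000 files):

StaticCacheFileSize = 87456
StaticCacheFileNum = 1254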
context/input.go

@@ -71,7 +71,9 @@ func (input *BeegoInput) Reset(ctx *Context) {
input.CruSession = nil
input.pnames = input.pnames[:0]
input.pvalues = input.pvalues[:0]
input.dataLock.Lock()
input.data = nil
input.dataLock.Unlock()
input.RequestBody = []byte{}
}

@@ -87,7 +89,7 @@ func (input *BeegoInput) URI() string {

// URL returns request url path (without query string, fragment).
func (input *BeegoInput) URL() string {
return input.Context.Request.URL.Path
return input.Context.Request.URL.EscapedPath()
}

// Site returns base site url as scheme://domain type.
@@ -282,6 +284,11 @@ func (input *BeegoInput) ParamsLen() int {
func (input *BeegoInput) Param(key string) string {
for i, v := range input.pnames {
if v == key && i <= len(input.pvalues) {
// we cannot use url.PathEscape(input.pvalues[i])
// for example, if the value is /a/b
// after url.PathEscape(input.pvalues[i]), the value is %2Fa%2Fb
// However, the value is used in ControllerRegister.ServeHTTP
// and split by "/", so function crash...
return input.pvalues[i]
}
}
go.mod — 23 changes

@@ -2,36 +2,35 @@ module github.com/astaxie/beego

require (
github.com/Knetic/govaluate v3.0.0+incompatible // indirect
github.com/OwnLocal/goes v1.0.0
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737
github.com/casbin/casbin v1.7.0
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c // indirect
github.com/couchbase/go-couchbase v0.0.0-20200519150804-63f3cdb75e0d
github.com/couchbase/gomemcached v0.0.0-20200526233749-ec430f949808 // indirect
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 // indirect
github.com/elastic/go-elasticsearch/v6 v6.8.5
github.com/elazarl/go-bindata-assetfs v1.0.0
github.com/go-redis/redis v6.14.2+incompatible
github.com/go-sql-driver/mysql v1.4.1
github.com/go-sql-driver/mysql v1.5.0
github.com/gogo/protobuf v1.1.1
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/gomodule/redigo v2.0.0+incompatible
github.com/hashicorp/golang-lru v0.5.4
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6
github.com/lib/pq v1.0.0
github.com/mattn/go-sqlite3 v1.10.0
github.com/mattn/go-sqlite3 v2.0.3+incompatible
github.com/pelletier/go-toml v1.2.0 // indirect
github.com/pkg/errors v0.8.0 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect
github.com/prometheus/client_golang v1.7.0
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec
github.com/stretchr/testify v1.4.0
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c // indirect
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b // indirect
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c
gopkg.in/yaml.v2 v2.2.1
gopkg.in/yaml.v2 v2.2.8
)

replace golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 => github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85
go.sum — 181 changes

@@ -1,80 +1,217 @@
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg=
|
||||
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
|
||||
github.com/OwnLocal/goes v1.0.0/go.mod h1:8rIFjBGTue3lCU0wplczcUgt9Gxgrkkrw7etMIcn8TM=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
|
||||
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
|
||||
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd h1:jZtX5jh5IOMu0fpOTC3ayh6QGSPJ/KWOv1lgPvbRw1M=
|
||||
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
|
||||
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542 h1:nYXb+3jF6Oq/j8R/y90XrKpreCxIalBWfeyeKymgOPk=
|
||||
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
|
||||
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff h1:/kO0p2RTGLB8R5gub7ps0GmYpB2O8LXEoPq8tzFDCUI=
|
||||
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
|
||||
github.com/casbin/casbin v1.7.0 h1:PuzlE8w0JBg/DhIqnkF1Dewf3z+qmUZMVN07PonvVUQ=
|
||||
github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
|
||||
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb h1:w3RapLhkA5+km9Z8vUkC6VCaskduJXvXwJg5neKnfDU=
|
||||
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
|
||||
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c h1:K4FIibkr4//ziZKOKmt4RL0YImuTjLLBtwElf+F2lSQ=
|
||||
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
|
||||
github.com/couchbase/go-couchbase v0.0.0-20200519150804-63f3cdb75e0d h1:OMrhQqj1QCyDT2sxHCDjE+k8aMdn2ngTCGG7g4wrdLo=
|
||||
github.com/couchbase/go-couchbase v0.0.0-20200519150804-63f3cdb75e0d/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
|
||||
github.com/couchbase/gomemcached v0.0.0-20200526233749-ec430f949808 h1:8s2l8TVUwMXl6tZMe3+hPCRJ25nQXiA3d1x622JtOqc=
|
||||
github.com/couchbase/gomemcached v0.0.0-20200526233749-ec430f949808/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
|
||||
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
|
||||
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
|
||||
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
|
||||
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/go-elasticsearch/v6 v6.8.5 h1:U2HtkBseC1FNBmDr0TR2tKltL6FxoY+niDAlj5M8TK8=
|
||||
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0=
|
||||
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d h1:xy93KVe+KrIIwWDEAfQBdIfsiHJkepbYsDr+VY3g9/o=
|
||||
github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:B7ZbAFz7NOmvpUE5RGtu3u0WIizy5GdvbNpEf4RPnWs=
|
||||
github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:uZvAcrsnNaCxlh1HorK5dUQHGmEKPh2H/Rl1kehswPo=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
|
||||
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6 h1:wxyqOzKxsRJ6vVRL9sXQ64Z45wmBuQ+OTH9sLsC5rKc=
|
||||
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
|
||||
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/pingcap/tidb v2.0.11+incompatible/go.mod h1:I8C6jrPINP2rrVunTRd7C9fRRhQrtR43S1/CL5ix/yQ=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
|
||||
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
||||
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373 h1:p6IxqQMjab30l4lb9mmkIkkcE1yv6o0SKbPhW5pxqHI=
|
||||
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.0 h1:wCi7urQOGBsYcQROHqpUUX4ct84xp40t9R9JX0FuA/U=
|
||||
github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 h1:X+yvsM2yrEktyI+b2qND5gpH8YhURn0k8OCaeRnkINo=
|
||||
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
|
||||
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0 h1:QIF48X1cihydXibm+4wfAc0r/qyPyuFiPFRNphdMpEE=
|
||||
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
|
||||
github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
|
||||
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=
|
||||
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA=
|
||||
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c h1:3eGShk3EQf5gJCYW+WzA0TEJQd37HLOmlYF7N0YJwv0=
|
||||
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8=
|
||||
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
|
||||
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:et7+NAX3lLIk5qUCTA9QelBjGE/NkhzYw/mhnr0s7nI=
|
||||
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c h1:FodBYPZKH5tAN2O60HlglMwXGAeV/4k+NKbli79M/2c=
|
||||
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
grace/server.go

@@ -46,7 +46,10 @@ func (srv *Server) Serve() (err error) {

log.Println(syscall.Getpid(), srv.ln.Addr(), "Listener closed.")
// wait for Shutdown to return
return <-srv.terminalChan
if shutdownErr := <-srv.terminalChan; shutdownErr != nil {
return shutdownErr
}
return
}

// ListenAndServe listens on the TCP network address srv.Addr and then calls Serve
@@ -180,7 +183,7 @@ func (srv *Server) ListenAndServeMutualTLS(certFile, keyFile, trustFile string)
log.Println(err)
return err
}
err = process.Kill()
err = process.Signal(syscall.SIGTERM)
if err != nil {
return err
}
httplib/httplib.go

@@ -407,6 +407,7 @@ func (b *BeegoHTTPRequest) buildURL(paramBody string) {
}()
b.Header("Content-Type", bodyWriter.FormDataContentType())
b.req.Body = ioutil.NopCloser(pr)
b.Header("Transfer-Encoding", "chunked")
return
}
logs/console_test.go

@@ -16,6 +16,7 @@ package logs

import (
"testing"
"time"
)

// Try each log level in decreasing order of priority.
@@ -49,3 +50,15 @@ func TestConsoleNoColor(t *testing.T) {
log.SetLogger("console", `{"color":false}`)
testConsoleCalls(log)
}

// Test console async
func TestConsoleAsync(t *testing.T) {
log := NewLogger(100)
log.SetLogger("console")
log.Async()
//log.Close()
testConsoleCalls(log)
for len(log.msgChan) != 0 {
time.Sleep(1 * time.Millisecond)
}
}
logs/es/es.go

@@ -1,14 +1,17 @@
package es

import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"net/url"
"strings"
"time"

"github.com/OwnLocal/goes"
"github.com/elastic/go-elasticsearch/v6"
"github.com/elastic/go-elasticsearch/v6/esapi"

"github.com/astaxie/beego/logs"
)

@@ -20,8 +23,14 @@ func NewES() logs.Logger {
return cw
}

// esLogger will log msg into ES
// before you using this implementation,
// please import this package
// usually means that you can import this package in your main package
// for example, anonymous:
// import _ "github.com/astaxie/beego/logs/es"
type esLogger struct {
*goes.Client
*elasticsearch.Client
DSN string `json:"dsn"`
Level int `json:"level"`
}
@@ -38,10 +47,13 @@ func (el *esLogger) Init(jsonconfig string) error {
return err
} else if u.Path == "" {
return errors.New("missing prefix")
} else if host, port, err := net.SplitHostPort(u.Host); err != nil {
return err
} else {
conn := goes.NewClient(host, port)
conn, err := elasticsearch.NewClient(elasticsearch.Config{
Addresses: []string{el.DSN},
})
if err != nil {
return err
}
el.Client = conn
}
return nil
@@ -53,21 +65,26 @@ func (el *esLogger) WriteMsg(when time.Time, msg string, level int) error {
return nil
}

vals := make(map[string]interface{})
vals["@timestamp"] = when.Format(time.RFC3339)
vals["@msg"] = msg
d := goes.Document{
Index: fmt.Sprintf("%04d.%02d.%02d", when.Year(), when.Month(), when.Day()),
Type: "logs",
Fields: vals,
idx := LogDocument{
Timestamp: when.Format(time.RFC3339),
Msg: msg,
}
_, err := el.Index(d, nil)

body, err := json.Marshal(idx)
if err != nil {
return err
}
req := esapi.IndexRequest{
Index: fmt.Sprintf("%04d.%02d.%02d", when.Year(), when.Month(), when.Day()),
DocumentType: "logs",
Body: strings.NewReader(string(body)),
}
_, err = req.Do(context.Background(), el.Client)
return err
}

// Destroy is a empty method
func (el *esLogger) Destroy() {

}

// Flush is a empty method
@@ -75,7 +92,11 @@ func (el *esLogger) Flush() {

}

type LogDocument struct {
Timestamp string `json:"timestamp"`
Msg string `json:"msg"`
}

func init() {
logs.Register(logs.AdapterEs, NewES)
}
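As the new doc comment above says, the adapter registers itself in init, so it only needs to be imported for its side effect; a rough wiring sketch (the DSN and level values are placeholders, not part of the commit):

package main

import (
	"github.com/astaxie/beego/logs"
	_ "github.com/astaxie/beego/logs/es" // init registers logs.AdapterEs
)

func main() {
	log := logs.NewLogger()
	// The DSN is parsed by Init above; level 7 (debug) is an assumption for the example.
	if err := log.SetLogger(logs.AdapterEs, `{"dsn":"http://127.0.0.1:9200/","level":7}`); err != nil {
		panic(err)
	}
	log.Info("indexed into elasticsearch by the es adapter")
}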
logs/file.go

@@ -359,6 +359,10 @@ RESTART_LOGGER:

func (w *fileLogWriter) deleteOldLog() {
dir := filepath.Dir(w.Filename)
absolutePath, err := filepath.EvalSymlinks(w.Filename)
if err == nil {
dir = filepath.Dir(absolutePath)
}
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
defer func() {
if r := recover(); r != nil {
logs/log.go

@@ -295,7 +295,11 @@ func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error
lm.level = logLevel
lm.msg = msg
lm.when = when
bl.msgChan <- lm
if bl.outputs != nil {
bl.msgChan <- lm
} else {
logMsgPool.Put(lm)
}
} else {
bl.writeToLoggers(when, msg, logLevel)
}
99
metric/prometheus.go
Normal file
99
metric/prometheus.go
Normal file
@ -0,0 +1,99 @@
|
||||
// Copyright 2020 astaxie
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/astaxie/beego"
|
||||
"github.com/astaxie/beego/logs"
|
||||
)
|
||||
|
||||
func PrometheusMiddleWare(next http.Handler) http.Handler {
|
||||
summaryVec := prometheus.NewSummaryVec(prometheus.SummaryOpts{
|
||||
Name: "beego",
|
||||
Subsystem: "http_request",
|
||||
ConstLabels: map[string]string{
|
||||
"server": beego.BConfig.ServerName,
|
||||
"env": beego.BConfig.RunMode,
|
||||
"appname": beego.BConfig.AppName,
|
||||
},
|
||||
Help: "The statics info for http request",
|
||||
}, []string{"pattern", "method", "status", "duration"})
|
||||
|
||||
prometheus.MustRegister(summaryVec)
|
||||
|
||||
registerBuildInfo()
|
||||
|
||||
return http.HandlerFunc(func(writer http.ResponseWriter, q *http.Request) {
|
||||
start := time.Now()
|
||||
next.ServeHTTP(writer, q)
|
||||
end := time.Now()
|
||||
go report(end.Sub(start), writer, q, summaryVec)
|
||||
})
|
||||
}
|
||||
|
||||
func registerBuildInfo() {
|
||||
buildInfo := prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "beego",
|
||||
Subsystem: "build_info",
|
||||
Help: "The building information",
|
||||
ConstLabels: map[string]string{
|
||||
"appname": beego.BConfig.AppName,
|
||||
"build_version": beego.BuildVersion,
|
||||
"build_revision": beego.BuildGitRevision,
|
||||
"build_status": beego.BuildStatus,
|
||||
"build_tag": beego.BuildTag,
|
||||
"build_time": strings.Replace(beego.BuildTime, "--", " ", 1),
|
||||
"go_version": beego.GoVersion,
|
||||
"git_branch": beego.GitBranch,
|
||||
"start_time": time.Now().Format("2006-01-02 15:04:05"),
|
||||
},
|
||||
}, []string{})
|
||||
|
||||
prometheus.MustRegister(buildInfo)
|
||||
buildInfo.WithLabelValues().Set(1)
|
||||
}
|
||||
|
||||
func report(dur time.Duration, writer http.ResponseWriter, q *http.Request, vec *prometheus.SummaryVec) {
|
||||
ctrl := beego.BeeApp.Handlers
|
||||
ctx := ctrl.GetContext()
|
||||
ctx.Reset(writer, q)
|
||||
defer ctrl.GiveBackContext(ctx)
|
||||
|
||||
// We cannot read the status code from q.Response.StatusCode
|
||||
// since the http server does not set q.Response. So q.Response is nil
|
||||
// Thus, we use reflection to read the status from writer whose concrete type is http.response
|
||||
responseVal := reflect.ValueOf(writer).Elem()
|
||||
field := responseVal.FieldByName("status")
|
||||
status := -1
|
||||
if field.IsValid() && field.Kind() == reflect.Int {
|
||||
status = int(field.Int())
|
||||
}
|
||||
ptn := "UNKNOWN"
|
||||
if rt, found := ctrl.FindRouter(ctx); found {
|
||||
ptn = rt.GetPattern()
|
||||
} else {
|
||||
logs.Warn("we can not find the router info for this request, so request will be recorded as UNKNOWN: " + q.URL.String())
|
||||
}
|
||||
ms := dur / time.Millisecond
|
||||
vec.WithLabelValues(ptn, q.Method, strconv.Itoa(status), strconv.Itoa(int(ms))).Observe(float64(ms))
|
||||
}
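The middleware above wraps any http.Handler and records a per-request summary labelled with pattern, method, status and duration. A minimal sketch of wiring it up from application code, assuming the standard net/http entry point; the ports, the /metrics mount and the omitted route registration are illustrative assumptions, not part of this change:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/astaxie/beego"
	"github.com/astaxie/beego/metric"
)

func main() {
	// routes would normally be registered first, e.g. via beego.Router(...)

	// expose the collected metrics on a separate, illustrative port
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	go http.ListenAndServe(":8090", mux)

	// beego's ControllerRegister is an http.Handler, so it can be wrapped directly
	http.ListenAndServe(":8080", metric.PrometheusMiddleWare(beego.BeeApp.Handlers))
}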
|
42
metric/prometheus_test.go
Normal file
@ -0,0 +1,42 @@
|
||||
// Copyright 2020 astaxie
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package metric
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/astaxie/beego/context"
|
||||
)
|
||||
|
||||
func TestPrometheusMiddleWare(t *testing.T) {
|
||||
middleware := PrometheusMiddleWare(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))
|
||||
writer := &context.Response{}
|
||||
request := &http.Request{
|
||||
URL: &url.URL{
|
||||
Host: "localhost",
|
||||
RawPath: "/a/b/c",
|
||||
},
|
||||
Method: "POST",
|
||||
}
|
||||
vec := prometheus.NewSummaryVec(prometheus.SummaryOpts{}, []string{"pattern", "method", "status", "duration"})
|
||||
|
||||
report(time.Second, writer, request, vec)
|
||||
middleware.ServeHTTP(writer, request)
|
||||
}
|
12
orm/db.go
@ -470,7 +470,7 @@ func (d *dbBase) InsertValue(q dbQuerier, mi *modelInfo, isMulti bool, names []s
|
||||
|
||||
multi := len(values) / len(names)
|
||||
|
||||
if isMulti {
|
||||
if isMulti && multi > 1 {
|
||||
qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
|
||||
}
|
||||
|
||||
@ -770,6 +770,16 @@ func (d *dbBase) UpdateBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Con
|
||||
cols = append(cols, col+" = "+col+" * ?")
|
||||
case ColExcept:
|
||||
cols = append(cols, col+" = "+col+" / ?")
|
||||
case ColBitAnd:
|
||||
cols = append(cols, col+" = "+col+" & ?")
|
||||
case ColBitRShift:
|
||||
cols = append(cols, col+" = "+col+" >> ?")
|
||||
case ColBitLShift:
|
||||
cols = append(cols, col+" = "+col+" << ?")
|
||||
case ColBitXOR:
|
||||
cols = append(cols, col+" = "+col+" ^ ?")
|
||||
case ColBitOr:
|
||||
cols = append(cols, col+" = "+col+" | ?")
|
||||
}
|
||||
values[i] = c.value
|
||||
} else {
|
||||
|
104
orm/db_alias.go
@ -18,6 +18,7 @@ import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
@ -106,8 +107,8 @@ func (ac *_dbCache) getDefault() (al *alias) {
|
||||
|
||||
type DB struct {
|
||||
*sync.RWMutex
|
||||
DB *sql.DB
|
||||
stmts map[string]*sql.Stmt
|
||||
DB *sql.DB
|
||||
stmtDecorators *lru.Cache
|
||||
}
|
||||
|
||||
func (d *DB) Begin() (*sql.Tx, error) {
|
||||
@ -118,22 +119,36 @@ func (d *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
|
||||
return d.DB.BeginTx(ctx, opts)
|
||||
}
|
||||
|
||||
func (d *DB) getStmt(query string) (*sql.Stmt, error) {
|
||||
// the caller must call release to release the *sql.Stmt after using it
|
||||
func (d *DB) getStmtDecorator(query string) (*stmtDecorator, error) {
|
||||
d.RLock()
|
||||
if stmt, ok := d.stmts[query]; ok {
|
||||
c, ok := d.stmtDecorators.Get(query)
|
||||
if ok {
|
||||
c.(*stmtDecorator).acquire()
|
||||
d.RUnlock()
|
||||
return stmt, nil
|
||||
return c.(*stmtDecorator), nil
|
||||
}
|
||||
d.RUnlock()
|
||||
|
||||
d.Lock()
|
||||
c, ok = d.stmtDecorators.Get(query)
|
||||
if ok {
|
||||
c.(*stmtDecorator).acquire()
|
||||
d.Unlock()
|
||||
return c.(*stmtDecorator), nil
|
||||
}
|
||||
|
||||
stmt, err := d.Prepare(query)
|
||||
if err != nil {
|
||||
d.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
d.Lock()
|
||||
d.stmts[query] = stmt
|
||||
sd := newStmtDecorator(stmt)
|
||||
sd.acquire()
|
||||
d.stmtDecorators.Add(query, sd)
|
||||
d.Unlock()
|
||||
return stmt, nil
|
||||
|
||||
return sd, nil
|
||||
}
|
||||
|
||||
func (d *DB) Prepare(query string) (*sql.Stmt, error) {
|
||||
@ -145,52 +160,63 @@ func (d *DB) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error
|
||||
}
|
||||
|
||||
func (d *DB) Exec(query string, args ...interface{}) (sql.Result, error) {
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.Exec(args...)
|
||||
}
|
||||
|
||||
func (d *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.ExecContext(ctx, args...)
|
||||
}
|
||||
|
||||
func (d *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.Query(args...)
|
||||
}
|
||||
|
||||
func (d *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.QueryContext(ctx, args...)
|
||||
}
|
||||
|
||||
func (d *DB) QueryRow(query string, args ...interface{}) *sql.Row {
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.QueryRow(args...)
|
||||
|
||||
}
|
||||
|
||||
func (d *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
|
||||
|
||||
stmt, err := d.getStmt(query)
|
||||
sd, err := d.getStmtDecorator(query)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
stmt := sd.getStmt()
|
||||
defer sd.release()
|
||||
return stmt.QueryRowContext(ctx, args)
|
||||
}
|
||||
|
||||
@ -268,9 +294,9 @@ func addAliasWthDB(aliasName, driverName string, db *sql.DB) (*alias, error) {
|
||||
al.Name = aliasName
|
||||
al.DriverName = driverName
|
||||
al.DB = &DB{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
DB: db,
|
||||
stmts: make(map[string]*sql.Stmt),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
DB: db,
|
||||
stmtDecorators: newStmtDecoratorLruWithEvict(),
|
||||
}
|
||||
|
||||
if dr, ok := drivers[driverName]; ok {
|
||||
@ -374,6 +400,7 @@ func SetMaxIdleConns(aliasName string, maxIdleConns int) {
|
||||
func SetMaxOpenConns(aliasName string, maxOpenConns int) {
|
||||
al := getDbAlias(aliasName)
|
||||
al.MaxOpenConns = maxOpenConns
|
||||
al.DB.DB.SetMaxOpenConns(maxOpenConns)
|
||||
// for tip go 1.2
|
||||
if fun := reflect.ValueOf(al.DB).MethodByName("SetMaxOpenConns"); fun.IsValid() {
|
||||
fun.Call([]reflect.Value{reflect.ValueOf(maxOpenConns)})
|
||||
@ -395,3 +422,44 @@ func GetDB(aliasNames ...string) (*sql.DB, error) {
|
||||
}
|
||||
return nil, fmt.Errorf("DataBase of alias name `%s` not found", name)
|
||||
}
|
||||
|
||||
type stmtDecorator struct {
|
||||
wg sync.WaitGroup
|
||||
lastUse int64
|
||||
stmt *sql.Stmt
|
||||
}
|
||||
|
||||
func (s *stmtDecorator) getStmt() *sql.Stmt {
|
||||
return s.stmt
|
||||
}
|
||||
|
||||
func (s *stmtDecorator) acquire() {
|
||||
s.wg.Add(1)
|
||||
s.lastUse = time.Now().Unix()
|
||||
}
|
||||
|
||||
func (s *stmtDecorator) release() {
|
||||
s.wg.Done()
|
||||
}
|
||||
|
||||
//garbage recycle for stmt
|
||||
func (s *stmtDecorator) destroy() {
|
||||
go func() {
|
||||
s.wg.Wait()
|
||||
_ = s.stmt.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
func newStmtDecorator(sqlStmt *sql.Stmt) *stmtDecorator {
|
||||
return &stmtDecorator{
|
||||
stmt: sqlStmt,
|
||||
lastUse: time.Now().Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func newStmtDecoratorLruWithEvict() *lru.Cache {
|
||||
cache, _ := lru.NewWithEvict(1000, func(key interface{}, value interface{}) {
|
||||
value.(*stmtDecorator).destroy()
|
||||
})
|
||||
return cache
|
||||
}
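Every wrapper method above follows the same lifecycle: acquire a decorator from the LRU, use its statement, then release it so that an eviction's destroy() closes the *sql.Stmt only after the WaitGroup drains. A sketch of that pattern inside package orm, with a hypothetical helper name (it restates what Exec and Query already do; it is not new API):

// hypothetical in-package helper illustrating the acquire/use/release lifecycle
func (d *DB) queryWithCachedStmt(query string, args ...interface{}) (*sql.Rows, error) {
	sd, err := d.getStmtDecorator(query) // acquire() was called on our behalf
	if err != nil {
		return nil, err
	}
	stmt := sd.getStmt()
	defer sd.release() // lets a pending destroy() close the statement safely
	return stmt.Query(args...)
}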
|
||||
|
@ -17,6 +17,8 @@ package orm
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// sqlite operators.
|
||||
@ -66,6 +68,14 @@ type dbBaseSqlite struct {
|
||||
|
||||
var _ dbBaser = new(dbBaseSqlite)
|
||||
|
||||
// override the base db Read-for-update behavior, as SQLite does not support the SELECT ... FOR UPDATE syntax
|
||||
func (d *dbBaseSqlite) Read(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string, isForUpdate bool) error {
|
||||
if isForUpdate {
|
||||
DebugLog.Println("[WARN] SQLite does not support SELECT FOR UPDATE query, isForUpdate param is ignored and always as false to do the work")
|
||||
}
|
||||
return d.dbBase.Read(q, mi, ind, tz, cols, false)
|
||||
}
|
||||
|
||||
// get sqlite operator.
|
||||
func (d *dbBaseSqlite) OperatorSQL(operator string) string {
|
||||
return sqliteOperators[operator]
|
||||
|
@ -18,6 +18,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -298,6 +299,7 @@ func bootStrap() {
|
||||
end:
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
debug.PrintStack()
|
||||
os.Exit(2)
|
||||
}
|
||||
}
|
||||
|
@ -559,9 +559,9 @@ func NewOrmWithDB(driverName, aliasName string, db *sql.DB) (Ormer, error) {
|
||||
al.Name = aliasName
|
||||
al.DriverName = driverName
|
||||
al.DB = &DB{
|
||||
RWMutex: new(sync.RWMutex),
|
||||
DB: db,
|
||||
stmts: make(map[string]*sql.Stmt),
|
||||
RWMutex: new(sync.RWMutex),
|
||||
DB: db,
|
||||
stmtDecorators: newStmtDecoratorLruWithEvict(),
|
||||
}
|
||||
|
||||
detectTZ(al)
|
||||
|
@ -32,6 +32,11 @@ const (
|
||||
ColMinus
|
||||
ColMultiply
|
||||
ColExcept
|
||||
ColBitAnd
|
||||
ColBitRShift
|
||||
ColBitLShift
|
||||
ColBitXOR
|
||||
ColBitOr
|
||||
)
|
||||
|
||||
// ColValue do the field raw changes. e.g Nums = Nums + 10. usage:
|
||||
@ -40,7 +45,8 @@ const (
|
||||
// }
|
||||
func ColValue(opt operator, value interface{}) interface{} {
|
||||
switch opt {
|
||||
case ColAdd, ColMinus, ColMultiply, ColExcept:
|
||||
case ColAdd, ColMinus, ColMultiply, ColExcept, ColBitAnd, ColBitRShift,
|
||||
ColBitLShift, ColBitXOR, ColBitOr:
|
||||
default:
|
||||
panic(fmt.Errorf("orm.ColValue wrong operator"))
|
||||
}
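Together with the new SQL fragments in orm/db.go above, ColValue can now express bitwise column updates. A hedged sketch, assuming a hypothetical "user" table with an integer "flags" column and an already-configured Ormer:

package main

import "github.com/astaxie/beego/orm"

// setFlag ORs a bit into the illustrative "flags" column; model registration
// and database setup are assumed to have happened elsewhere.
func setFlag(o orm.Ormer, id int64, bit int) (int64, error) {
	return o.QueryTable("user").Filter("id", id).Update(orm.Params{
		"flags": orm.ColValue(orm.ColBitOr, bit), // SET flags = flags | ?
	})
}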
|
||||
|
@ -500,7 +500,7 @@ func genRouterCode(pkgRealpath string) {
|
||||
beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"],
|
||||
beego.ControllerComments{
|
||||
Method: "` + strings.TrimSpace(c.Method) + `",
|
||||
` + "Router: `" + c.Router + "`" + `,
|
||||
` + `Router: "` + c.Router + `"` + `,
|
||||
AllowHTTPMethods: ` + allmethod + `,
|
||||
MethodParams: ` + methodParams + `,
|
||||
Filters: ` + filters + `,
|
||||
|
108
router.go
@ -18,6 +18,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@ -121,6 +122,10 @@ type ControllerInfo struct {
|
||||
methodParams []*param.MethodParam
|
||||
}
|
||||
|
||||
func (c *ControllerInfo) GetPattern() string {
|
||||
return c.pattern
|
||||
}
|
||||
|
||||
// ControllerRegister containers registered router rules, controller handlers and filters.
|
||||
type ControllerRegister struct {
|
||||
routers map[string]*Tree
|
||||
@ -249,25 +254,39 @@ func (p *ControllerRegister) addToRouter(method, pattern string, r *ControllerIn
|
||||
func (p *ControllerRegister) Include(cList ...ControllerInterface) {
|
||||
if BConfig.RunMode == DEV {
|
||||
skip := make(map[string]bool, 10)
|
||||
wgopath := utils.GetGOPATHs()
|
||||
go111module := os.Getenv(`GO111MODULE`)
|
||||
for _, c := range cList {
|
||||
reflectVal := reflect.ValueOf(c)
|
||||
t := reflect.Indirect(reflectVal).Type()
|
||||
wgopath := utils.GetGOPATHs()
|
||||
if len(wgopath) == 0 {
|
||||
panic("you are in dev mode. So please set gopath")
|
||||
}
|
||||
pkgpath := ""
|
||||
for _, wg := range wgopath {
|
||||
wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath()))
|
||||
if utils.FileExists(wg) {
|
||||
pkgpath = wg
|
||||
break
|
||||
// for go modules
|
||||
if go111module == `on` {
|
||||
pkgpath := filepath.Join(WorkPath, "..", t.PkgPath())
|
||||
if utils.FileExists(pkgpath) {
|
||||
if pkgpath != "" {
|
||||
if _, ok := skip[pkgpath]; !ok {
|
||||
skip[pkgpath] = true
|
||||
parserPkg(pkgpath, t.PkgPath())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if pkgpath != "" {
|
||||
if _, ok := skip[pkgpath]; !ok {
|
||||
skip[pkgpath] = true
|
||||
parserPkg(pkgpath, t.PkgPath())
|
||||
} else {
|
||||
if len(wgopath) == 0 {
|
||||
panic("you are in dev mode. So please set gopath")
|
||||
}
|
||||
pkgpath := ""
|
||||
for _, wg := range wgopath {
|
||||
wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath()))
|
||||
if utils.FileExists(wg) {
|
||||
pkgpath = wg
|
||||
break
|
||||
}
|
||||
}
|
||||
if pkgpath != "" {
|
||||
if _, ok := skip[pkgpath]; !ok {
|
||||
skip[pkgpath] = true
|
||||
parserPkg(pkgpath, t.PkgPath())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -288,6 +307,21 @@ func (p *ControllerRegister) Include(cList ...ControllerInterface) {
|
||||
}
|
||||
}
|
||||
|
||||
// GetContext returns a context from the pool; usually you should remember to call Reset to clean the context
|
||||
// and don't forget to give the context back to the pool
|
||||
// example:
|
||||
// ctx := p.GetContext()
|
||||
// ctx.Reset(w, q)
|
||||
// defer p.GiveBackContext(ctx)
|
||||
func (p *ControllerRegister) GetContext() *beecontext.Context {
|
||||
return p.pool.Get().(*beecontext.Context)
|
||||
}
|
||||
|
||||
// GiveBackContext puts the ctx back into the pool so that it can be reused
|
||||
func (p *ControllerRegister) GiveBackContext(ctx *beecontext.Context) {
|
||||
p.pool.Put(ctx)
|
||||
}
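GetContext and GiveBackContext expose the router's context pool outside the package, which is what metric/prometheus.go above relies on. A hedged sketch of a helper (not part of beego) that borrows a context to resolve the matched route pattern for an arbitrary request:

package main

import (
	"net/http"

	"github.com/astaxie/beego"
)

// resolvePattern is illustrative only: it borrows a context from the pool,
// binds it to the request, and returns the matched router pattern, if any.
func resolvePattern(rw http.ResponseWriter, req *http.Request) (string, bool) {
	handlers := beego.BeeApp.Handlers
	ctx := handlers.GetContext()
	ctx.Reset(rw, req)
	defer handlers.GiveBackContext(ctx)

	if info, ok := handlers.FindRouter(ctx); ok {
		return info.GetPattern(), true
	}
	return "", false
}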
|
||||
|
||||
// Get add get method
|
||||
// usage:
|
||||
// Get("/", func(ctx *context.Context){
|
||||
@ -667,10 +701,11 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
routerInfo *ControllerInfo
|
||||
isRunnable bool
|
||||
)
|
||||
context := p.pool.Get().(*beecontext.Context)
|
||||
context := p.GetContext()
|
||||
|
||||
context.Reset(rw, r)
|
||||
|
||||
defer p.pool.Put(context)
|
||||
defer p.GiveBackContext(context)
|
||||
if BConfig.RecoverFunc != nil {
|
||||
defer BConfig.RecoverFunc(context)
|
||||
}
|
||||
@ -739,7 +774,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
routerInfo, findRouter = p.FindRouter(context)
|
||||
}
|
||||
|
||||
//if no matches to url, throw a not found exception
|
||||
// if no matches to url, throw a not found exception
|
||||
if !findRouter {
|
||||
exception("404", context)
|
||||
goto Admin
|
||||
@ -750,19 +785,22 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
}
|
||||
|
||||
//execute middleware filters
|
||||
if routerInfo != nil {
|
||||
// store router pattern into context
|
||||
context.Input.SetData("RouterPattern", routerInfo.pattern)
|
||||
}
|
||||
|
||||
// execute middleware filters
|
||||
if len(p.filters[BeforeExec]) > 0 && p.execFilter(context, urlPath, BeforeExec) {
|
||||
goto Admin
|
||||
}
|
||||
|
||||
//check policies
|
||||
// check policies
|
||||
if p.execPolicy(context, urlPath) {
|
||||
goto Admin
|
||||
}
|
||||
|
||||
if routerInfo != nil {
|
||||
//store router pattern into context
|
||||
context.Input.SetData("RouterPattern", routerInfo.pattern)
|
||||
if routerInfo.routerType == routerTypeRESTFul {
|
||||
if _, ok := routerInfo.methods[r.Method]; ok {
|
||||
isRunnable = true
|
||||
@ -796,7 +834,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
|
||||
// also defined runRouter & runMethod from filter
|
||||
if !isRunnable {
|
||||
//Invoke the request handler
|
||||
// Invoke the request handler
|
||||
var execController ControllerInterface
|
||||
if routerInfo != nil && routerInfo.initialize != nil {
|
||||
execController = routerInfo.initialize()
|
||||
@ -809,13 +847,13 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
}
|
||||
|
||||
//call the controller init function
|
||||
// call the controller init function
|
||||
execController.Init(context, runRouter.Name(), runMethod, execController)
|
||||
|
||||
//call prepare function
|
||||
// call prepare function
|
||||
execController.Prepare()
|
||||
|
||||
//if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf
|
||||
// if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf
|
||||
if BConfig.WebConfig.EnableXSRF {
|
||||
execController.XSRFToken()
|
||||
if r.Method == http.MethodPost || r.Method == http.MethodDelete || r.Method == http.MethodPut ||
|
||||
@ -827,7 +865,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
execController.URLMapping()
|
||||
|
||||
if !context.ResponseWriter.Started {
|
||||
//exec main logic
|
||||
// exec main logic
|
||||
switch runMethod {
|
||||
case http.MethodGet:
|
||||
execController.Get()
|
||||
@ -852,14 +890,14 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
in := param.ConvertParams(methodParams, method.Type(), context)
|
||||
out := method.Call(in)
|
||||
|
||||
//For backward compatibility we only handle response if we had incoming methodParams
|
||||
// For backward compatibility we only handle response if we had incoming methodParams
|
||||
if methodParams != nil {
|
||||
p.handleParamResponse(context, execController, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//render template
|
||||
// render template
|
||||
if !context.ResponseWriter.Started && context.Output.Status == 0 {
|
||||
if BConfig.WebConfig.AutoRender {
|
||||
if err := execController.Render(); err != nil {
|
||||
@ -873,7 +911,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
execController.Finish()
|
||||
}
|
||||
|
||||
//execute middleware filters
|
||||
// execute middleware filters
|
||||
if len(p.filters[AfterExec]) > 0 && p.execFilter(context, urlPath, AfterExec) {
|
||||
goto Admin
|
||||
}
|
||||
@ -883,7 +921,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
Admin:
|
||||
//admin module record QPS
|
||||
// admin module record QPS
|
||||
|
||||
statusCode := context.ResponseWriter.Status
|
||||
if statusCode == 0 {
|
||||
@ -931,7 +969,7 @@ Admin:
|
||||
}
|
||||
|
||||
func (p *ControllerRegister) handleParamResponse(context *beecontext.Context, execController ControllerInterface, results []reflect.Value) {
|
||||
//looping in reverse order for the case when both error and value are returned and error sets the response status code
|
||||
// looping in reverse order for the case when both error and value are returned and error sets the response status code
|
||||
for i := len(results) - 1; i >= 0; i-- {
|
||||
result := results[i]
|
||||
if result.Kind() != reflect.Interface || !result.IsNil() {
|
||||
@ -973,11 +1011,11 @@ func toURL(params map[string]string) string {
|
||||
|
||||
// LogAccess logging info HTTP Access
|
||||
func LogAccess(ctx *beecontext.Context, startTime *time.Time, statusCode int) {
|
||||
//Skip logging if AccessLogs config is false
|
||||
// Skip logging if AccessLogs config is false
|
||||
if !BConfig.Log.AccessLogs {
|
||||
return
|
||||
}
|
||||
//Skip logging static requests unless EnableStaticLogs config is true
|
||||
// Skip logging static requests unless EnableStaticLogs config is true
|
||||
if !BConfig.Log.EnableStaticLogs && DefaultAccessLogFilter.Filter(ctx) {
|
||||
return
|
||||
}
|
||||
@ -1002,7 +1040,7 @@ func LogAccess(ctx *beecontext.Context, startTime *time.Time, statusCode int) {
|
||||
HTTPReferrer: r.Header.Get("Referer"),
|
||||
HTTPUserAgent: r.Header.Get("User-Agent"),
|
||||
RemoteUser: r.Header.Get("Remote-User"),
|
||||
BodyBytesSent: 0, //@todo this one is missing!
|
||||
BodyBytesSent: 0, // @todo this one is missing!
|
||||
}
|
||||
logs.AccessLog(record, BConfig.Log.AccessLogsFormat)
|
||||
}
|
||||
|
112
scripts/gobuild.sh
Executable file
@ -0,0 +1,112 @@
|
||||
#!/bin/bash
|
||||
|
||||
# WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY
|
||||
#
|
||||
# The original version of this file is located in the https://github.com/istio/common-files repo.
|
||||
# If you're looking at this file in a different repo and want to make a change, please go to the
|
||||
# common-files repo, make the change there and check it in. Then come back to this repo and run
|
||||
# "make update-common".
|
||||
|
||||
# Copyright Istio Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This script builds and version stamps the output
|
||||
|
||||
# adapt to beego
|
||||
|
||||
VERBOSE=${VERBOSE:-"0"}
|
||||
V=""
|
||||
if [[ "${VERBOSE}" == "1" ]];then
|
||||
V="-x"
|
||||
set -x
|
||||
fi
|
||||
|
||||
SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
OUT=${1:?"output path"}
|
||||
shift
|
||||
|
||||
set -e
|
||||
|
||||
BUILD_GOOS=${GOOS:-linux}
|
||||
BUILD_GOARCH=${GOARCH:-amd64}
|
||||
GOBINARY=${GOBINARY:-go}
|
||||
GOPKG="$GOPATH/pkg"
|
||||
BUILDINFO=${BUILDINFO:-""}
|
||||
STATIC=${STATIC:-1}
|
||||
LDFLAGS=${LDFLAGS:--extldflags -static}
|
||||
GOBUILDFLAGS=${GOBUILDFLAGS:-""}
|
||||
# Split GOBUILDFLAGS by spaces into an array called GOBUILDFLAGS_ARRAY.
|
||||
IFS=' ' read -r -a GOBUILDFLAGS_ARRAY <<< "$GOBUILDFLAGS"
|
||||
|
||||
GCFLAGS=${GCFLAGS:-}
|
||||
export CGO_ENABLED=0
|
||||
|
||||
if [[ "${STATIC}" != "1" ]];then
|
||||
LDFLAGS=""
|
||||
fi
|
||||
|
||||
# gather buildinfo if not already provided
|
||||
# For a release build BUILDINFO should be produced
|
||||
# at the beginning of the build and used throughout
|
||||
if [[ -z ${BUILDINFO} ]];then
|
||||
BUILDINFO=$(mktemp)
|
||||
"${SCRIPTPATH}/report_build_info.sh" > "${BUILDINFO}"
|
||||
fi
|
||||
|
||||
|
||||
# BUILD LD_EXTRAFLAGS
|
||||
LD_EXTRAFLAGS=""
|
||||
|
||||
while read -r line; do
|
||||
LD_EXTRAFLAGS="${LD_EXTRAFLAGS} -X ${line}"
|
||||
done < "${BUILDINFO}"
|
||||
|
||||
# verify go version before build
|
||||
# NB. this was copied verbatim from Kubernetes hack
|
||||
minimum_go_version=go1.13 # supported patterns: go1.x, go1.x.x (x should be a number)
|
||||
IFS=" " read -ra go_version <<< "$(${GOBINARY} version)"
|
||||
if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then
|
||||
echo "Warning: Detected that you are using an older version of the Go compiler. Beego requires ${minimum_go_version} or greater."
|
||||
fi
|
||||
|
||||
CURRENT_BRANCH=$(git branch | grep '*')
|
||||
CURRENT_BRANCH=${CURRENT_BRANCH:2}
|
||||
|
||||
BUILD_TIME=$(date +%Y-%m-%d--%T)
|
||||
|
||||
LD_EXTRAFLAGS="${LD_EXTRAFLAGS} -X github.com/astaxie/beego.GoVersion=${go_version[2]:2}"
|
||||
LD_EXTRAFLAGS="${LD_EXTRAFLAGS} -X github.com/astaxie/beego.GitBranch=${CURRENT_BRANCH}"
|
||||
LD_EXTRAFLAGS="${LD_EXTRAFLAGS} -X github.com/astaxie/beego.BuildTime=$BUILD_TIME"
|
||||
|
||||
OPTIMIZATION_FLAGS="-trimpath"
|
||||
if [ "${DEBUG}" == "1" ]; then
|
||||
OPTIMIZATION_FLAGS=""
|
||||
fi
|
||||
|
||||
|
||||
|
||||
echo "BUILD_GOARCH: $BUILD_GOARCH"
|
||||
echo "GOPKG: $GOPKG"
|
||||
echo "LD_EXTRAFLAGS: $LD_EXTRAFLAGS"
|
||||
echo "GO_VERSION: ${go_version[2]}"
|
||||
echo "BRANCH: $CURRENT_BRANCH"
|
||||
echo "BUILD_TIME: $BUILD_TIME"
|
||||
|
||||
time GOOS=${BUILD_GOOS} GOARCH=${BUILD_GOARCH} ${GOBINARY} build \
|
||||
${V} "${GOBUILDFLAGS_ARRAY[@]}" ${GCFLAGS:+-gcflags "${GCFLAGS}"} \
|
||||
-o "${OUT}" \
|
||||
${OPTIMIZATION_FLAGS} \
|
||||
-pkgdir="${GOPKG}/${BUILD_GOOS}_${BUILD_GOARCH}" \
|
||||
-ldflags "${LDFLAGS} ${LD_EXTRAFLAGS}" "${@}"
|
52
scripts/report_build_info.sh
Executable file
@ -0,0 +1,52 @@
|
||||
#!/bin/bash
|
||||
|
||||
# WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY
|
||||
#
|
||||
# The original version of this file is located in the https://github.com/istio/common-files repo.
|
||||
# If you're looking at this file in a different repo and want to make a change, please go to the
|
||||
# common-files repo, make the change there and check it in. Then come back to this repo and run
|
||||
# "make update-common".
|
||||
|
||||
# Copyright Istio Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# adapt to beego
|
||||
|
||||
if BUILD_GIT_REVISION=$(git rev-parse HEAD 2> /dev/null); then
|
||||
if [[ -n "$(git status --porcelain 2>/dev/null)" ]]; then
|
||||
BUILD_GIT_REVISION=${BUILD_GIT_REVISION}"-dirty"
|
||||
fi
|
||||
else
|
||||
BUILD_GIT_REVISION=unknown
|
||||
fi
|
||||
|
||||
# Check for local changes
|
||||
if git diff-index --quiet HEAD --; then
|
||||
tree_status="Clean"
|
||||
else
|
||||
tree_status="Modified"
|
||||
fi
|
||||
|
||||
# security wanted VERSION='unknown'
|
||||
VERSION="${BUILD_GIT_REVISION}"
|
||||
if [[ -n ${BEEGO_VERSION} ]]; then
|
||||
VERSION="${BEEGO_VERSION}"
|
||||
fi
|
||||
|
||||
GIT_DESCRIBE_TAG=$(git describe --tags)
|
||||
|
||||
echo "github.com/astaxie/beego.BuildVersion=${VERSION}"
|
||||
echo "github.com/astaxie/beego.BuildGitRevision=${BUILD_GIT_REVISION}"
|
||||
echo "github.com/astaxie/beego.BuildStatus=${tree_status}"
|
||||
echo "github.com/astaxie/beego.BuildTag=${GIT_DESCRIBE_TAG}"
|
@ -7,9 +7,10 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ledisdb/ledisdb/config"
|
||||
"github.com/ledisdb/ledisdb/ledis"
|
||||
|
||||
"github.com/astaxie/beego/session"
|
||||
"github.com/siddontang/ledisdb/config"
|
||||
"github.com/siddontang/ledisdb/ledis"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -74,7 +74,9 @@ func (st *CookieSessionStore) SessionID() string {
|
||||
|
||||
// SessionRelease Write cookie session to http response cookie
|
||||
func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) {
|
||||
st.lock.Lock()
|
||||
encodedCookie, err := encodeCookie(cookiepder.block, cookiepder.config.SecurityKey, cookiepder.config.SecurityName, st.values)
|
||||
st.lock.Unlock()
|
||||
if err == nil {
|
||||
cookie := &http.Cookie{Name: cookiepder.config.CookieName,
|
||||
Value: url.QueryEscape(encodedCookie),
|
||||
|
@ -129,8 +129,9 @@ func (fp *FileProvider) SessionInit(maxlifetime int64, savePath string) error {
|
||||
// if file is not exist, create it.
|
||||
// the file path is generated from sid string.
|
||||
func (fp *FileProvider) SessionRead(sid string) (Store, error) {
|
||||
if strings.ContainsAny(sid, "./") {
|
||||
return nil, nil
|
||||
invalidChars := "./"
|
||||
if strings.ContainsAny(sid, invalidChars) {
|
||||
return nil, errors.New("the sid shouldn't have following characters: " + invalidChars)
|
||||
}
|
||||
if len(sid) < 2 {
|
||||
return nil, errors.New("length of the sid is less than 2")
|
||||
@ -138,7 +139,7 @@ func (fp *FileProvider) SessionRead(sid string) (Store, error) {
|
||||
filepder.lock.Lock()
|
||||
defer filepder.lock.Unlock()
|
||||
|
||||
err := os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0777)
|
||||
err := os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0755)
|
||||
if err != nil {
|
||||
SLogger.Println(err.Error())
|
||||
}
|
||||
@ -231,7 +232,7 @@ func (fp *FileProvider) SessionRegenerate(oldsid, sid string) (Store, error) {
|
||||
return nil, fmt.Errorf("newsid %s exist", newSidFile)
|
||||
}
|
||||
|
||||
err = os.MkdirAll(newPath, 0777)
|
||||
err = os.MkdirAll(newPath, 0755)
|
||||
if err != nil {
|
||||
SLogger.Println(err.Error())
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
|
||||
"github.com/astaxie/beego/context"
|
||||
"github.com/astaxie/beego/logs"
|
||||
"github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
var errNotStaticRequest = errors.New("request not a static file request")
|
||||
@ -67,6 +68,10 @@ func serverStaticRouter(ctx *context.Context) {
|
||||
http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath)
|
||||
}
|
||||
return
|
||||
} else if fileInfo.Size() > int64(BConfig.WebConfig.StaticCacheFileSize) {
|
||||
//over size file serve with http module
|
||||
http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath)
|
||||
return
|
||||
}
|
||||
|
||||
var enableCompress = BConfig.EnableGzip && isStaticCompress(filePath)
|
||||
@ -93,10 +98,11 @@ func serverStaticRouter(ctx *context.Context) {
|
||||
}
|
||||
|
||||
type serveContentHolder struct {
|
||||
data []byte
|
||||
modTime time.Time
|
||||
size int64
|
||||
encoding string
|
||||
data []byte
|
||||
modTime time.Time
|
||||
size int64
|
||||
originSize int64 //original file size:to judge file changed
|
||||
encoding string
|
||||
}
|
||||
|
||||
type serveContentReader struct {
|
||||
@ -104,22 +110,36 @@ type serveContentReader struct {
|
||||
}
|
||||
|
||||
var (
|
||||
staticFileMap = make(map[string]*serveContentHolder)
|
||||
mapLock sync.RWMutex
|
||||
staticFileLruCache *lru.Cache
|
||||
lruLock sync.RWMutex
|
||||
)
|
||||
|
||||
func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, *serveContentReader, error) {
|
||||
if staticFileLruCache == nil {
|
||||
//avoid lru cache error
|
||||
if BConfig.WebConfig.StaticCacheFileNum >= 1 {
|
||||
staticFileLruCache, _ = lru.New(BConfig.WebConfig.StaticCacheFileNum)
|
||||
} else {
|
||||
staticFileLruCache, _ = lru.New(1)
|
||||
}
|
||||
}
|
||||
mapKey := acceptEncoding + ":" + filePath
|
||||
mapLock.RLock()
|
||||
mapFile := staticFileMap[mapKey]
|
||||
mapLock.RUnlock()
|
||||
lruLock.RLock()
|
||||
var mapFile *serveContentHolder
|
||||
if cacheItem, ok := staticFileLruCache.Get(mapKey); ok {
|
||||
mapFile = cacheItem.(*serveContentHolder)
|
||||
}
|
||||
lruLock.RUnlock()
|
||||
if isOk(mapFile, fi) {
|
||||
reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
|
||||
return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil
|
||||
}
|
||||
mapLock.Lock()
|
||||
defer mapLock.Unlock()
|
||||
if mapFile = staticFileMap[mapKey]; !isOk(mapFile, fi) {
|
||||
lruLock.Lock()
|
||||
defer lruLock.Unlock()
|
||||
if cacheItem, ok := staticFileLruCache.Get(mapKey); ok {
|
||||
mapFile = cacheItem.(*serveContentHolder)
|
||||
}
|
||||
if !isOk(mapFile, fi) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return false, "", nil, nil, err
|
||||
@ -130,8 +150,10 @@ func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, str
|
||||
if err != nil {
|
||||
return false, "", nil, nil, err
|
||||
}
|
||||
mapFile = &serveContentHolder{data: bufferWriter.Bytes(), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), encoding: n}
|
||||
staticFileMap[mapKey] = mapFile
|
||||
mapFile = &serveContentHolder{data: bufferWriter.Bytes(), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), originSize: fi.Size(), encoding: n}
|
||||
if isOk(mapFile, fi) {
|
||||
staticFileLruCache.Add(mapKey, mapFile)
|
||||
}
|
||||
}
|
||||
|
||||
reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
|
||||
@ -141,8 +163,10 @@ func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, str
|
||||
func isOk(s *serveContentHolder, fi os.FileInfo) bool {
|
||||
if s == nil {
|
||||
return false
|
||||
} else if s.size > int64(BConfig.WebConfig.StaticCacheFileSize) {
|
||||
return false
|
||||
}
|
||||
return s.modTime == fi.ModTime() && s.size == fi.Size()
|
||||
return s.modTime == fi.ModTime() && s.originSize == fi.Size()
|
||||
}
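The unbounded staticFileMap is replaced by an LRU keyed on encoding plus path: its size comes from StaticCacheFileNum, files larger than StaticCacheFileSize bypass the cache, and originSize lets isOk compare against the uncompressed file size. A sketch of tuning these knobs from application code; the concrete numbers are illustrative assumptions, not defaults:

package main

import "github.com/astaxie/beego"

func init() {
	beego.BConfig.WebConfig.StaticCacheFileSize = 1 << 20 // only cache files up to ~1 MB
	beego.BConfig.WebConfig.StaticCacheFileNum = 1000     // the LRU keeps at most 1000 entries
}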
|
||||
|
||||
// isStaticCompress detect static files
|
||||
|
@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
@ -53,6 +54,31 @@ func TestOpenStaticFileDeflate_1(t *testing.T) {
|
||||
testOpenFile("deflate", content, t)
|
||||
}
|
||||
|
||||
func TestStaticCacheWork(t *testing.T) {
|
||||
encodings := []string{"", "gzip", "deflate"}
|
||||
|
||||
fi, _ := os.Stat(licenseFile)
|
||||
for _, encoding := range encodings {
|
||||
_, _, first, _, err := openFile(licenseFile, fi, encoding)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
_, _, second, _, err := openFile(licenseFile, fi, encoding)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
address1 := fmt.Sprintf("%p", first)
|
||||
address2 := fmt.Sprintf("%p", second)
|
||||
if address1 != address2 {
|
||||
t.Errorf("encoding '%v' can not hit cache", encoding)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assetOpenFileAndContent(sch *serveContentHolder, reader *serveContentReader, content []byte, t *testing.T) {
|
||||
t.Log(sch.size, len(content))
|
||||
if sch.size != int64(len(content)) {
|
||||
@ -66,7 +92,7 @@ func assetOpenFileAndContent(sch *serveContentHolder, reader *serveContentReader
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
if len(staticFileMap) == 0 {
|
||||
if staticFileLruCache.Len() == 0 {
|
||||
t.Log("men map is empty")
|
||||
t.Fail()
|
||||
}
|
||||
|
@ -117,8 +117,8 @@ func (m *URLMap) GetMap() map[string]interface{} {
|
||||
|
||||
// GetMapData return all mapdata
|
||||
func (m *URLMap) GetMapData() []map[string]interface{} {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
m.lock.RLock()
|
||||
defer m.lock.RUnlock()
|
||||
|
||||
var resultLists []map[string]interface{}
|
||||
|
||||
|
@ -33,7 +33,7 @@ type bounds struct {
|
||||
// The bounds for each field.
|
||||
var (
|
||||
AdminTaskList map[string]Tasker
|
||||
taskLock sync.Mutex
|
||||
taskLock sync.RWMutex
|
||||
stop chan bool
|
||||
changed chan bool
|
||||
isstart bool
|
||||
@ -408,7 +408,10 @@ func run() {
|
||||
}
|
||||
|
||||
for {
|
||||
// we only need RLock here because NewMapSorter copies the references and does not change anything
|
||||
taskLock.RLock()
|
||||
sortList := NewMapSorter(AdminTaskList)
|
||||
taskLock.RUnlock()
|
||||
sortList.Sort()
|
||||
var effective time.Time
|
||||
if len(AdminTaskList) == 0 || sortList.Vals[0].GetNext().IsZero() {
|
||||
@ -432,9 +435,11 @@ func run() {
|
||||
continue
|
||||
case <-changed:
|
||||
now = time.Now().Local()
|
||||
taskLock.Lock()
|
||||
for _, t := range AdminTaskList {
|
||||
t.SetNext(now)
|
||||
}
|
||||
taskLock.Unlock()
|
||||
continue
|
||||
case <-stop:
|
||||
return
|
||||
|
@ -224,7 +224,7 @@ func parseFunc(vfunc, key string, label string) (v ValidFunc, err error) {
|
||||
func numIn(name string) (num int, err error) {
|
||||
fn, ok := funcs[name]
|
||||
if !ok {
|
||||
err = fmt.Errorf("doesn't exsits %s valid function", name)
|
||||
err = fmt.Errorf("doesn't exists %s valid function", name)
|
||||
return
|
||||
}
|
||||
// sub *Validation obj and key
|
||||
@ -236,7 +236,7 @@ func trim(name, key string, s []string) (ts []interface{}, err error) {
|
||||
ts = make([]interface{}, len(s), len(s)+1)
|
||||
fn, ok := funcs[name]
|
||||
if !ok {
|
||||
err = fmt.Errorf("doesn't exsits %s valid function", name)
|
||||
err = fmt.Errorf("doesn't exists %s valid function", name)
|
||||
return
|
||||
}
|
||||
for i := 0; i < len(s); i++ {
|
||||
|
@ -15,6 +15,7 @@
|
||||
package validation
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
@ -23,7 +24,7 @@ type user struct {
|
||||
ID int
|
||||
Tag string `valid:"Maxx(aa)"`
|
||||
Name string `valid:"Required;"`
|
||||
Age int `valid:"Required;Range(1, 140)"`
|
||||
Age int `valid:"Required; Range(1, 140)"`
|
||||
match string `valid:"Required; Match(/^(test)?\\w*@(/test/);com$/);Max(2)"`
|
||||
}
|
||||
|
||||
@ -42,7 +43,7 @@ func TestGetValidFuncs(t *testing.T) {
|
||||
}
|
||||
|
||||
f, _ = tf.FieldByName("Tag")
|
||||
if _, err = getValidFuncs(f); err.Error() != "doesn't exsits Maxx valid function" {
|
||||
if _, err = getValidFuncs(f); err.Error() != "doesn't exists Maxx valid function" {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
@ -80,6 +81,33 @@ func TestGetValidFuncs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Name string `valid:"Required;MaxSize(5)" `
|
||||
Sex string `valid:"Required;" label:"sex_label"`
|
||||
Age int `valid:"Required;Range(1, 140);" label:"age_label"`
|
||||
}
|
||||
|
||||
func TestValidation(t *testing.T) {
|
||||
u := User{"man1238888456", "", 1140}
|
||||
valid := Validation{}
|
||||
b, err := valid.Valid(&u)
|
||||
if err != nil {
|
||||
// handle error
|
||||
}
|
||||
if !b {
|
||||
// validation does not pass
|
||||
// blabla...
|
||||
for _, err := range valid.Errors {
|
||||
log.Println(err.Key, err.Message)
|
||||
}
|
||||
if len(valid.Errors) != 3 {
|
||||
t.Error("must be has 3 error")
|
||||
}
|
||||
} else {
|
||||
t.Error("must be has 3 error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCall(t *testing.T) {
|
||||
u := user{Name: "test", Age: 180}
|
||||
tf := reflect.TypeOf(u)
|
||||
|
@ -273,10 +273,13 @@ func (v *Validation) apply(chk Validator, obj interface{}) *Result {
|
||||
Field = parts[0]
|
||||
Name = parts[1]
|
||||
Label = parts[2]
|
||||
if len(Label) == 0 {
|
||||
Label = Field
|
||||
}
|
||||
}
|
||||
|
||||
err := &Error{
|
||||
Message: Label + chk.DefaultMessage(),
|
||||
Message: Label + " " + chk.DefaultMessage(),
|
||||
Key: key,
|
||||
Name: Name,
|
||||
Field: Field,
|
||||
@ -293,19 +296,25 @@ func (v *Validation) apply(chk Validator, obj interface{}) *Result {
|
||||
}
|
||||
}
|
||||
|
||||
// key must like aa.bb.cc or aa.bb.
|
||||
// AddError adds independent error message for the provided key
|
||||
func (v *Validation) AddError(key, message string) {
|
||||
Name := key
|
||||
Field := ""
|
||||
|
||||
Label := ""
|
||||
parts := strings.Split(key, ".")
|
||||
if len(parts) == 3 {
|
||||
Field = parts[0]
|
||||
Name = parts[1]
|
||||
Label = parts[2]
|
||||
if len(Label) == 0 {
|
||||
Label = Field
|
||||
}
|
||||
}
|
||||
|
||||
err := &Error{
|
||||
Message: message,
|
||||
Message: Label + " " + message,
|
||||
Key: key,
|
||||
Name: Name,
|
||||
Field: Field,
|
||||
@ -381,7 +390,6 @@ func (v *Validation) Valid(obj interface{}) (b bool, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
chk := Required{""}.IsSatisfied(currentField)
|
||||
if !hasRequired && v.RequiredFirst && !chk {
|
||||
if _, ok := CanSkipFuncs[vf.Name]; ok {
|
||||
|
@ -253,44 +253,68 @@ func TestBase64(t *testing.T) {
|
||||
func TestMobile(t *testing.T) {
|
||||
valid := Validation{}
|
||||
|
||||
if valid.Mobile("19800008888", "mobile").Ok {
|
||||
t.Error("\"19800008888\" is a valid mobile phone number should be false")
|
||||
validMobiles := []string{
|
||||
"19800008888",
|
||||
"18800008888",
|
||||
"18000008888",
|
||||
"8618300008888",
|
||||
"+8614700008888",
|
||||
"17300008888",
|
||||
"+8617100008888",
|
||||
"8617500008888",
|
||||
"8617400008888",
|
||||
"16200008888",
|
||||
"16500008888",
|
||||
"16600008888",
|
||||
"16700008888",
|
||||
"13300008888",
|
||||
"14900008888",
|
||||
"15300008888",
|
||||
"17300008888",
|
||||
"17700008888",
|
||||
"18000008888",
|
||||
"18900008888",
|
||||
"19100008888",
|
||||
"19900008888",
|
||||
"19300008888",
|
||||
"13000008888",
|
||||
"13100008888",
|
||||
"13200008888",
|
||||
"14500008888",
|
||||
"15500008888",
|
||||
"15600008888",
|
||||
"16600008888",
|
||||
"17100008888",
|
||||
"17500008888",
|
||||
"17600008888",
|
||||
"18500008888",
|
||||
"18600008888",
|
||||
"13400008888",
|
||||
"13500008888",
|
||||
"13600008888",
|
||||
"13700008888",
|
||||
"13800008888",
|
||||
"13900008888",
|
||||
"14700008888",
|
||||
"15000008888",
|
||||
"15100008888",
|
||||
"15200008888",
|
||||
"15800008888",
|
||||
"15900008888",
|
||||
"17200008888",
|
||||
"17800008888",
|
||||
"18200008888",
|
||||
"18300008888",
|
||||
"18400008888",
|
||||
"18700008888",
|
||||
"18800008888",
|
||||
"19800008888",
|
||||
}
|
||||
if !valid.Mobile("18800008888", "mobile").Ok {
|
||||
t.Error("\"18800008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("18000008888", "mobile").Ok {
|
||||
t.Error("\"18000008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("8618300008888", "mobile").Ok {
|
||||
t.Error("\"8618300008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("+8614700008888", "mobile").Ok {
|
||||
t.Error("\"+8614700008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("17300008888", "mobile").Ok {
|
||||
t.Error("\"17300008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("+8617100008888", "mobile").Ok {
|
||||
t.Error("\"+8617100008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("8617500008888", "mobile").Ok {
|
||||
t.Error("\"8617500008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if valid.Mobile("8617400008888", "mobile").Ok {
|
||||
t.Error("\"8617400008888\" is a valid mobile phone number should be false")
|
||||
}
|
||||
if !valid.Mobile("16200008888", "mobile").Ok {
|
||||
t.Error("\"16200008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("16500008888", "mobile").Ok {
|
||||
t.Error("\"16500008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("16600008888", "mobile").Ok {
|
||||
t.Error("\"16600008888\" is a valid mobile phone number should be true")
|
||||
}
|
||||
if !valid.Mobile("16700008888", "mobile").Ok {
|
||||
t.Error("\"16700008888\" is a valid mobile phone number should be true")
|
||||
|
||||
for _, m := range validMobiles {
|
||||
if !valid.Mobile(m, "mobile").Ok {
|
||||
t.Error(m + " is a valid mobile phone number should be true")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -381,8 +405,8 @@ func TestValid(t *testing.T) {
|
||||
if len(valid.Errors) != 1 {
|
||||
t.Fatalf("valid errors len should be 1 but got %d", len(valid.Errors))
|
||||
}
|
||||
if valid.Errors[0].Key != "Age.Range" {
|
||||
t.Errorf("Message key should be `Name.Match` but got %s", valid.Errors[0].Key)
|
||||
if valid.Errors[0].Key != "Age.Range." {
|
||||
t.Errorf("Message key should be `Age.Range` but got %s", valid.Errors[0].Key)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -16,9 +16,11 @@ package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/astaxie/beego/logs"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
@ -57,6 +59,8 @@ var MessageTmpls = map[string]string{
|
||||
"ZipCode": "Must be valid zipcode",
|
||||
}
|
||||
|
||||
var once sync.Once
|
||||
|
||||
// SetDefaultMessage set default messages
|
||||
// if not set, the default messages are
|
||||
// "Required": "Can not be empty",
|
||||
@ -84,9 +88,12 @@ func SetDefaultMessage(msg map[string]string) {
|
||||
return
|
||||
}
|
||||
|
||||
for name := range msg {
|
||||
MessageTmpls[name] = msg[name]
|
||||
}
|
||||
once.Do(func() {
|
||||
for name := range msg {
|
||||
MessageTmpls[name] = msg[name]
|
||||
}
|
||||
})
|
||||
logs.Warn(`SetDefaultMessage can only be set once; subsequent calls are ignored`)
|
||||
}
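With the sync.Once guard, only the first call to SetDefaultMessage takes effect; the keys must match entries in MessageTmpls. A minimal sketch of overriding one template at startup, with an illustrative message string:

package main

import "github.com/astaxie/beego/validation"

func init() {
	validation.SetDefaultMessage(map[string]string{
		"Required": "can not be empty", // key must match an entry in MessageTmpls
	})
}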
|
||||
|
||||
// Validator interface
|
||||
@ -632,7 +639,7 @@ func (b Base64) GetLimitValue() interface{} {
|
||||
}
|
||||
|
||||
// just for chinese mobile phone number
|
||||
var mobilePattern = regexp.MustCompile(`^((\+86)|(86))?(1(([35][0-9])|[8][0-9]|[7][01356789]|[4][579]|[6][2567]))\d{8}$`)
|
||||
var mobilePattern = regexp.MustCompile(`^((\+86)|(86))?1([356789][0-9]|4[579]|6[67]|7[0135678]|9[189])[0-9]{8}$`)
|
||||
|
||||
// Mobile check struct
|
||||
type Mobile struct {
|
||||
|
27
vendor/golang.org/x/crypto/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
22
vendor/golang.org/x/crypto/PATENTS
generated
vendored
@ -1,22 +0,0 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
921
vendor/golang.org/x/crypto/acme/acme.go
generated
vendored
@ -1,921 +0,0 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package acme provides an implementation of the
|
||||
// Automatic Certificate Management Environment (ACME) spec.
|
||||
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details.
|
||||
//
|
||||
// Most common scenarios will want to use autocert subdirectory instead,
|
||||
// which provides automatic access to certificates from Let's Encrypt
|
||||
// and any other ACME-based CA.
|
||||
//
|
||||
// This package is a work in progress and makes no API stability promises.
|
||||
package acme
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"crypto/x509/pkix"
|
||||
"encoding/asn1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA.
|
||||
LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
|
||||
|
||||
// ALPNProto is the ALPN protocol name used by a CA server when validating
|
||||
// tls-alpn-01 challenges.
|
||||
//
|
||||
// Package users must ensure their servers can negotiate the ACME ALPN
|
||||
// in order for tls-alpn-01 challenge verifications to succeed.
|
||||
ALPNProto = "acme-tls/1"
|
||||
)
|
||||
|
||||
// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge.
|
||||
var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}
|
||||
|
||||
const (
|
||||
maxChainLen = 5 // max depth and breadth of a certificate chain
|
||||
maxCertSize = 1 << 20 // max size of a certificate, in bytes
|
||||
|
||||
// Max number of collected nonces kept in memory.
|
||||
// Expect usual peak of 1 or 2.
|
||||
maxNonces = 100
|
||||
)
|
||||
|
||||
// Client is an ACME client.
|
||||
// The only required field is Key. An example of creating a client with a new key
|
||||
// is as follows:
|
||||
//
|
||||
// key, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
// if err != nil {
|
||||
// log.Fatal(err)
|
||||
// }
|
||||
// client := &Client{Key: key}
|
||||
//
|
||||
type Client struct {
|
||||
// Key is the account key used to register with a CA and sign requests.
|
||||
// Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey.
|
||||
Key crypto.Signer
|
||||
|
||||
// HTTPClient optionally specifies an HTTP client to use
|
||||
// instead of http.DefaultClient.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// DirectoryURL points to the CA directory endpoint.
|
||||
// If empty, LetsEncryptURL is used.
|
||||
// Mutating this value after a successful call of Client's Discover method
|
||||
// will have no effect.
|
||||
DirectoryURL string
|
||||
|
||||
// RetryBackoff computes the duration after which the nth retry of a failed request
|
||||
// should occur. The value of n for the first call on failure is 1.
|
||||
// The values of r and resp are the request and response of the last failed attempt.
|
||||
// If the returned value is negative or zero, no more retries are done and an error
|
||||
// is returned to the caller of the original method.
|
||||
//
|
||||
// Requests which result in a 4xx client error are not retried,
|
||||
// except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests.
|
||||
//
|
||||
// If RetryBackoff is nil, a truncated exponential backoff algorithm
|
||||
// with the ceiling of 10 seconds is used, where each subsequent retry n
|
||||
// is done after either ("Retry-After" + jitter) or (2^n seconds + jitter),
|
||||
// preferring the former if "Retry-After" header is found in the resp.
|
||||
// The jitter is a random value up to 1 second.
|
||||
RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration
|
||||
|
||||
dirMu sync.Mutex // guards writes to dir
|
||||
dir *Directory // cached result of Client's Discover method
|
||||
|
||||
noncesMu sync.Mutex
|
||||
nonces map[string]struct{} // nonces collected from previous responses
|
||||
}
|
||||
|
||||
// Discover performs ACME server discovery using c.DirectoryURL.
|
||||
//
|
||||
// It caches successful result. So, subsequent calls will not result in
|
||||
// a network round-trip. This also means mutating c.DirectoryURL after successful call
|
||||
// of this method will have no effect.
|
||||
func (c *Client) Discover(ctx context.Context) (Directory, error) {
|
||||
c.dirMu.Lock()
|
||||
defer c.dirMu.Unlock()
|
||||
if c.dir != nil {
|
||||
return *c.dir, nil
|
||||
}
|
||||
|
||||
dirURL := c.DirectoryURL
|
||||
if dirURL == "" {
|
||||
dirURL = LetsEncryptURL
|
||||
}
|
||||
res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK))
|
||||
if err != nil {
|
||||
return Directory{}, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
c.addNonce(res.Header)
|
||||
|
||||
var v struct {
|
||||
Reg string `json:"new-reg"`
|
||||
Authz string `json:"new-authz"`
|
||||
Cert string `json:"new-cert"`
|
||||
Revoke string `json:"revoke-cert"`
|
||||
Meta struct {
|
||||
Terms string `json:"terms-of-service"`
|
||||
Website string `json:"website"`
|
||||
CAA []string `json:"caa-identities"`
|
||||
}
|
||||
}
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return Directory{}, err
|
||||
}
|
||||
c.dir = &Directory{
|
||||
RegURL: v.Reg,
|
||||
AuthzURL: v.Authz,
|
||||
CertURL: v.Cert,
|
||||
RevokeURL: v.Revoke,
|
||||
Terms: v.Meta.Terms,
|
||||
Website: v.Meta.Website,
|
||||
CAA: v.Meta.CAA,
|
||||
}
|
||||
return *c.dir, nil
|
||||
}
|
||||
|
||||
// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format.
|
||||
// The exp argument indicates the desired certificate validity duration. CA may issue a certificate
|
||||
// with a different duration.
|
||||
// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain.
|
||||
//
|
||||
// In the case where CA server does not provide the issued certificate in the response,
|
||||
// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips.
|
||||
// In such a scenario, the caller can cancel the polling with ctx.
|
||||
//
|
||||
// CreateCert returns an error if the CA's response or chain was unreasonably large.
|
||||
// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features.
|
||||
func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) {
|
||||
if _, err := c.Discover(ctx); err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
|
||||
req := struct {
|
||||
Resource string `json:"resource"`
|
||||
CSR string `json:"csr"`
|
||||
NotBefore string `json:"notBefore,omitempty"`
|
||||
NotAfter string `json:"notAfter,omitempty"`
|
||||
}{
|
||||
Resource: "new-cert",
|
||||
CSR: base64.RawURLEncoding.EncodeToString(csr),
|
||||
}
|
||||
now := timeNow()
|
||||
req.NotBefore = now.Format(time.RFC3339)
|
||||
if exp > 0 {
|
||||
req.NotAfter = now.Add(exp).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated))
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
curl := res.Header.Get("Location") // cert permanent URL
|
||||
if res.ContentLength == 0 {
|
||||
// no cert in the body; poll until we get it
|
||||
cert, err := c.FetchCert(ctx, curl, bundle)
|
||||
return cert, curl, err
|
||||
}
|
||||
// slurp issued cert and CA chain, if requested
|
||||
cert, err := c.responseCert(ctx, res, bundle)
|
||||
return cert, curl, err
|
||||
}
|
||||
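// --- Illustrative sketch, not part of the original commit ---
// Shows the typical hand-off into CreateCert: build a DER-encoded CSR for an
// already-authorized domain and request a certificate with a 90-day validity
// hint. The domain name and the certificate key are placeholders.
package example

import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"time"

	"golang.org/x/crypto/acme"
)

func requestCert(ctx context.Context, cl *acme.Client, certKey *ecdsa.PrivateKey) (der [][]byte, certURL string, err error) {
	tmpl := &x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: "example.com"}, // placeholder domain
		DNSNames: []string{"example.com"},
	}
	csr, err := x509.CreateCertificateRequest(rand.Reader, tmpl, certKey)
	if err != nil {
		return nil, "", err
	}
	// bundle=true also returns the issuer chain fetched via rel="up" links.
	return cl.CreateCert(ctx, csr, 90*24*time.Hour, true)
}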
|
||||
// FetchCert retrieves already issued certificate from the given url, in DER format.
|
||||
// It retries the request until the certificate is successfully retrieved,
|
||||
// the context is cancelled by the caller, or an error response is received.
|
||||
//
|
||||
// The returned value will also contain the CA (issuer) certificate if the bundle argument is true.
|
||||
//
|
||||
// FetchCert returns an error if the CA's response or chain was unreasonably large.
|
||||
// Callers are encouraged to parse the returned value to ensure the certificate is valid
|
||||
// and has expected features.
|
||||
func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
|
||||
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.responseCert(ctx, res, bundle)
|
||||
}
|
||||
|
||||
// RevokeCert revokes a previously issued certificate cert, provided in DER format.
|
||||
//
|
||||
// The key argument, used to sign the request, must be authorized
|
||||
// to revoke the certificate. It's up to the CA to decide which keys are authorized.
|
||||
// For instance, the key pair of the certificate may be authorized.
|
||||
// If the key is nil, c.Key is used instead.
|
||||
func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error {
|
||||
if _, err := c.Discover(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body := &struct {
|
||||
Resource string `json:"resource"`
|
||||
Cert string `json:"certificate"`
|
||||
Reason int `json:"reason"`
|
||||
}{
|
||||
Resource: "revoke-cert",
|
||||
Cert: base64.RawURLEncoding.EncodeToString(cert),
|
||||
Reason: int(reason),
|
||||
}
|
||||
if key == nil {
|
||||
key = c.Key
|
||||
}
|
||||
res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
return nil
|
||||
}
|
||||
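// --- Illustrative sketch, not part of the original commit ---
// Revokes the leaf certificate obtained earlier. Passing a nil key makes the
// client sign the request with the account key (c.Key); der is the slice
// returned by CreateCert or FetchCert, with the end-entity certificate first.
package example

import (
	"context"

	"golang.org/x/crypto/acme"
)

func revokeLeaf(ctx context.Context, cl *acme.Client, der [][]byte) error {
	// der[0] is the end-entity certificate in DER form.
	return cl.RevokeCert(ctx, nil, der[0], acme.CRLReasonKeyCompromise)
}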
|
||||
// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service
|
||||
// during account registration. See Register method of Client for more details.
|
||||
func AcceptTOS(tosURL string) bool { return true }
|
||||
|
||||
// Register creates a new account registration by following the "new-reg" flow.
|
||||
// It returns the registered account. The account is not modified.
|
||||
//
|
||||
// The registration may require the caller to agree to the CA's Terms of Service (TOS).
|
||||
// If so, and the account has not indicated the acceptance of the terms (see Account for details),
|
||||
// Register calls prompt with a TOS URL provided by the CA. Prompt should report
|
||||
// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS.
|
||||
func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) {
|
||||
if _, err := c.Discover(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var accept bool
|
||||
if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms {
|
||||
accept = prompt(a.CurrentTerms)
|
||||
}
|
||||
if accept {
|
||||
a.AgreedTerms = a.CurrentTerms
|
||||
a, err = c.UpdateReg(ctx, a)
|
||||
}
|
||||
return a, err
|
||||
}
|
||||
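// --- Illustrative sketch, not part of the original commit ---
// Registers a new account and accepts the CA's terms unconditionally via
// acme.AcceptTOS. The mailto contact address is a placeholder.
package example

import (
	"context"

	"golang.org/x/crypto/acme"
)

func register(ctx context.Context, cl *acme.Client) (*acme.Account, error) {
	acct := &acme.Account{
		Contact: []string{"mailto:admin@example.com"}, // placeholder contact
	}
	return cl.Register(ctx, acct, acme.AcceptTOS)
}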
|
||||
// GetReg retrieves an existing registration.
|
||||
// The url argument is an Account URI.
|
||||
func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) {
|
||||
a, err := c.doReg(ctx, url, "reg", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.URI = url
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// UpdateReg updates an existing registration.
|
||||
// It returns an updated account copy. The provided account is not modified.
|
||||
func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) {
|
||||
uri := a.URI
|
||||
a, err := c.doReg(ctx, uri, "reg", a)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a.URI = uri
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Authorize performs the initial step in an authorization flow.
|
||||
// The caller will then need to choose from and perform a set of returned
|
||||
// challenges using c.Accept in order to successfully complete authorization.
|
||||
//
|
||||
// If an authorization has been previously granted, the CA may return
|
||||
// a valid authorization (Authorization.Status is StatusValid). If so, the caller
|
||||
// need not fulfill any challenge and can proceed to requesting a certificate.
|
||||
func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) {
|
||||
if _, err := c.Discover(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
type authzID struct {
|
||||
Type string `json:"type"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
req := struct {
|
||||
Resource string `json:"resource"`
|
||||
Identifier authzID `json:"identifier"`
|
||||
}{
|
||||
Resource: "new-authz",
|
||||
Identifier: authzID{Type: "dns", Value: domain},
|
||||
}
|
||||
res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var v wireAuthz
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return nil, fmt.Errorf("acme: invalid response: %v", err)
|
||||
}
|
||||
if v.Status != StatusPending && v.Status != StatusValid {
|
||||
return nil, fmt.Errorf("acme: unexpected status: %s", v.Status)
|
||||
}
|
||||
return v.authorization(res.Header.Get("Location")), nil
|
||||
}
|
||||
|
||||
// GetAuthorization retrieves an authorization identified by the given URL.
|
||||
//
|
||||
// If a caller needs to poll an authorization until its status is final,
|
||||
// see the WaitAuthorization method.
|
||||
func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) {
|
||||
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
var v wireAuthz
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return nil, fmt.Errorf("acme: invalid response: %v", err)
|
||||
}
|
||||
return v.authorization(url), nil
|
||||
}
|
||||
|
||||
// RevokeAuthorization relinquishes an existing authorization identified
|
||||
// by the given URL.
|
||||
// The url argument is an Authorization.URI value.
|
||||
//
|
||||
// If successful, the caller will be required to obtain a new authorization
|
||||
// using the Authorize method before being able to request a new certificate
|
||||
// for the domain associated with the authorization.
|
||||
//
|
||||
// It does not revoke existing certificates.
|
||||
func (c *Client) RevokeAuthorization(ctx context.Context, url string) error {
|
||||
req := struct {
|
||||
Resource string `json:"resource"`
|
||||
Status string `json:"status"`
|
||||
Delete bool `json:"delete"`
|
||||
}{
|
||||
Resource: "authz",
|
||||
Status: "deactivated",
|
||||
Delete: true,
|
||||
}
|
||||
res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// WaitAuthorization polls an authorization at the given URL
|
||||
// until it is in one of the final states, StatusValid or StatusInvalid,
|
||||
// the ACME CA responded with a 4xx error code, or the context is done.
|
||||
//
|
||||
// It returns a non-nil Authorization only if its Status is StatusValid.
|
||||
// In all other cases WaitAuthorization returns an error.
|
||||
// If the Status is StatusInvalid, the returned error is of type *AuthorizationError.
|
||||
func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) {
|
||||
for {
|
||||
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var raw wireAuthz
|
||||
err = json.NewDecoder(res.Body).Decode(&raw)
|
||||
res.Body.Close()
|
||||
switch {
|
||||
case err != nil:
|
||||
// Skip and retry.
|
||||
case raw.Status == StatusValid:
|
||||
return raw.authorization(url), nil
|
||||
case raw.Status == StatusInvalid:
|
||||
return nil, raw.error(url)
|
||||
}
|
||||
|
||||
// Exponential backoff is implemented in c.get above.
|
||||
// This is just to prevent continuously hitting the CA
|
||||
// while waiting for a final authorization status.
|
||||
d := retryAfter(res.Header.Get("Retry-After"))
|
||||
if d == 0 {
|
||||
// Given that the fastest challenges TLS-SNI and HTTP-01
|
||||
// require a CA to make at least 1 network round trip
|
||||
// and most likely persist a challenge state,
|
||||
// this default delay seems reasonable.
|
||||
d = time.Second
|
||||
}
|
||||
t := time.NewTimer(d)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
t.Stop()
|
||||
return nil, ctx.Err()
|
||||
case <-t.C:
|
||||
// Retry.
|
||||
}
|
||||
}
|
||||
}
|
||||
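// --- Illustrative sketch, not part of the original commit ---
// Ties Authorize, Accept and WaitAuthorization together for an http-01
// challenge. The Status, Challenges and URI field names come from the
// package's Authorization and Challenge types (declared in types.go further
// down this diff); actually serving the key authorization over HTTP is only
// indicated by a comment, since that depends on the deployment.
package example

import (
	"context"
	"errors"

	"golang.org/x/crypto/acme"
)

func authorizeHTTP01(ctx context.Context, cl *acme.Client, domain string) error {
	authz, err := cl.Authorize(ctx, domain)
	if err != nil {
		return err
	}
	if authz.Status == acme.StatusValid {
		return nil // already authorized; nothing to do
	}
	var chal *acme.Challenge
	for _, c := range authz.Challenges {
		if c.Type == "http-01" {
			chal = c
			break
		}
	}
	if chal == nil {
		return errors.New("no http-01 challenge offered")
	}
	// At this point the value from cl.HTTP01ChallengeResponse(chal.Token)
	// must be served at cl.HTTP01ChallengePath(chal.Token) on port 80.
	if _, err := cl.Accept(ctx, chal); err != nil {
		return err
	}
	_, err = cl.WaitAuthorization(ctx, authz.URI)
	return err
}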
|
||||
// GetChallenge retrieves the current status of a challenge.
|
||||
//
|
||||
// A client typically polls a challenge status using this method.
|
||||
func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) {
|
||||
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
v := wireChallenge{URI: url}
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return nil, fmt.Errorf("acme: invalid response: %v", err)
|
||||
}
|
||||
return v.challenge(), nil
|
||||
}
|
||||
|
||||
// Accept informs the server that the client accepts one of its challenges
|
||||
// previously obtained with c.Authorize.
|
||||
//
|
||||
// The server will then perform the validation asynchronously.
|
||||
func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) {
|
||||
auth, err := keyAuth(c.Key.Public(), chal.Token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := struct {
|
||||
Resource string `json:"resource"`
|
||||
Type string `json:"type"`
|
||||
Auth string `json:"keyAuthorization"`
|
||||
}{
|
||||
Resource: "challenge",
|
||||
Type: chal.Type,
|
||||
Auth: auth,
|
||||
}
|
||||
res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus(
|
||||
http.StatusOK, // according to the spec
|
||||
http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md)
|
||||
))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var v wireChallenge
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return nil, fmt.Errorf("acme: invalid response: %v", err)
|
||||
}
|
||||
return v.challenge(), nil
|
||||
}
|
||||
|
||||
// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response.
|
||||
// A TXT record containing the returned value must be provisioned under
|
||||
// "_acme-challenge" name of the domain being validated.
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
func (c *Client) DNS01ChallengeRecord(token string) (string, error) {
|
||||
ka, err := keyAuth(c.Key.Public(), token)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(ka))
|
||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
||||
}
|
||||
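// --- Illustrative sketch, not part of the original commit ---
// Computes the TXT record value for a dns-01 challenge. Publishing the record
// through a DNS provider's API is outside the scope of this package, so it is
// only indicated by the printed record and a comment.
package example

import (
	"fmt"

	"golang.org/x/crypto/acme"
)

func dns01Record(cl *acme.Client, domain, token string) error {
	val, err := cl.DNS01ChallengeRecord(token)
	if err != nil {
		return err
	}
	// Create a TXT record for _acme-challenge.<domain> with the value below,
	// then call cl.Accept on the dns-01 challenge.
	fmt.Printf("_acme-challenge.%s. 300 IN TXT %q\n", domain, val)
	return nil
}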
|
||||
// HTTP01ChallengeResponse returns the response for an http-01 challenge.
|
||||
// Servers should respond with the value to HTTP requests at the URL path
|
||||
// provided by HTTP01ChallengePath to validate the challenge and prove control
|
||||
// over a domain name.
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
func (c *Client) HTTP01ChallengeResponse(token string) (string, error) {
|
||||
return keyAuth(c.Key.Public(), token)
|
||||
}
|
||||
|
||||
// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge
|
||||
// should be provided by the servers.
|
||||
// The response value can be obtained with HTTP01ChallengeResponse.
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
func (c *Client) HTTP01ChallengePath(token string) string {
|
||||
return "/.well-known/acme-challenge/" + token
|
||||
}
|
||||
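// --- Illustrative sketch, not part of the original commit ---
// A minimal handler that answers http-01 validation requests for a single
// token. A real deployment would look tokens up dynamically; the single
// precomputed token here is just for illustration.
package example

import (
	"net/http"

	"golang.org/x/crypto/acme"
)

func challengeHandler(cl *acme.Client, token string) (http.Handler, error) {
	resp, err := cl.HTTP01ChallengeResponse(token)
	if err != nil {
		return nil, err
	}
	mux := http.NewServeMux()
	// HTTP01ChallengePath returns "/.well-known/acme-challenge/<token>".
	mux.HandleFunc(cl.HTTP01ChallengePath(token), func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(resp))
	})
	return mux, nil
}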
|
||||
// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response.
|
||||
// Servers can present the certificate to validate the challenge and prove control
|
||||
// over a domain name.
|
||||
//
|
||||
// The implementation is incomplete in that the returned value is a single certificate,
|
||||
// computed only for Z0 of the key authorization. ACME CAs are expected to update
|
||||
// their implementations to use the newer version, TLS-SNI-02.
|
||||
// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3.
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
// If a WithKey option is provided, its private part signs the returned cert,
|
||||
// and the public part is used to specify the signee.
|
||||
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
|
||||
//
|
||||
// The returned certificate is valid for the next 24 hours and must be presented only when
|
||||
// the server name of the TLS ClientHello matches exactly the returned name value.
|
||||
func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
|
||||
ka, err := keyAuth(c.Key.Public(), token)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(ka))
|
||||
h := hex.EncodeToString(b[:])
|
||||
name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:])
|
||||
cert, err = tlsChallengeCert([]string{name}, opt)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, "", err
|
||||
}
|
||||
return cert, name, nil
|
||||
}
|
||||
|
||||
// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response.
|
||||
// Servers can present the certificate to validate the challenge and prove control
|
||||
// over a domain name. For more details on TLS-SNI-02 see
|
||||
// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3.
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
// If a WithKey option is provided, its private part signs the returned cert,
|
||||
// and the public part is used to specify the signee.
|
||||
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
|
||||
//
|
||||
// The returned certificate is valid for the next 24 hours and must be presented only when
|
||||
// the server name in the TLS ClientHello matches exactly the returned name value.
|
||||
func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
|
||||
b := sha256.Sum256([]byte(token))
|
||||
h := hex.EncodeToString(b[:])
|
||||
sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:])
|
||||
|
||||
ka, err := keyAuth(c.Key.Public(), token)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, "", err
|
||||
}
|
||||
b = sha256.Sum256([]byte(ka))
|
||||
h = hex.EncodeToString(b[:])
|
||||
sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:])
|
||||
|
||||
cert, err = tlsChallengeCert([]string{sanA, sanB}, opt)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, "", err
|
||||
}
|
||||
return cert, sanA, nil
|
||||
}
|
||||
|
||||
// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response.
|
||||
// Servers can present the certificate to validate the challenge and prove control
|
||||
// over a domain name. For more details on TLS-ALPN-01 see
|
||||
// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3
|
||||
//
|
||||
// The token argument is a Challenge.Token value.
|
||||
// If a WithKey option is provided, its private part signs the returned cert,
|
||||
// and the public part is used to specify the signee.
|
||||
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
|
||||
//
|
||||
// The returned certificate is valid for the next 24 hours and must be presented only when
|
||||
// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol
|
||||
// has been specified.
|
||||
func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) {
|
||||
ka, err := keyAuth(c.Key.Public(), token)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
shasum := sha256.Sum256([]byte(ka))
|
||||
extValue, err := asn1.Marshal(shasum[:])
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
acmeExtension := pkix.Extension{
|
||||
Id: idPeACMEIdentifierV1,
|
||||
Critical: true,
|
||||
Value: extValue,
|
||||
}
|
||||
|
||||
tmpl := defaultTLSChallengeCertTemplate()
|
||||
|
||||
var newOpt []CertOption
|
||||
for _, o := range opt {
|
||||
switch o := o.(type) {
|
||||
case *certOptTemplate:
|
||||
t := *(*x509.Certificate)(o) // shallow copy is ok
|
||||
tmpl = &t
|
||||
default:
|
||||
newOpt = append(newOpt, o)
|
||||
}
|
||||
}
|
||||
tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension)
|
||||
newOpt = append(newOpt, WithTemplate(tmpl))
|
||||
return tlsChallengeCert([]string{domain}, newOpt)
|
||||
}
|
||||
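// --- Illustrative sketch, not part of the original commit ---
// Builds a tls.Config that presents the TLS-ALPN-01 challenge certificate when
// a validation connection negotiates the acme-tls/1 protocol, and falls back
// to a caller-supplied certificate otherwise. The protocol branching follows
// the draft referenced above; the fallback certificate is a placeholder.
package example

import (
	"crypto/tls"
	"errors"

	"golang.org/x/crypto/acme"
)

func alpnConfig(cl *acme.Client, domain, token string, fallback *tls.Certificate) (*tls.Config, error) {
	challengeCert, err := cl.TLSALPN01ChallengeCert(token, domain)
	if err != nil {
		return nil, err
	}
	return &tls.Config{
		NextProtos: []string{"acme-tls/1", "h2", "http/1.1"},
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			for _, proto := range hello.SupportedProtos {
				if proto == "acme-tls/1" {
					return &challengeCert, nil
				}
			}
			if fallback == nil {
				return nil, errors.New("no certificate configured")
			}
			return fallback, nil
		},
	}, nil
}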
|
||||
// doReg sends all types of registration requests.
|
||||
// The type of request is identified by typ argument, which is a "resource"
|
||||
// in the ACME spec terms.
|
||||
//
|
||||
// A non-nil acct argument indicates that the intention is to mutate data
// of the Account. Only its Contact and Agreement fields are used
|
||||
// in such cases.
|
||||
func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) {
|
||||
req := struct {
|
||||
Resource string `json:"resource"`
|
||||
Contact []string `json:"contact,omitempty"`
|
||||
Agreement string `json:"agreement,omitempty"`
|
||||
}{
|
||||
Resource: typ,
|
||||
}
|
||||
if acct != nil {
|
||||
req.Contact = acct.Contact
|
||||
req.Agreement = acct.AgreedTerms
|
||||
}
|
||||
res, err := c.post(ctx, c.Key, url, req, wantStatus(
|
||||
http.StatusOK, // updates and deletes
|
||||
http.StatusCreated, // new account creation
|
||||
http.StatusAccepted, // Let's Encrypt divergent implementation
|
||||
))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
var v struct {
|
||||
Contact []string
|
||||
Agreement string
|
||||
Authorizations string
|
||||
Certificates string
|
||||
}
|
||||
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
|
||||
return nil, fmt.Errorf("acme: invalid response: %v", err)
|
||||
}
|
||||
var tos string
|
||||
if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 {
|
||||
tos = v[0]
|
||||
}
|
||||
var authz string
|
||||
if v := linkHeader(res.Header, "next"); len(v) > 0 {
|
||||
authz = v[0]
|
||||
}
|
||||
return &Account{
|
||||
URI: res.Header.Get("Location"),
|
||||
Contact: v.Contact,
|
||||
AgreedTerms: v.Agreement,
|
||||
CurrentTerms: tos,
|
||||
Authz: authz,
|
||||
Authorizations: v.Authorizations,
|
||||
Certificates: v.Certificates,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// popNonce returns a nonce value previously stored with c.addNonce
|
||||
// or fetches a fresh one from the given URL.
|
||||
func (c *Client) popNonce(ctx context.Context, url string) (string, error) {
|
||||
c.noncesMu.Lock()
|
||||
defer c.noncesMu.Unlock()
|
||||
if len(c.nonces) == 0 {
|
||||
return c.fetchNonce(ctx, url)
|
||||
}
|
||||
var nonce string
|
||||
for nonce = range c.nonces {
|
||||
delete(c.nonces, nonce)
|
||||
break
|
||||
}
|
||||
return nonce, nil
|
||||
}
|
||||
|
||||
// clearNonces clears any stored nonces
|
||||
func (c *Client) clearNonces() {
|
||||
c.noncesMu.Lock()
|
||||
defer c.noncesMu.Unlock()
|
||||
c.nonces = make(map[string]struct{})
|
||||
}
|
||||
|
||||
// addNonce stores a nonce value found in h (if any) for future use.
|
||||
func (c *Client) addNonce(h http.Header) {
|
||||
v := nonceFromHeader(h)
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
c.noncesMu.Lock()
|
||||
defer c.noncesMu.Unlock()
|
||||
if len(c.nonces) >= maxNonces {
|
||||
return
|
||||
}
|
||||
if c.nonces == nil {
|
||||
c.nonces = make(map[string]struct{})
|
||||
}
|
||||
c.nonces[v] = struct{}{}
|
||||
}
|
||||
|
||||
func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) {
|
||||
r, err := http.NewRequest("HEAD", url, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
resp, err := c.doNoRetry(ctx, r)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
nonce := nonceFromHeader(resp.Header)
|
||||
if nonce == "" {
|
||||
if resp.StatusCode > 299 {
|
||||
return "", responseError(resp)
|
||||
}
|
||||
return "", errors.New("acme: nonce not found")
|
||||
}
|
||||
return nonce, nil
|
||||
}
|
||||
|
||||
func nonceFromHeader(h http.Header) string {
|
||||
return h.Get("Replay-Nonce")
|
||||
}
|
||||
|
||||
func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) {
|
||||
b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("acme: response stream: %v", err)
|
||||
}
|
||||
if len(b) > maxCertSize {
|
||||
return nil, errors.New("acme: certificate is too big")
|
||||
}
|
||||
cert := [][]byte{b}
|
||||
if !bundle {
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// Append CA chain cert(s).
|
||||
// At least one is required according to the spec:
|
||||
// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1
|
||||
up := linkHeader(res.Header, "up")
|
||||
if len(up) == 0 {
|
||||
return nil, errors.New("acme: rel=up link not found")
|
||||
}
|
||||
if len(up) > maxChainLen {
|
||||
return nil, errors.New("acme: rel=up link is too large")
|
||||
}
|
||||
for _, url := range up {
|
||||
cc, err := c.chainCert(ctx, url, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cert = append(cert, cc...)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// chainCert fetches CA certificate chain recursively by following "up" links.
|
||||
// Each recursive call increments the depth by 1, resulting in an error
|
||||
// if the recursion level reaches maxChainLen.
|
||||
//
|
||||
// First chainCert call starts with depth of 0.
|
||||
func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) {
|
||||
if depth >= maxChainLen {
|
||||
return nil, errors.New("acme: certificate chain is too deep")
|
||||
}
|
||||
|
||||
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(b) > maxCertSize {
|
||||
return nil, errors.New("acme: certificate is too big")
|
||||
}
|
||||
chain := [][]byte{b}
|
||||
|
||||
uplink := linkHeader(res.Header, "up")
|
||||
if len(uplink) > maxChainLen {
|
||||
return nil, errors.New("acme: certificate chain is too large")
|
||||
}
|
||||
for _, up := range uplink {
|
||||
cc, err := c.chainCert(ctx, up, depth+1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
chain = append(chain, cc...)
|
||||
}
|
||||
|
||||
return chain, nil
|
||||
}
|
||||
|
||||
// linkHeader returns URI-Reference values of all Link headers
|
||||
// with relation-type rel.
|
||||
// See https://tools.ietf.org/html/rfc5988#section-5 for details.
|
||||
func linkHeader(h http.Header, rel string) []string {
|
||||
var links []string
|
||||
for _, v := range h["Link"] {
|
||||
parts := strings.Split(v, ";")
|
||||
for _, p := range parts {
|
||||
p = strings.TrimSpace(p)
|
||||
if !strings.HasPrefix(p, "rel=") {
|
||||
continue
|
||||
}
|
||||
if v := strings.Trim(p[4:], `"`); v == rel {
|
||||
links = append(links, strings.Trim(parts[0], "<>"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return links
|
||||
}
|
||||
|
||||
// keyAuth generates a key authorization string for a given token.
|
||||
func keyAuth(pub crypto.PublicKey, token string) (string, error) {
|
||||
th, err := JWKThumbprint(pub)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", token, th), nil
|
||||
}
|
||||
|
||||
// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges.
|
||||
func defaultTLSChallengeCertTemplate() *x509.Certificate {
|
||||
return &x509.Certificate{
|
||||
SerialNumber: big.NewInt(1),
|
||||
NotBefore: time.Now(),
|
||||
NotAfter: time.Now().Add(24 * time.Hour),
|
||||
BasicConstraintsValid: true,
|
||||
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
|
||||
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
}
|
||||
}
|
||||
|
||||
// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges
|
||||
// with the given SANs and auto-generated public/private key pair.
|
||||
// The Subject Common Name is set to the first SAN to aid debugging.
|
||||
// To create a cert with a custom key pair, specify WithKey option.
|
||||
func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
|
||||
var key crypto.Signer
|
||||
tmpl := defaultTLSChallengeCertTemplate()
|
||||
for _, o := range opt {
|
||||
switch o := o.(type) {
|
||||
case *certOptKey:
|
||||
if key != nil {
|
||||
return tls.Certificate{}, errors.New("acme: duplicate key option")
|
||||
}
|
||||
key = o.key
|
||||
case *certOptTemplate:
|
||||
t := *(*x509.Certificate)(o) // shallow copy is ok
|
||||
tmpl = &t
|
||||
default:
|
||||
// package's fault, if we let this happen:
|
||||
panic(fmt.Sprintf("unsupported option type %T", o))
|
||||
}
|
||||
}
|
||||
if key == nil {
|
||||
var err error
|
||||
if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
}
|
||||
tmpl.DNSNames = san
|
||||
if len(san) > 0 {
|
||||
tmpl.Subject.CommonName = san[0]
|
||||
}
|
||||
|
||||
der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, err
|
||||
}
|
||||
return tls.Certificate{
|
||||
Certificate: [][]byte{der},
|
||||
PrivateKey: key,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// encodePEM returns b encoded as PEM with block of type typ.
|
||||
func encodePEM(typ string, b []byte) []byte {
|
||||
pb := &pem.Block{Type: typ, Bytes: b}
|
||||
return pem.EncodeToMemory(pb)
|
||||
}
|
||||
|
||||
// timeNow is useful for testing for fixed current time.
|
||||
var timeNow = time.Now
|
1127  vendor/golang.org/x/crypto/acme/autocert/autocert.go  generated  vendored
File diff suppressed because it is too large

130  vendor/golang.org/x/crypto/acme/autocert/cache.go  generated  vendored
@@ -1,130 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// ErrCacheMiss is returned when a certificate is not found in cache.
|
||||
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
|
||||
|
||||
// Cache is used by Manager to store and retrieve previously obtained certificates
|
||||
// and other account data as opaque blobs.
|
||||
//
|
||||
// Cache implementations should not rely on the key naming pattern. Keys can
|
||||
// include any printable ASCII characters, except the following: \/:*?"<>|
|
||||
type Cache interface {
|
||||
// Get returns a certificate data for the specified key.
|
||||
// If there's no such key, Get returns ErrCacheMiss.
|
||||
Get(ctx context.Context, key string) ([]byte, error)
|
||||
|
||||
// Put stores the data in the cache under the specified key.
|
||||
// Underlying implementations may use any data storage format,
|
||||
// as long as the reverse operation, Get, results in the original data.
|
||||
Put(ctx context.Context, key string, data []byte) error
|
||||
|
||||
// Delete removes a certificate data from the cache under the specified key.
|
||||
// If there's no such key in the cache, Delete returns nil.
|
||||
Delete(ctx context.Context, key string) error
|
||||
}
|
||||
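// --- Illustrative sketch, not part of the original commit ---
// A tiny in-memory Cache implementation, mostly useful in tests: it satisfies
// the three-method interface above but loses everything on restart, so real
// deployments should prefer DirCache or another persistent store.
package example

import (
	"context"
	"sync"

	"golang.org/x/crypto/acme/autocert"
)

type memCache struct {
	mu sync.Mutex
	m  map[string][]byte
}

func newMemCache() *memCache { return &memCache{m: make(map[string][]byte)} }

func (c *memCache) Get(ctx context.Context, key string) ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if b, ok := c.m[key]; ok {
		return append([]byte(nil), b...), nil // return a copy
	}
	return nil, autocert.ErrCacheMiss
}

func (c *memCache) Put(ctx context.Context, key string, data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = append([]byte(nil), data...)
	return nil
}

func (c *memCache) Delete(ctx context.Context, key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.m, key)
	return nil
}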
|
||||
// DirCache implements Cache using a directory on the local filesystem.
|
||||
// If the directory does not exist, it will be created with 0700 permissions.
|
||||
type DirCache string
|
||||
|
||||
// Get reads a certificate data from the specified file name.
|
||||
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
data, err = ioutil.ReadFile(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
return nil, ErrCacheMiss
|
||||
}
|
||||
return data, err
|
||||
}
|
||||
|
||||
// Put writes the certificate data to the specified file name.
|
||||
// The file will be created with 0600 permissions.
|
||||
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
|
||||
if err := os.MkdirAll(string(d), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
go func() {
|
||||
defer close(done)
|
||||
var tmp string
|
||||
if tmp, err = d.writeTempFile(name, data); err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Don't overwrite the file if the context was canceled.
|
||||
default:
|
||||
newName := filepath.Join(string(d), name)
|
||||
err = os.Rename(tmp, newName)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete removes the specified file name.
|
||||
func (d DirCache) Delete(ctx context.Context, name string) error {
|
||||
name = filepath.Join(string(d), name)
|
||||
var (
|
||||
err error
|
||||
done = make(chan struct{})
|
||||
)
|
||||
go func() {
|
||||
err = os.Remove(name)
|
||||
close(done)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-done:
|
||||
}
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTempFile writes b to a temporary file, closes the file and returns its path.
|
||||
func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) {
|
||||
// TempFile uses 0600 permissions
|
||||
f, err := ioutil.TempFile(string(d), prefix)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, err := f.Write(b); err != nil {
|
||||
f.Close()
|
||||
return "", err
|
||||
}
|
||||
return f.Name(), f.Close()
|
||||
}
157  vendor/golang.org/x/crypto/acme/autocert/listener.go  generated  vendored
@@ -1,157 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewListener returns a net.Listener that listens on the standard TLS
|
||||
// port (443) on all interfaces and returns *tls.Conn connections with
|
||||
// LetsEncrypt certificates for the provided domain or domains.
|
||||
//
|
||||
// It enables one-line HTTPS servers:
|
||||
//
|
||||
// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
|
||||
//
|
||||
// NewListener is a convenience function for a common configuration.
|
||||
// More complex or custom configurations can use the autocert.Manager
|
||||
// type instead.
|
||||
//
|
||||
// Use of this function implies acceptance of the LetsEncrypt Terms of
|
||||
// Service. If domains is not empty, the provided domains are passed
|
||||
// to HostWhitelist. If domains is empty, the listener will do
|
||||
// LetsEncrypt challenges for any requested domain, which is not
|
||||
// recommended.
|
||||
//
|
||||
// Certificates are cached in a "golang-autocert" directory under an
|
||||
// operating system-specific cache or temp directory. This may not
|
||||
// be suitable for servers spanning multiple machines.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. The returned *tls.Conn are returned before their TLS
|
||||
// handshake has completed.
|
||||
func NewListener(domains ...string) net.Listener {
|
||||
m := &Manager{
|
||||
Prompt: AcceptTOS,
|
||||
}
|
||||
if len(domains) > 0 {
|
||||
m.HostPolicy = HostWhitelist(domains...)
|
||||
}
|
||||
dir := cacheDir()
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
log.Printf("warning: autocert.NewListener not using a cache: %v", err)
|
||||
} else {
|
||||
m.Cache = DirCache(dir)
|
||||
}
|
||||
return m.Listener()
|
||||
}
|
||||
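// --- Illustrative sketch, not part of the original commit ---
// The manual counterpart of NewListener: configure a Manager explicitly and
// wire it into an http.Server via TLSConfig. The domain, cache directory and
// handler are placeholders.
package example

import (
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func serve(handler http.Handler) error {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.com"),            // placeholder domain
		Cache:      autocert.DirCache("/var/cache/golang-autocert"), // placeholder directory
	}
	srv := &http.Server{
		Addr:      ":443",
		Handler:   handler,
		TLSConfig: m.TLSConfig(),
	}
	// Certificate and key file names are empty because
	// TLSConfig.GetCertificate obtains certificates on demand.
	return srv.ListenAndServeTLS("", "")
}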
|
||||
// Listener listens on the standard TLS port (443) on all interfaces
|
||||
// and returns a net.Listener returning *tls.Conn connections.
|
||||
//
|
||||
// The returned listener uses a *tls.Config that enables HTTP/2, and
|
||||
// should only be used with servers that support HTTP/2.
|
||||
//
|
||||
// The returned Listener also enables TCP keep-alives on the accepted
|
||||
// connections. The returned *tls.Conn are returned before their TLS
|
||||
// handshake has completed.
|
||||
//
|
||||
// Unlike NewListener, it is the caller's responsibility to initialize
|
||||
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
|
||||
func (m *Manager) Listener() net.Listener {
|
||||
ln := &listener{
|
||||
m: m,
|
||||
conf: m.TLSConfig(),
|
||||
}
|
||||
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
|
||||
return ln
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
m *Manager
|
||||
conf *tls.Config
|
||||
|
||||
tcpListener net.Listener
|
||||
tcpListenErr error
|
||||
}
|
||||
|
||||
func (ln *listener) Accept() (net.Conn, error) {
|
||||
if ln.tcpListenErr != nil {
|
||||
return nil, ln.tcpListenErr
|
||||
}
|
||||
conn, err := ln.tcpListener.Accept()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tcpConn := conn.(*net.TCPConn)
|
||||
|
||||
// Because Listener is a convenience function, help out with
|
||||
// this too. This is not possible for the caller to set once
|
||||
// we return a *tcp.Conn wrapping an inaccessible net.Conn.
|
||||
// If callers don't want this, they can do things the manual
|
||||
// way and tweak as needed. But this is what net/http does
|
||||
// itself, so copy that. If net/http changes, we can change
|
||||
// here too.
|
||||
tcpConn.SetKeepAlive(true)
|
||||
tcpConn.SetKeepAlivePeriod(3 * time.Minute)
|
||||
|
||||
return tls.Server(tcpConn, ln.conf), nil
|
||||
}
|
||||
|
||||
func (ln *listener) Addr() net.Addr {
|
||||
if ln.tcpListener != nil {
|
||||
return ln.tcpListener.Addr()
|
||||
}
|
||||
// net.Listen failed. Return something non-nil in case callers
|
||||
// call Addr before Accept:
|
||||
return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
|
||||
}
|
||||
|
||||
func (ln *listener) Close() error {
|
||||
if ln.tcpListenErr != nil {
|
||||
return ln.tcpListenErr
|
||||
}
|
||||
return ln.tcpListener.Close()
|
||||
}
|
||||
|
||||
func homeDir() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
|
||||
}
|
||||
if h := os.Getenv("HOME"); h != "" {
|
||||
return h
|
||||
}
|
||||
return "/"
|
||||
}
|
||||
|
||||
func cacheDir() string {
|
||||
const base = "golang-autocert"
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
return filepath.Join(homeDir(), "Library", "Caches", base)
|
||||
case "windows":
|
||||
for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
|
||||
if v := os.Getenv(ev); v != "" {
|
||||
return filepath.Join(v, base)
|
||||
}
|
||||
}
|
||||
// Worst case:
|
||||
return filepath.Join(homeDir(), base)
|
||||
}
|
||||
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
|
||||
return filepath.Join(xdg, base)
|
||||
}
|
||||
return filepath.Join(homeDir(), ".cache", base)
|
||||
}
141  vendor/golang.org/x/crypto/acme/autocert/renewal.go  generated  vendored
@@ -1,141 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package autocert
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// renewJitter is the maximum deviation from Manager.RenewBefore.
|
||||
const renewJitter = time.Hour
|
||||
|
||||
// domainRenewal tracks the state used by the periodic timers
|
||||
// renewing a single domain's cert.
|
||||
type domainRenewal struct {
|
||||
m *Manager
|
||||
ck certKey
|
||||
key crypto.Signer
|
||||
|
||||
timerMu sync.Mutex
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
// start starts a cert renewal timer at the time
|
||||
// defined by the certificate expiration time exp.
|
||||
//
|
||||
// If the timer is already started, calling start is a noop.
|
||||
func (dr *domainRenewal) start(exp time.Time) {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer != nil {
|
||||
return
|
||||
}
|
||||
dr.timer = time.AfterFunc(dr.next(exp), dr.renew)
|
||||
}
|
||||
|
||||
// stop stops the cert renewal timer.
|
||||
// If the timer is already stopped, calling stop is a noop.
|
||||
func (dr *domainRenewal) stop() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
dr.timer.Stop()
|
||||
dr.timer = nil
|
||||
}
|
||||
|
||||
// renew is called periodically by a timer.
|
||||
// The first renew call is kicked off by dr.start.
|
||||
func (dr *domainRenewal) renew() {
|
||||
dr.timerMu.Lock()
|
||||
defer dr.timerMu.Unlock()
|
||||
if dr.timer == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
// TODO: rotate dr.key at some point?
|
||||
next, err := dr.do(ctx)
|
||||
if err != nil {
|
||||
next = renewJitter / 2
|
||||
next += time.Duration(pseudoRand.int63n(int64(next)))
|
||||
}
|
||||
dr.timer = time.AfterFunc(next, dr.renew)
|
||||
testDidRenewLoop(next, err)
|
||||
}
|
||||
|
||||
// updateState locks and replaces the relevant Manager.state item with the given
|
||||
// state. It additionally updates dr.key with the given state's key.
|
||||
func (dr *domainRenewal) updateState(state *certState) {
|
||||
dr.m.stateMu.Lock()
|
||||
defer dr.m.stateMu.Unlock()
|
||||
dr.key = state.key
|
||||
dr.m.state[dr.ck] = state
|
||||
}
|
||||
|
||||
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
|
||||
// Instead, it requests a new certificate independently and, upon success,
|
||||
// replaces dr.m.state item with a new one and updates cache for the given domain.
|
||||
//
|
||||
// It may lock and update the Manager.state if the expiration date of the currently
|
||||
// cached cert is far enough in the future.
|
||||
//
|
||||
// The returned value is a time interval after which the renewal should occur again.
|
||||
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
|
||||
// a race is likely unavoidable in a distributed environment
|
||||
// but we try nonetheless
|
||||
if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil {
|
||||
next := dr.next(tlscert.Leaf.NotAfter)
|
||||
if next > dr.m.renewBefore()+renewJitter {
|
||||
signer, ok := tlscert.PrivateKey.(crypto.Signer)
|
||||
if ok {
|
||||
state := &certState{
|
||||
key: signer,
|
||||
cert: tlscert.Certificate,
|
||||
leaf: tlscert.Leaf,
|
||||
}
|
||||
dr.updateState(state)
|
||||
return next, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
state := &certState{
|
||||
key: dr.key,
|
||||
cert: der,
|
||||
leaf: leaf,
|
||||
}
|
||||
tlscert, err := state.tlscert()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
dr.updateState(state)
|
||||
return dr.next(leaf.NotAfter), nil
|
||||
}
|
||||
|
||||
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
|
||||
d := expiry.Sub(timeNow()) - dr.m.renewBefore()
|
||||
// add a bit of randomness to renew deadline
|
||||
n := pseudoRand.int63n(int64(renewJitter))
|
||||
d -= time.Duration(n)
|
||||
if d < 0 {
|
||||
return 0
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
var testDidRenewLoop = func(next time.Duration, err error) {}
281  vendor/golang.org/x/crypto/acme/http.go  generated  vendored
@@ -1,281 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// retryTimer encapsulates common logic for retrying unsuccessful requests.
|
||||
// It is not safe for concurrent use.
|
||||
type retryTimer struct {
|
||||
// backoffFn provides backoff delay sequence for retries.
|
||||
// See Client.RetryBackoff doc comment.
|
||||
backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
|
||||
// n is the current retry attempt.
|
||||
n int
|
||||
}
|
||||
|
||||
func (t *retryTimer) inc() {
|
||||
t.n++
|
||||
}
|
||||
|
||||
// backoff pauses the current goroutine as described in Client.RetryBackoff.
|
||||
func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
|
||||
d := t.backoffFn(t.n, r, res)
|
||||
if d <= 0 {
|
||||
return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
|
||||
}
|
||||
wakeup := time.NewTimer(d)
|
||||
defer wakeup.Stop()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-wakeup.C:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) retryTimer() *retryTimer {
|
||||
f := c.RetryBackoff
|
||||
if f == nil {
|
||||
f = defaultBackoff
|
||||
}
|
||||
return &retryTimer{backoffFn: f}
|
||||
}
|
||||
|
||||
// defaultBackoff provides default Client.RetryBackoff implementation
|
||||
// using a truncated exponential backoff algorithm,
|
||||
// as described in Client.RetryBackoff.
|
||||
//
|
||||
// The n argument is always bounded between 1 and 30.
|
||||
// The returned value is always greater than 0.
|
||||
func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
|
||||
const max = 10 * time.Second
|
||||
var jitter time.Duration
|
||||
if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
|
||||
// Set the minimum to 1ms to avoid a case where
|
||||
// an invalid Retry-After value is parsed into 0 below,
|
||||
// resulting in the 0 returned value which would unintentionally
|
||||
// stop the retries.
|
||||
jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
|
||||
}
|
||||
if v, ok := res.Header["Retry-After"]; ok {
|
||||
return retryAfter(v[0]) + jitter
|
||||
}
|
||||
|
||||
if n < 1 {
|
||||
n = 1
|
||||
}
|
||||
if n > 30 {
|
||||
n = 30
|
||||
}
|
||||
d := time.Duration(1<<uint(n-1))*time.Second + jitter
|
||||
if d > max {
|
||||
return max
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
// retryAfter parses a Retry-After HTTP header value,
|
||||
// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
|
||||
// It returns zero value if v cannot be parsed.
|
||||
func retryAfter(v string) time.Duration {
|
||||
if i, err := strconv.Atoi(v); err == nil {
|
||||
return time.Duration(i) * time.Second
|
||||
}
|
||||
t, err := http.ParseTime(v)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return t.Sub(timeNow())
|
||||
}
|
||||
|
||||
// resOkay is a function that reports whether the provided response is okay.
|
||||
// It is expected to keep the response body unread.
|
||||
type resOkay func(*http.Response) bool
|
||||
|
||||
// wantStatus returns a function which reports whether the code
|
||||
// matches the status code of a response.
|
||||
func wantStatus(codes ...int) resOkay {
|
||||
return func(res *http.Response) bool {
|
||||
for _, code := range codes {
|
||||
if code == res.StatusCode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// get issues an unsigned GET request to the specified URL.
|
||||
// It returns a non-error value only when ok reports true.
|
||||
//
|
||||
// get retries unsuccessful attempts according to c.RetryBackoff
|
||||
// until the context is done or a non-retriable error is received.
|
||||
func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
|
||||
retry := c.retryTimer()
|
||||
for {
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := c.doNoRetry(ctx, req)
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
case ok(res):
|
||||
return res, nil
|
||||
case isRetriable(res.StatusCode):
|
||||
retry.inc()
|
||||
resErr := responseError(res)
|
||||
res.Body.Close()
|
||||
// Ignore the error value from retry.backoff
|
||||
// and return the one from last retry, as received from the CA.
|
||||
if retry.backoff(ctx, req, res) != nil {
|
||||
return nil, resErr
|
||||
}
|
||||
default:
|
||||
defer res.Body.Close()
|
||||
return nil, responseError(res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// post issues a signed POST request in JWS format using the provided key
|
||||
// to the specified URL.
|
||||
// It returns a non-error value only when ok reports true.
|
||||
//
|
||||
// post retries unsuccessful attempts according to c.RetryBackoff
|
||||
// until the context is done or a non-retriable error is received.
|
||||
// It uses postNoRetry to make individual requests.
|
||||
func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) {
|
||||
retry := c.retryTimer()
|
||||
for {
|
||||
res, req, err := c.postNoRetry(ctx, key, url, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok(res) {
|
||||
return res, nil
|
||||
}
|
||||
resErr := responseError(res)
|
||||
res.Body.Close()
|
||||
switch {
|
||||
// Check for bad nonce before isRetriable because it may have been returned
|
||||
// with an unretriable response code such as 400 Bad Request.
|
||||
case isBadNonce(resErr):
|
||||
// Consider any previously stored nonce values to be invalid.
|
||||
c.clearNonces()
|
||||
case !isRetriable(res.StatusCode):
|
||||
return nil, resErr
|
||||
}
|
||||
retry.inc()
|
||||
// Ignore the error value from retry.backoff
|
||||
// and return the one from last retry, as received from the CA.
|
||||
if err := retry.backoff(ctx, req, res); err != nil {
|
||||
return nil, resErr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// postNoRetry signs the body with the given key and POSTs it to the provided url.
|
||||
// The body argument must be JSON-serializable.
|
||||
// It is used by c.post to retry unsuccessful attempts.
|
||||
func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) {
|
||||
nonce, err := c.popNonce(ctx, url)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
b, err := jwsEncodeJSON(body, key, nonce)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", url, bytes.NewReader(b))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/jose+json")
|
||||
res, err := c.doNoRetry(ctx, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
c.addNonce(res.Header)
|
||||
return res, req, nil
|
||||
}
|
||||
|
||||
// doNoRetry issues a request req, replacing its context (if any) with ctx.
|
||||
func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
|
||||
res, err := c.httpClient().Do(req.WithContext(ctx))
|
||||
if err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Prefer the unadorned context error.
|
||||
// (The acme package had tests assuming this, previously from ctxhttp's
|
||||
// behavior, predating net/http supporting contexts natively)
|
||||
// TODO(bradfitz): reconsider this in the future. But for now this
|
||||
// requires no test updates.
|
||||
return nil, ctx.Err()
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (c *Client) httpClient() *http.Client {
|
||||
if c.HTTPClient != nil {
|
||||
return c.HTTPClient
|
||||
}
|
||||
return http.DefaultClient
|
||||
}
|
||||
|
||||
// isBadNonce reports whether err is an ACME "badnonce" error.
|
||||
func isBadNonce(err error) bool {
|
||||
// According to the spec badNonce is urn:ietf:params:acme:error:badNonce.
|
||||
// However, ACME servers in the wild return their versions of the error.
|
||||
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4
|
||||
// and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66.
|
||||
ae, ok := err.(*Error)
|
||||
return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce")
|
||||
}
|
||||
|
||||
// isRetriable reports whether a request can be retried
|
||||
// based on the response status code.
|
||||
//
|
||||
// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code.
|
||||
// Callers should parse the response and check with isBadNonce.
|
||||
func isRetriable(code int) bool {
|
||||
return code <= 399 || code >= 500 || code == http.StatusTooManyRequests
|
||||
}
|
||||
|
||||
// responseError creates an error of Error type from resp.
|
||||
func responseError(resp *http.Response) error {
|
||||
// don't care if ReadAll returns an error:
|
||||
// json.Unmarshal will fail in that case anyway
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
e := &wireError{Status: resp.StatusCode}
|
||||
if err := json.Unmarshal(b, e); err != nil {
|
||||
// this is not a regular error response:
|
||||
// populate detail with anything we received,
|
||||
// e.Status will already contain HTTP response code value
|
||||
e.Detail = string(b)
|
||||
if e.Detail == "" {
|
||||
e.Detail = resp.Status
|
||||
}
|
||||
}
|
||||
return e.error(resp.Header)
|
||||
}
153  vendor/golang.org/x/crypto/acme/jws.go  generated  vendored
@@ -1,153 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package acme
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
_ "crypto/sha512" // need for EC keys
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// jwsEncodeJSON signs claimset using provided key and a nonce.
|
||||
// The result is serialized in JSON format.
|
||||
// See https://tools.ietf.org/html/rfc7515#section-7.
|
||||
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) {
|
||||
jwk, err := jwkEncode(key.Public())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
alg, sha := jwsHasher(key)
|
||||
if alg == "" || !sha.Available() {
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce)
|
||||
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
|
||||
cs, err := json.Marshal(claimset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payload := base64.RawURLEncoding.EncodeToString(cs)
|
||||
hash := sha.New()
|
||||
hash.Write([]byte(phead + "." + payload))
|
||||
sig, err := jwsSign(key, sha, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
enc := struct {
|
||||
Protected string `json:"protected"`
|
||||
Payload string `json:"payload"`
|
||||
Sig string `json:"signature"`
|
||||
}{
|
||||
Protected: phead,
|
||||
Payload: payload,
|
||||
Sig: base64.RawURLEncoding.EncodeToString(sig),
|
||||
}
|
||||
return json.Marshal(&enc)
|
||||
}
|
||||
|
||||
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
|
||||
// The result is also suitable for creating a JWK thumbprint.
|
||||
// https://tools.ietf.org/html/rfc7517
|
||||
func jwkEncode(pub crypto.PublicKey) (string, error) {
|
||||
switch pub := pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.3.1
|
||||
n := pub.N
|
||||
e := big.NewInt(int64(pub.E))
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
|
||||
base64.RawURLEncoding.EncodeToString(e.Bytes()),
|
||||
base64.RawURLEncoding.EncodeToString(n.Bytes()),
|
||||
), nil
|
||||
case *ecdsa.PublicKey:
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1
|
||||
p := pub.Curve.Params()
|
||||
n := p.BitSize / 8
|
||||
if p.BitSize%8 != 0 {
|
||||
n++
|
||||
}
|
||||
x := pub.X.Bytes()
|
||||
if n > len(x) {
|
||||
x = append(make([]byte, n-len(x)), x...)
|
||||
}
|
||||
y := pub.Y.Bytes()
|
||||
if n > len(y) {
|
||||
y = append(make([]byte, n-len(y)), y...)
|
||||
}
|
||||
// Field order is important.
|
||||
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
|
||||
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
|
||||
p.Name,
|
||||
base64.RawURLEncoding.EncodeToString(x),
|
||||
base64.RawURLEncoding.EncodeToString(y),
|
||||
), nil
|
||||
}
|
||||
return "", ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsSign signs the digest using the given key.
|
||||
// It returns ErrUnsupportedKey if the key type is unknown.
|
||||
// The hash is used only for RSA keys.
|
||||
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return key.Sign(rand.Reader, digest, hash)
|
||||
case *ecdsa.PrivateKey:
|
||||
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rb, sb := r.Bytes(), s.Bytes()
|
||||
size := key.Params().BitSize / 8
|
||||
if size%8 > 0 {
|
||||
size++
|
||||
}
|
||||
sig := make([]byte, size*2)
|
||||
copy(sig[size-len(rb):], rb)
|
||||
copy(sig[size*2-len(sb):], sb)
|
||||
return sig, nil
|
||||
}
|
||||
return nil, ErrUnsupportedKey
|
||||
}
|
||||
|
||||
// jwsHasher indicates suitable JWS algorithm name and a hash function
|
||||
// to use for signing a digest with the provided key.
|
||||
// It returns ("", 0) if the key is not supported.
|
||||
func jwsHasher(key crypto.Signer) (string, crypto.Hash) {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return "RS256", crypto.SHA256
|
||||
case *ecdsa.PrivateKey:
|
||||
switch key.Params().Name {
|
||||
case "P-256":
|
||||
return "ES256", crypto.SHA256
|
||||
case "P-384":
|
||||
return "ES384", crypto.SHA384
|
||||
case "P-521":
|
||||
return "ES512", crypto.SHA512
|
||||
}
|
||||
}
|
||||
return "", 0
|
||||
}
|
||||
|
||||
// JWKThumbprint creates a JWK thumbprint out of pub
|
||||
// as specified in https://tools.ietf.org/html/rfc7638.
|
||||
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
|
||||
jwk, err := jwkEncode(pub)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
b := sha256.Sum256([]byte(jwk))
|
||||
return base64.RawURLEncoding.EncodeToString(b[:]), nil
|
||||
}
|
329 vendor/golang.org/x/crypto/acme/types.go generated vendored
@@ -1,329 +0,0 @@
77 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go generated vendored
@@ -1,77 +0,0 @@
201 vendor/gopkg.in/yaml.v2/LICENSE generated vendored
@@ -1,201 +0,0 @@
31 vendor/gopkg.in/yaml.v2/LICENSE.libyaml generated vendored
@@ -1,31 +0,0 @@
13 vendor/gopkg.in/yaml.v2/NOTICE generated vendored
@@ -1,13 +0,0 @@
133 vendor/gopkg.in/yaml.v2/README.md generated vendored
@@ -1,133 +0,0 @@
739 vendor/gopkg.in/yaml.v2/apic.go generated vendored
@@ -1,739 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
|
||||
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
|
||||
|
||||
// Check if we can move the queue at the beginning of the buffer.
|
||||
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
|
||||
if parser.tokens_head != len(parser.tokens) {
|
||||
copy(parser.tokens, parser.tokens[parser.tokens_head:])
|
||||
}
|
||||
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
|
||||
parser.tokens_head = 0
|
||||
}
|
||||
parser.tokens = append(parser.tokens, *token)
|
||||
if pos < 0 {
|
||||
return
|
||||
}
|
||||
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
|
||||
parser.tokens[parser.tokens_head+pos] = *token
|
||||
}
|
||||
|
||||
// Create a new parser object.
|
||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||||
*parser = yaml_parser_t{
|
||||
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||||
buffer: make([]byte, 0, input_buffer_size),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Destroy a parser object.
|
||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
||||
*parser = yaml_parser_t{}
|
||||
}
|
||||
|
||||
// String read handler.
|
||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
if parser.input_pos == len(parser.input) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(buffer, parser.input[parser.input_pos:])
|
||||
parser.input_pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Reader read handler.
|
||||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
return parser.input_reader.Read(buffer)
|
||||
}
|
||||
|
||||
// Set a string input.
|
||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_string_read_handler
|
||||
parser.input = input
|
||||
parser.input_pos = 0
|
||||
}
|
||||
|
||||
// Set a file input.
|
||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_reader_read_handler
|
||||
parser.input_reader = r
|
||||
}
|
||||
|
||||
// Set the source encoding.
|
||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||||
if parser.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the encoding only once")
|
||||
}
|
||||
parser.encoding = encoding
|
||||
}
|
||||
|
||||
// Create a new emitter object.
|
||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{
|
||||
buffer: make([]byte, output_buffer_size),
|
||||
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||||
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||||
events: make([]yaml_event_t, 0, initial_queue_size),
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an emitter object.
|
||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{}
|
||||
}
|
||||
|
||||
// String write handler.
|
||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
||||
// emitted text.
|
||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
_, err := emitter.output_writer.Write(buffer)
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a string output.
|
||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_string_write_handler
|
||||
emitter.output_buffer = output_buffer
|
||||
}
|
||||
|
||||
// Set a file output.
|
||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_writer_write_handler
|
||||
emitter.output_writer = w
|
||||
}
|
||||
|
||||
// Set the output encoding.
|
||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||||
if emitter.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the output encoding only once")
|
||||
}
|
||||
emitter.encoding = encoding
|
||||
}
|
||||
|
||||
// Set the canonical output style.
|
||||
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
|
||||
emitter.canonical = canonical
|
||||
}
|
||||
|
||||
//// Set the indentation increment.
|
||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||||
if indent < 2 || indent > 9 {
|
||||
indent = 2
|
||||
}
|
||||
emitter.best_indent = indent
|
||||
}
|
||||
|
||||
// Set the preferred line width.
|
||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||||
if width < 0 {
|
||||
width = -1
|
||||
}
|
||||
emitter.best_width = width
|
||||
}
|
||||
|
||||
// Set if unescaped non-ASCII characters are allowed.
|
||||
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
|
||||
emitter.unicode = unicode
|
||||
}
|
||||
|
||||
// Set the preferred line break character.
|
||||
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
|
||||
emitter.line_break = line_break
|
||||
}
|
||||
|
||||
///*
|
||||
// * Destroy a token object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_token_delete(yaml_token_t *token)
|
||||
//{
|
||||
// assert(token); // Non-NULL token object expected.
|
||||
//
|
||||
// switch (token.type)
|
||||
// {
|
||||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
||||
// yaml_free(token.data.tag_directive.handle);
|
||||
// yaml_free(token.data.tag_directive.prefix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ALIAS_TOKEN:
|
||||
// yaml_free(token.data.alias.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ANCHOR_TOKEN:
|
||||
// yaml_free(token.data.anchor.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_TAG_TOKEN:
|
||||
// yaml_free(token.data.tag.handle);
|
||||
// yaml_free(token.data.tag.suffix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_SCALAR_TOKEN:
|
||||
// yaml_free(token.data.scalar.value);
|
||||
// break;
|
||||
//
|
||||
// default:
|
||||
// break;
|
||||
// }
|
||||
//
|
||||
// memset(token, 0, sizeof(yaml_token_t));
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Check if a string is a valid UTF-8 sequence.
|
||||
// *
|
||||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
||||
// */
|
||||
//
|
||||
//static int
|
||||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||||
//{
|
||||
// yaml_char_t *end = start+length;
|
||||
// yaml_char_t *pointer = start;
|
||||
//
|
||||
// while (pointer < end) {
|
||||
// unsigned char octet;
|
||||
// unsigned int width;
|
||||
// unsigned int value;
|
||||
// size_t k;
|
||||
//
|
||||
// octet = pointer[0];
|
||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||||
// if (!width) return 0;
|
||||
// if (pointer+width > end) return 0;
|
||||
// for (k = 1; k < width; k ++) {
|
||||
// octet = pointer[k];
|
||||
// if ((octet & 0xC0) != 0x80) return 0;
|
||||
// value = (value << 6) + (octet & 0x3F);
|
||||
// }
|
||||
// if (!((width == 1) ||
|
||||
// (width == 2 && value >= 0x80) ||
|
||||
// (width == 3 && value >= 0x800) ||
|
||||
// (width == 4 && value >= 0x10000))) return 0;
|
||||
//
|
||||
// pointer += width;
|
||||
// }
|
||||
//
|
||||
// return 1;
|
||||
//}
|
||||
//
|
||||
|
||||
// Create STREAM-START.
|
||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_START_EVENT,
|
||||
encoding: encoding,
|
||||
}
|
||||
}
|
||||
|
||||
// Create STREAM-END.
|
||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-START.
|
||||
func yaml_document_start_event_initialize(
|
||||
event *yaml_event_t,
|
||||
version_directive *yaml_version_directive_t,
|
||||
tag_directives []yaml_tag_directive_t,
|
||||
implicit bool,
|
||||
) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_START_EVENT,
|
||||
version_directive: version_directive,
|
||||
tag_directives: tag_directives,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-END.
|
||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_END_EVENT,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create ALIAS.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
|
||||
//{
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// anchor_copy *yaml_char_t = NULL
|
||||
//
|
||||
// assert(event) // Non-NULL event object is expected.
|
||||
// assert(anchor) // Non-NULL anchor is expected.
|
||||
//
|
||||
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
|
||||
//
|
||||
// anchor_copy = yaml_strdup(anchor)
|
||||
// if (!anchor_copy)
|
||||
// return 0
|
||||
//
|
||||
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
|
||||
// Create SCALAR.
|
||||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SCALAR_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
value: value,
|
||||
implicit: plain_implicit,
|
||||
quoted_implicit: quoted_implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-START.
|
||||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-END.
|
||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_END_EVENT,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create MAPPING-START.
|
||||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
}
|
||||
|
||||
// Create MAPPING-END.
|
||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an event object.
|
||||
func yaml_event_delete(event *yaml_event_t) {
|
||||
*event = yaml_event_t{}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_initialize(document *yaml_document_t,
|
||||
// version_directive *yaml_version_directive_t,
|
||||
// tag_directives_start *yaml_tag_directive_t,
|
||||
// tag_directives_end *yaml_tag_directive_t,
|
||||
// start_implicit int, end_implicit int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// struct {
|
||||
// start *yaml_node_t
|
||||
// end *yaml_node_t
|
||||
// top *yaml_node_t
|
||||
// } nodes = { NULL, NULL, NULL }
|
||||
// version_directive_copy *yaml_version_directive_t = NULL
|
||||
// struct {
|
||||
// start *yaml_tag_directive_t
|
||||
// end *yaml_tag_directive_t
|
||||
// top *yaml_tag_directive_t
|
||||
// } tag_directives_copy = { NULL, NULL, NULL }
|
||||
// value yaml_tag_directive_t = { NULL, NULL }
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert((tag_directives_start && tag_directives_end) ||
|
||||
// (tag_directives_start == tag_directives_end))
|
||||
// // Valid tag directives are expected.
|
||||
//
|
||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// if (version_directive) {
|
||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||||
// if (!version_directive_copy) goto error
|
||||
// version_directive_copy.major = version_directive.major
|
||||
// version_directive_copy.minor = version_directive.minor
|
||||
// }
|
||||
//
|
||||
// if (tag_directives_start != tag_directives_end) {
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||||
// goto error
|
||||
// for (tag_directive = tag_directives_start
|
||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
||||
// assert(tag_directive.handle)
|
||||
// assert(tag_directive.prefix)
|
||||
// if (!yaml_check_utf8(tag_directive.handle,
|
||||
// strlen((char *)tag_directive.handle)))
|
||||
// goto error
|
||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
||||
// strlen((char *)tag_directive.prefix)))
|
||||
// goto error
|
||||
// value.handle = yaml_strdup(tag_directive.handle)
|
||||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
||||
// if (!value.handle || !value.prefix) goto error
|
||||
// if (!PUSH(&context, tag_directives_copy, value))
|
||||
// goto error
|
||||
// value.handle = NULL
|
||||
// value.prefix = NULL
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||||
// tag_directives_copy.start, tag_directives_copy.top,
|
||||
// start_implicit, end_implicit, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, nodes)
|
||||
// yaml_free(version_directive_copy)
|
||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
// }
|
||||
// STACK_DEL(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Destroy a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_document_delete(document *yaml_document_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
//
|
||||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
||||
// node yaml_node_t = POP(&context, document.nodes)
|
||||
// yaml_free(node.tag)
|
||||
// switch (node.type) {
|
||||
// case YAML_SCALAR_NODE:
|
||||
// yaml_free(node.data.scalar.value)
|
||||
// break
|
||||
// case YAML_SEQUENCE_NODE:
|
||||
// STACK_DEL(&context, node.data.sequence.items)
|
||||
// break
|
||||
// case YAML_MAPPING_NODE:
|
||||
// STACK_DEL(&context, node.data.mapping.pairs)
|
||||
// break
|
||||
// default:
|
||||
// assert(0) // Should not happen.
|
||||
// }
|
||||
// }
|
||||
// STACK_DEL(&context, document.nodes)
|
||||
//
|
||||
// yaml_free(document.version_directive)
|
||||
// for (tag_directive = document.tag_directives.start
|
||||
// tag_directive != document.tag_directives.end
|
||||
// tag_directive++) {
|
||||
// yaml_free(tag_directive.handle)
|
||||
// yaml_free(tag_directive.prefix)
|
||||
// }
|
||||
// yaml_free(document.tag_directives.start)
|
||||
//
|
||||
// memset(document, 0, sizeof(yaml_document_t))
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get a document node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_node(document *yaml_document_t, index int)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||||
// return document.nodes.start + index - 1
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get the root object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_root_node(document *yaml_document_t)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (document.nodes.top != document.nodes.start) {
|
||||
// return document.nodes.start
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a scalar node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_scalar(document *yaml_document_t,
|
||||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
||||
// style yaml_scalar_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// value_copy *yaml_char_t = NULL
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert(value) // Non-NULL value is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (length < 0) {
|
||||
// length = strlen((char *)value)
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(value, length)) goto error
|
||||
// value_copy = yaml_malloc(length+1)
|
||||
// if (!value_copy) goto error
|
||||
// memcpy(value_copy, value, length)
|
||||
// value_copy[length] = '\0'
|
||||
//
|
||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// yaml_free(tag_copy)
|
||||
// yaml_free(value_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a sequence node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_sequence(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_item_t
|
||||
// end *yaml_node_item_t
|
||||
// top *yaml_node_item_t
|
||||
// } items = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, items)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a mapping node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_mapping(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_pair_t
|
||||
// end *yaml_node_pair_t
|
||||
// top *yaml_node_pair_t
|
||||
// } pairs = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, pairs)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append an item to a sequence node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||
// sequence int, item int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(sequence > 0
|
||||
// && document.nodes.start + sequence <= document.nodes.top)
|
||||
// // Valid sequence id is required.
|
||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||
// // A sequence node is required.
|
||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||
// // Valid item id is required.
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append a pair of a key and a value to a mapping node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||
// mapping int, key int, value int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// pair yaml_node_pair_t
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(mapping > 0
|
||||
// && document.nodes.start + mapping <= document.nodes.top)
|
||||
// // Valid mapping id is required.
|
||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||
// // A mapping node is required.
|
||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||
// // Valid key id is required.
|
||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||
// // Valid value id is required.
|
||||
//
|
||||
// pair.key = key
|
||||
// pair.value = value
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
//
|
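The commented-out block above is libyaml's document-construction API (yaml_document_add_sequence, yaml_document_add_mapping and the append helpers), kept in the Go port only as reference; the package never exposes a document graph. For orientation, a minimal sketch of how the same ordered mapping plus sequence is normally produced through the public surface instead, assuming only the exported yaml.MapSlice and yaml.Marshal. This is an illustration, not code from this commit.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// A mapping with preserved key order plus a nested sequence: roughly what
	// yaml_document_add_mapping / yaml_document_add_sequence would have built
	// through the (unported) document API.
	doc := yaml.MapSlice{
		{Key: "name", Value: "beego"},
		{Key: "tags", Value: []string{"web", "framework"}},
	}

	out, err := yaml.Marshal(doc)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: beego
	// tags:
	// - web
	// - framework
}
```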
775	vendor/gopkg.in/yaml.v2/decode.go (generated, vendored)
@@ -1,775 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
documentNode = 1 << iota
|
||||
mappingNode
|
||||
sequenceNode
|
||||
scalarNode
|
||||
aliasNode
|
||||
)
|
||||
|
||||
type node struct {
|
||||
kind int
|
||||
line, column int
|
||||
tag string
|
||||
// For an alias node, alias holds the resolved alias.
|
||||
alias *node
|
||||
value string
|
||||
implicit bool
|
||||
children []*node
|
||||
anchors map[string]*node
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Parser, produces a node tree out of a libyaml event stream.
|
||||
|
||||
type parser struct {
|
||||
parser yaml_parser_t
|
||||
event yaml_event_t
|
||||
doc *node
|
||||
doneInit bool
|
||||
}
|
||||
|
||||
func newParser(b []byte) *parser {
|
||||
p := parser{}
|
||||
if !yaml_parser_initialize(&p.parser) {
|
||||
panic("failed to initialize YAML emitter")
|
||||
}
|
||||
if len(b) == 0 {
|
||||
b = []byte{'\n'}
|
||||
}
|
||||
yaml_parser_set_input_string(&p.parser, b)
|
||||
return &p
|
||||
}
|
||||
|
||||
func newParserFromReader(r io.Reader) *parser {
|
||||
p := parser{}
|
||||
if !yaml_parser_initialize(&p.parser) {
|
||||
panic("failed to initialize YAML emitter")
|
||||
}
|
||||
yaml_parser_set_input_reader(&p.parser, r)
|
||||
return &p
|
||||
}
|
||||
|
||||
func (p *parser) init() {
|
||||
if p.doneInit {
|
||||
return
|
||||
}
|
||||
p.expect(yaml_STREAM_START_EVENT)
|
||||
p.doneInit = true
|
||||
}
|
||||
|
||||
func (p *parser) destroy() {
|
||||
if p.event.typ != yaml_NO_EVENT {
|
||||
yaml_event_delete(&p.event)
|
||||
}
|
||||
yaml_parser_delete(&p.parser)
|
||||
}
|
||||
|
||||
// expect consumes an event from the event stream and
|
||||
// checks that it's of the expected type.
|
||||
func (p *parser) expect(e yaml_event_type_t) {
|
||||
if p.event.typ == yaml_NO_EVENT {
|
||||
if !yaml_parser_parse(&p.parser, &p.event) {
|
||||
p.fail()
|
||||
}
|
||||
}
|
||||
if p.event.typ == yaml_STREAM_END_EVENT {
|
||||
failf("attempted to go past the end of stream; corrupted value?")
|
||||
}
|
||||
if p.event.typ != e {
|
||||
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
|
||||
p.fail()
|
||||
}
|
||||
yaml_event_delete(&p.event)
|
||||
p.event.typ = yaml_NO_EVENT
|
||||
}
|
||||
|
||||
// peek peeks at the next event in the event stream,
|
||||
// puts the results into p.event and returns the event type.
|
||||
func (p *parser) peek() yaml_event_type_t {
|
||||
if p.event.typ != yaml_NO_EVENT {
|
||||
return p.event.typ
|
||||
}
|
||||
if !yaml_parser_parse(&p.parser, &p.event) {
|
||||
p.fail()
|
||||
}
|
||||
return p.event.typ
|
||||
}
|
||||
|
||||
func (p *parser) fail() {
|
||||
var where string
|
||||
var line int
|
||||
if p.parser.problem_mark.line != 0 {
|
||||
line = p.parser.problem_mark.line
|
||||
// Scanner errors don't iterate line before returning error
|
||||
if p.parser.error == yaml_SCANNER_ERROR {
|
||||
line++
|
||||
}
|
||||
} else if p.parser.context_mark.line != 0 {
|
||||
line = p.parser.context_mark.line
|
||||
}
|
||||
if line != 0 {
|
||||
where = "line " + strconv.Itoa(line) + ": "
|
||||
}
|
||||
var msg string
|
||||
if len(p.parser.problem) > 0 {
|
||||
msg = p.parser.problem
|
||||
} else {
|
||||
msg = "unknown problem parsing YAML content"
|
||||
}
|
||||
failf("%s%s", where, msg)
|
||||
}
|
||||
|
||||
func (p *parser) anchor(n *node, anchor []byte) {
|
||||
if anchor != nil {
|
||||
p.doc.anchors[string(anchor)] = n
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) parse() *node {
|
||||
p.init()
|
||||
switch p.peek() {
|
||||
case yaml_SCALAR_EVENT:
|
||||
return p.scalar()
|
||||
case yaml_ALIAS_EVENT:
|
||||
return p.alias()
|
||||
case yaml_MAPPING_START_EVENT:
|
||||
return p.mapping()
|
||||
case yaml_SEQUENCE_START_EVENT:
|
||||
return p.sequence()
|
||||
case yaml_DOCUMENT_START_EVENT:
|
||||
return p.document()
|
||||
case yaml_STREAM_END_EVENT:
|
||||
// Happens when attempting to decode an empty buffer.
|
||||
return nil
|
||||
default:
|
||||
panic("attempted to parse unknown event: " + p.event.typ.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) node(kind int) *node {
|
||||
return &node{
|
||||
kind: kind,
|
||||
line: p.event.start_mark.line,
|
||||
column: p.event.start_mark.column,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) document() *node {
|
||||
n := p.node(documentNode)
|
||||
n.anchors = make(map[string]*node)
|
||||
p.doc = n
|
||||
p.expect(yaml_DOCUMENT_START_EVENT)
|
||||
n.children = append(n.children, p.parse())
|
||||
p.expect(yaml_DOCUMENT_END_EVENT)
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *parser) alias() *node {
|
||||
n := p.node(aliasNode)
|
||||
n.value = string(p.event.anchor)
|
||||
n.alias = p.doc.anchors[n.value]
|
||||
if n.alias == nil {
|
||||
failf("unknown anchor '%s' referenced", n.value)
|
||||
}
|
||||
p.expect(yaml_ALIAS_EVENT)
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *parser) scalar() *node {
|
||||
n := p.node(scalarNode)
|
||||
n.value = string(p.event.value)
|
||||
n.tag = string(p.event.tag)
|
||||
n.implicit = p.event.implicit
|
||||
p.anchor(n, p.event.anchor)
|
||||
p.expect(yaml_SCALAR_EVENT)
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *parser) sequence() *node {
|
||||
n := p.node(sequenceNode)
|
||||
p.anchor(n, p.event.anchor)
|
||||
p.expect(yaml_SEQUENCE_START_EVENT)
|
||||
for p.peek() != yaml_SEQUENCE_END_EVENT {
|
||||
n.children = append(n.children, p.parse())
|
||||
}
|
||||
p.expect(yaml_SEQUENCE_END_EVENT)
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *parser) mapping() *node {
|
||||
n := p.node(mappingNode)
|
||||
p.anchor(n, p.event.anchor)
|
||||
p.expect(yaml_MAPPING_START_EVENT)
|
||||
for p.peek() != yaml_MAPPING_END_EVENT {
|
||||
n.children = append(n.children, p.parse(), p.parse())
|
||||
}
|
||||
p.expect(yaml_MAPPING_END_EVENT)
|
||||
return n
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Decoder, unmarshals a node into a provided value.
|
||||
|
||||
type decoder struct {
|
||||
doc *node
|
||||
aliases map[*node]bool
|
||||
mapType reflect.Type
|
||||
terrors []string
|
||||
strict bool
|
||||
}
|
||||
|
||||
var (
|
||||
mapItemType = reflect.TypeOf(MapItem{})
|
||||
durationType = reflect.TypeOf(time.Duration(0))
|
||||
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
|
||||
ifaceType = defaultMapType.Elem()
|
||||
timeType = reflect.TypeOf(time.Time{})
|
||||
ptrTimeType = reflect.TypeOf(&time.Time{})
|
||||
)
|
||||
|
||||
func newDecoder(strict bool) *decoder {
|
||||
d := &decoder{mapType: defaultMapType, strict: strict}
|
||||
d.aliases = make(map[*node]bool)
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
|
||||
if n.tag != "" {
|
||||
tag = n.tag
|
||||
}
|
||||
value := n.value
|
||||
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
|
||||
if len(value) > 10 {
|
||||
value = " `" + value[:7] + "...`"
|
||||
} else {
|
||||
value = " `" + value + "`"
|
||||
}
|
||||
}
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
|
||||
}
|
||||
|
||||
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
|
||||
terrlen := len(d.terrors)
|
||||
err := u.UnmarshalYAML(func(v interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
d.unmarshal(n, reflect.ValueOf(v))
|
||||
if len(d.terrors) > terrlen {
|
||||
issues := d.terrors[terrlen:]
|
||||
d.terrors = d.terrors[:terrlen]
|
||||
return &TypeError{issues}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if e, ok := err.(*TypeError); ok {
|
||||
d.terrors = append(d.terrors, e.Errors...)
|
||||
return false
|
||||
}
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
|
||||
// if a value is found to implement it.
|
||||
// It returns the initialized and dereferenced out value, whether
|
||||
// unmarshalling was already done by UnmarshalYAML, and if so whether
|
||||
// its types unmarshalled appropriately.
|
||||
//
|
||||
// If n holds a null value, prepare returns before doing anything.
|
||||
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
|
||||
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
|
||||
return out, false, false
|
||||
}
|
||||
again := true
|
||||
for again {
|
||||
again = false
|
||||
if out.Kind() == reflect.Ptr {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(out.Type().Elem()))
|
||||
}
|
||||
out = out.Elem()
|
||||
again = true
|
||||
}
|
||||
if out.CanAddr() {
|
||||
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
|
||||
good = d.callUnmarshaler(n, u)
|
||||
return out, true, good
|
||||
}
|
||||
}
|
||||
}
|
||||
return out, false, false
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
|
||||
switch n.kind {
|
||||
case documentNode:
|
||||
return d.document(n, out)
|
||||
case aliasNode:
|
||||
return d.alias(n, out)
|
||||
}
|
||||
out, unmarshaled, good := d.prepare(n, out)
|
||||
if unmarshaled {
|
||||
return good
|
||||
}
|
||||
switch n.kind {
|
||||
case scalarNode:
|
||||
good = d.scalar(n, out)
|
||||
case mappingNode:
|
||||
good = d.mapping(n, out)
|
||||
case sequenceNode:
|
||||
good = d.sequence(n, out)
|
||||
default:
|
||||
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
|
||||
}
|
||||
return good
|
||||
}
|
||||
|
||||
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
|
||||
if len(n.children) == 1 {
|
||||
d.doc = n
|
||||
d.unmarshal(n.children[0], out)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
|
||||
if d.aliases[n] {
|
||||
// TODO this could actually be allowed in some circumstances.
|
||||
failf("anchor '%s' value contains itself", n.value)
|
||||
}
|
||||
d.aliases[n] = true
|
||||
good = d.unmarshal(n.alias, out)
|
||||
delete(d.aliases, n)
|
||||
return good
|
||||
}
|
||||
|
||||
var zeroValue reflect.Value
|
||||
|
||||
func resetMap(out reflect.Value) {
|
||||
for _, k := range out.MapKeys() {
|
||||
out.SetMapIndex(k, zeroValue)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) scalar(n *node, out reflect.Value) bool {
|
||||
var tag string
|
||||
var resolved interface{}
|
||||
if n.tag == "" && !n.implicit {
|
||||
tag = yaml_STR_TAG
|
||||
resolved = n.value
|
||||
} else {
|
||||
tag, resolved = resolve(n.tag, n.value)
|
||||
if tag == yaml_BINARY_TAG {
|
||||
data, err := base64.StdEncoding.DecodeString(resolved.(string))
|
||||
if err != nil {
|
||||
failf("!!binary value contains invalid base64 data")
|
||||
}
|
||||
resolved = string(data)
|
||||
}
|
||||
}
|
||||
if resolved == nil {
|
||||
if out.Kind() == reflect.Map && !out.CanAddr() {
|
||||
resetMap(out)
|
||||
} else {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
}
|
||||
return true
|
||||
}
|
||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
||||
// We've resolved to exactly the type we want, so use that.
|
||||
out.Set(resolvedv)
|
||||
return true
|
||||
}
|
||||
// Perhaps we can use the value as a TextUnmarshaler to
|
||||
// set its value.
|
||||
if out.CanAddr() {
|
||||
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
|
||||
if ok {
|
||||
var text []byte
|
||||
if tag == yaml_BINARY_TAG {
|
||||
text = []byte(resolved.(string))
|
||||
} else {
|
||||
// We let any value be unmarshaled into TextUnmarshaler.
|
||||
// That might be more lax than we'd like, but the
|
||||
// TextUnmarshaler itself should bowl out any dubious values.
|
||||
text = []byte(n.value)
|
||||
}
|
||||
err := u.UnmarshalText(text)
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
switch out.Kind() {
|
||||
case reflect.String:
|
||||
if tag == yaml_BINARY_TAG {
|
||||
out.SetString(resolved.(string))
|
||||
return true
|
||||
}
|
||||
if resolved != nil {
|
||||
out.SetString(n.value)
|
||||
return true
|
||||
}
|
||||
case reflect.Interface:
|
||||
if resolved == nil {
|
||||
out.Set(reflect.Zero(out.Type()))
|
||||
} else if tag == yaml_TIMESTAMP_TAG {
|
||||
// It looks like a timestamp but for backward compatibility
|
||||
// reasons we set it as a string, so that code that unmarshals
|
||||
// timestamp-like values into interface{} will continue to
|
||||
// see a string and not a time.Time.
|
||||
// TODO(v3) Drop this.
|
||||
out.Set(reflect.ValueOf(n.value))
|
||||
} else {
|
||||
out.Set(reflect.ValueOf(resolved))
|
||||
}
|
||||
return true
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case int64:
|
||||
if !out.OverflowInt(resolved) {
|
||||
out.SetInt(resolved)
|
||||
return true
|
||||
}
|
||||
case uint64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
|
||||
out.SetInt(int64(resolved))
|
||||
return true
|
||||
}
|
||||
case string:
|
||||
if out.Type() == durationType {
|
||||
d, err := time.ParseDuration(resolved)
|
||||
if err == nil {
|
||||
out.SetInt(int64(d))
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case int64:
|
||||
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case uint64:
|
||||
if !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
|
||||
out.SetUint(uint64(resolved))
|
||||
return true
|
||||
}
|
||||
}
|
||||
case reflect.Bool:
|
||||
switch resolved := resolved.(type) {
|
||||
case bool:
|
||||
out.SetBool(resolved)
|
||||
return true
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
switch resolved := resolved.(type) {
|
||||
case int:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case int64:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case uint64:
|
||||
out.SetFloat(float64(resolved))
|
||||
return true
|
||||
case float64:
|
||||
out.SetFloat(resolved)
|
||||
return true
|
||||
}
|
||||
case reflect.Struct:
|
||||
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
|
||||
out.Set(resolvedv)
|
||||
return true
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if out.Type().Elem() == reflect.TypeOf(resolved) {
|
||||
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
|
||||
elem := reflect.New(out.Type().Elem())
|
||||
elem.Elem().Set(reflect.ValueOf(resolved))
|
||||
out.Set(elem)
|
||||
return true
|
||||
}
|
||||
}
|
||||
d.terror(n, tag, out)
|
||||
return false
|
||||
}
|
||||
|
||||
func settableValueOf(i interface{}) reflect.Value {
|
||||
v := reflect.ValueOf(i)
|
||||
sv := reflect.New(v.Type()).Elem()
|
||||
sv.Set(v)
|
||||
return sv
|
||||
}
|
||||
|
||||
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
|
||||
l := len(n.children)
|
||||
|
||||
var iface reflect.Value
|
||||
switch out.Kind() {
|
||||
case reflect.Slice:
|
||||
out.Set(reflect.MakeSlice(out.Type(), l, l))
|
||||
case reflect.Array:
|
||||
if l != out.Len() {
|
||||
failf("invalid array: want %d elements but got %d", out.Len(), l)
|
||||
}
|
||||
case reflect.Interface:
|
||||
// No type hints. Will have to use a generic sequence.
|
||||
iface = out
|
||||
out = settableValueOf(make([]interface{}, l))
|
||||
default:
|
||||
d.terror(n, yaml_SEQ_TAG, out)
|
||||
return false
|
||||
}
|
||||
et := out.Type().Elem()
|
||||
|
||||
j := 0
|
||||
for i := 0; i < l; i++ {
|
||||
e := reflect.New(et).Elem()
|
||||
if ok := d.unmarshal(n.children[i], e); ok {
|
||||
out.Index(j).Set(e)
|
||||
j++
|
||||
}
|
||||
}
|
||||
if out.Kind() != reflect.Array {
|
||||
out.Set(out.Slice(0, j))
|
||||
}
|
||||
if iface.IsValid() {
|
||||
iface.Set(out)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
|
||||
switch out.Kind() {
|
||||
case reflect.Struct:
|
||||
return d.mappingStruct(n, out)
|
||||
case reflect.Slice:
|
||||
return d.mappingSlice(n, out)
|
||||
case reflect.Map:
|
||||
// okay
|
||||
case reflect.Interface:
|
||||
if d.mapType.Kind() == reflect.Map {
|
||||
iface := out
|
||||
out = reflect.MakeMap(d.mapType)
|
||||
iface.Set(out)
|
||||
} else {
|
||||
slicev := reflect.New(d.mapType).Elem()
|
||||
if !d.mappingSlice(n, slicev) {
|
||||
return false
|
||||
}
|
||||
out.Set(slicev)
|
||||
return true
|
||||
}
|
||||
default:
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
outt := out.Type()
|
||||
kt := outt.Key()
|
||||
et := outt.Elem()
|
||||
|
||||
mapType := d.mapType
|
||||
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
|
||||
d.mapType = outt
|
||||
}
|
||||
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(outt))
|
||||
}
|
||||
l := len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
k := reflect.New(kt).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
kkind := k.Kind()
|
||||
if kkind == reflect.Interface {
|
||||
kkind = k.Elem().Kind()
|
||||
}
|
||||
if kkind == reflect.Map || kkind == reflect.Slice {
|
||||
failf("invalid map key: %#v", k.Interface())
|
||||
}
|
||||
e := reflect.New(et).Elem()
|
||||
if d.unmarshal(n.children[i+1], e) {
|
||||
d.setMapIndex(n.children[i+1], out, k, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
|
||||
if d.strict && out.MapIndex(k) != zeroValue {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
|
||||
return
|
||||
}
|
||||
out.SetMapIndex(k, v)
|
||||
}
|
||||
|
||||
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
|
||||
outt := out.Type()
|
||||
if outt.Elem() != mapItemType {
|
||||
d.terror(n, yaml_MAP_TAG, out)
|
||||
return false
|
||||
}
|
||||
|
||||
mapType := d.mapType
|
||||
d.mapType = outt
|
||||
|
||||
var slice []MapItem
|
||||
var l = len(n.children)
|
||||
for i := 0; i < l; i += 2 {
|
||||
if isMerge(n.children[i]) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
item := MapItem{}
|
||||
k := reflect.ValueOf(&item.Key).Elem()
|
||||
if d.unmarshal(n.children[i], k) {
|
||||
v := reflect.ValueOf(&item.Value).Elem()
|
||||
if d.unmarshal(n.children[i+1], v) {
|
||||
slice = append(slice, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
out.Set(reflect.ValueOf(slice))
|
||||
d.mapType = mapType
|
||||
return true
|
||||
}
|
||||
|
||||
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
|
||||
sinfo, err := getStructInfo(out.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
name := settableValueOf("")
|
||||
l := len(n.children)
|
||||
|
||||
var inlineMap reflect.Value
|
||||
var elemType reflect.Type
|
||||
if sinfo.InlineMap != -1 {
|
||||
inlineMap = out.Field(sinfo.InlineMap)
|
||||
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
|
||||
elemType = inlineMap.Type().Elem()
|
||||
}
|
||||
|
||||
var doneFields []bool
|
||||
if d.strict {
|
||||
doneFields = make([]bool, len(sinfo.FieldsList))
|
||||
}
|
||||
for i := 0; i < l; i += 2 {
|
||||
ni := n.children[i]
|
||||
if isMerge(ni) {
|
||||
d.merge(n.children[i+1], out)
|
||||
continue
|
||||
}
|
||||
if !d.unmarshal(ni, name) {
|
||||
continue
|
||||
}
|
||||
if info, ok := sinfo.FieldsMap[name.String()]; ok {
|
||||
if d.strict {
|
||||
if doneFields[info.Id] {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
|
||||
continue
|
||||
}
|
||||
doneFields[info.Id] = true
|
||||
}
|
||||
var field reflect.Value
|
||||
if info.Inline == nil {
|
||||
field = out.Field(info.Num)
|
||||
} else {
|
||||
field = out.FieldByIndex(info.Inline)
|
||||
}
|
||||
d.unmarshal(n.children[i+1], field)
|
||||
} else if sinfo.InlineMap != -1 {
|
||||
if inlineMap.IsNil() {
|
||||
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
|
||||
}
|
||||
value := reflect.New(elemType).Elem()
|
||||
d.unmarshal(n.children[i+1], value)
|
||||
d.setMapIndex(n.children[i+1], inlineMap, name, value)
|
||||
} else if d.strict {
|
||||
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func failWantMap() {
|
||||
failf("map merge requires map or sequence of maps as the value")
|
||||
}
|
||||
|
||||
func (d *decoder) merge(n *node, out reflect.Value) {
|
||||
switch n.kind {
|
||||
case mappingNode:
|
||||
d.unmarshal(n, out)
|
||||
case aliasNode:
|
||||
an, ok := d.doc.anchors[n.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(n, out)
|
||||
case sequenceNode:
|
||||
// Step backwards as earlier nodes take precedence.
|
||||
for i := len(n.children) - 1; i >= 0; i-- {
|
||||
ni := n.children[i]
|
||||
if ni.kind == aliasNode {
|
||||
an, ok := d.doc.anchors[ni.value]
|
||||
if ok && an.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
} else if ni.kind != mappingNode {
|
||||
failWantMap()
|
||||
}
|
||||
d.unmarshal(ni, out)
|
||||
}
|
||||
default:
|
||||
failWantMap()
|
||||
}
|
||||
}
|
||||
|
||||
func isMerge(n *node) bool {
|
||||
return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
|
||||
}
|
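The decoder above backs the package's exported entry points. As a usage sketch, assuming only the public yaml.Unmarshal and yaml.UnmarshalStrict (the strict flag is what newDecoder receives), decoding a struct with a time.Duration field and watching strict mode reject an unknown key looks roughly like this; it is illustrative, not part of the commit.

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Name    string        `yaml:"name"`
	Timeout time.Duration `yaml:"timeout"` // "5s" is parsed via the durationType branch in scalar()
}

func main() {
	data := []byte("name: app\ntimeout: 5s\nextra: true\n")

	// Lenient decoding: the unknown key "extra" is silently ignored.
	var c Config
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Name, c.Timeout) // app 5s

	// Strict decoding: the same unknown key surfaces as a *yaml.TypeError,
	// produced by the d.strict branches in mappingStruct above.
	if err := yaml.UnmarshalStrict(data, &c); err != nil {
		fmt.Println("strict:", err)
	}
}
```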
1685	vendor/gopkg.in/yaml.v2/emitterc.go (generated, vendored): file diff suppressed because it is too large
362	vendor/gopkg.in/yaml.v2/encode.go (generated, vendored)
@@ -1,362 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
emitter yaml_emitter_t
|
||||
event yaml_event_t
|
||||
out []byte
|
||||
flow bool
|
||||
// doneInit holds whether the initial stream_start_event has been
|
||||
// emitted.
|
||||
doneInit bool
|
||||
}
|
||||
|
||||
func newEncoder() *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func newEncoderWithWriter(w io.Writer) *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_writer(&e.emitter, w)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *encoder) init() {
|
||||
if e.doneInit {
|
||||
return
|
||||
}
|
||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
||||
e.emit()
|
||||
e.doneInit = true
|
||||
}
|
||||
|
||||
func (e *encoder) finish() {
|
||||
e.emitter.open_ended = false
|
||||
yaml_stream_end_event_initialize(&e.event)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) destroy() {
|
||||
yaml_emitter_delete(&e.emitter)
|
||||
}
|
||||
|
||||
func (e *encoder) emit() {
|
||||
// This will internally delete the e.event value.
|
||||
e.must(yaml_emitter_emit(&e.emitter, &e.event))
|
||||
}
|
||||
|
||||
func (e *encoder) must(ok bool) {
|
||||
if !ok {
|
||||
msg := e.emitter.problem
|
||||
if msg == "" {
|
||||
msg = "unknown problem generating YAML content"
|
||||
}
|
||||
failf("%s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
|
||||
e.init()
|
||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
||||
e.emit()
|
||||
e.marshal(tag, in)
|
||||
yaml_document_end_event_initialize(&e.event, true)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) marshal(tag string, in reflect.Value) {
|
||||
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
iface := in.Interface()
|
||||
switch m := iface.(type) {
|
||||
case time.Time, *time.Time:
|
||||
// Although time.Time implements TextMarshaler,
|
||||
// we don't want to treat it as a string for YAML
|
||||
// purposes because YAML has special support for
|
||||
// timestamps.
|
||||
case Marshaler:
|
||||
v, err := m.MarshalYAML()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
if v == nil {
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
in = reflect.ValueOf(v)
|
||||
case encoding.TextMarshaler:
|
||||
text, err := m.MarshalText()
|
||||
if err != nil {
|
||||
fail(err)
|
||||
}
|
||||
in = reflect.ValueOf(string(text))
|
||||
case nil:
|
||||
e.nilv()
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Interface:
|
||||
e.marshal(tag, in.Elem())
|
||||
case reflect.Map:
|
||||
e.mapv(tag, in)
|
||||
case reflect.Ptr:
|
||||
if in.Type() == ptrTimeType {
|
||||
e.timev(tag, in.Elem())
|
||||
} else {
|
||||
e.marshal(tag, in.Elem())
|
||||
}
|
||||
case reflect.Struct:
|
||||
if in.Type() == timeType {
|
||||
e.timev(tag, in)
|
||||
} else {
|
||||
e.structv(tag, in)
|
||||
}
|
||||
case reflect.Slice, reflect.Array:
|
||||
if in.Type().Elem() == mapItemType {
|
||||
e.itemsv(tag, in)
|
||||
} else {
|
||||
e.slicev(tag, in)
|
||||
}
|
||||
case reflect.String:
|
||||
e.stringv(tag, in)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if in.Type() == durationType {
|
||||
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
|
||||
} else {
|
||||
e.intv(tag, in)
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
e.uintv(tag, in)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
e.floatv(tag, in)
|
||||
case reflect.Bool:
|
||||
e.boolv(tag, in)
|
||||
default:
|
||||
panic("cannot marshal type: " + in.Type().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) mapv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
keys := keyList(in.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
e.marshal("", k)
|
||||
e.marshal("", in.MapIndex(k))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) itemsv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
|
||||
for _, item := range slice {
|
||||
e.marshal("", reflect.ValueOf(item.Key))
|
||||
e.marshal("", reflect.ValueOf(item.Value))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) structv(tag string, in reflect.Value) {
|
||||
sinfo, err := getStructInfo(in.Type())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
e.mappingv(tag, func() {
|
||||
for _, info := range sinfo.FieldsList {
|
||||
var value reflect.Value
|
||||
if info.Inline == nil {
|
||||
value = in.Field(info.Num)
|
||||
} else {
|
||||
value = in.FieldByIndex(info.Inline)
|
||||
}
|
||||
if info.OmitEmpty && isZero(value) {
|
||||
continue
|
||||
}
|
||||
e.marshal("", reflect.ValueOf(info.Key))
|
||||
e.flow = info.Flow
|
||||
e.marshal("", value)
|
||||
}
|
||||
if sinfo.InlineMap >= 0 {
|
||||
m := in.Field(sinfo.InlineMap)
|
||||
if m.Len() > 0 {
|
||||
e.flow = false
|
||||
keys := keyList(m.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
if _, found := sinfo.FieldsMap[k.String()]; found {
|
||||
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
|
||||
}
|
||||
e.marshal("", k)
|
||||
e.flow = false
|
||||
e.marshal("", m.MapIndex(k))
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) mappingv(tag string, f func()) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_MAPPING_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_MAPPING_STYLE
|
||||
}
|
||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
||||
e.emit()
|
||||
f()
|
||||
yaml_mapping_end_event_initialize(&e.event)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_SEQUENCE_STYLE
|
||||
}
|
||||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||
e.emit()
|
||||
n := in.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
e.marshal("", in.Index(i))
|
||||
}
|
||||
e.must(yaml_sequence_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
|
||||
//
|
||||
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
|
||||
// in YAML 1.2 and by this package, but these should be marshalled quoted for
|
||||
// the time being for compatibility with other parsers.
|
||||
func isBase60Float(s string) (result bool) {
|
||||
// Fast path.
|
||||
if s == "" {
|
||||
return false
|
||||
}
|
||||
c := s[0]
|
||||
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
|
||||
return false
|
||||
}
|
||||
// Do the full match.
|
||||
return base60float.MatchString(s)
|
||||
}
|
||||
|
||||
// From http://yaml.org/type/float.html, except the regular expression there
|
||||
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
|
||||
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||||
|
||||
func (e *encoder) stringv(tag string, in reflect.Value) {
|
||||
var style yaml_scalar_style_t
|
||||
s := in.String()
|
||||
canUsePlain := true
|
||||
switch {
|
||||
case !utf8.ValidString(s):
|
||||
if tag == yaml_BINARY_TAG {
|
||||
failf("explicitly tagged !!binary data must be base64-encoded")
|
||||
}
|
||||
if tag != "" {
|
||||
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
|
||||
}
|
||||
// It can't be encoded directly as YAML so use a binary tag
|
||||
// and encode it as base64.
|
||||
tag = yaml_BINARY_TAG
|
||||
s = encodeBase64(s)
|
||||
case tag == "":
|
||||
// Check to see if it would resolve to a specific
|
||||
// tag when encoded unquoted. If it doesn't,
|
||||
// there's no need to quote it.
|
||||
rtag, _ := resolve("", s)
|
||||
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
|
||||
}
|
||||
// Note: it's possible for user code to emit invalid YAML
|
||||
// if they explicitly specify a tag and a string containing
|
||||
// text that's incompatible with that tag.
|
||||
switch {
|
||||
case strings.Contains(s, "\n"):
|
||||
style = yaml_LITERAL_SCALAR_STYLE
|
||||
case canUsePlain:
|
||||
style = yaml_PLAIN_SCALAR_STYLE
|
||||
default:
|
||||
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
|
||||
}
|
||||
e.emitScalar(s, "", tag, style)
|
||||
}
|
||||
|
||||
func (e *encoder) boolv(tag string, in reflect.Value) {
|
||||
var s string
|
||||
if in.Bool() {
|
||||
s = "true"
|
||||
} else {
|
||||
s = "false"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) intv(tag string, in reflect.Value) {
|
||||
s := strconv.FormatInt(in.Int(), 10)
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) uintv(tag string, in reflect.Value) {
|
||||
s := strconv.FormatUint(in.Uint(), 10)
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) timev(tag string, in reflect.Value) {
|
||||
t := in.Interface().(time.Time)
|
||||
s := t.Format(time.RFC3339Nano)
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) floatv(tag string, in reflect.Value) {
|
||||
// Issue #352: When formatting, use the precision of the underlying value
|
||||
precision := 64
|
||||
if in.Kind() == reflect.Float32 {
|
||||
precision = 32
|
||||
}
|
||||
|
||||
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
|
||||
switch s {
|
||||
case "+Inf":
|
||||
s = ".inf"
|
||||
case "-Inf":
|
||||
s = "-.inf"
|
||||
case "NaN":
|
||||
s = ".nan"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) nilv() {
|
||||
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
|
||||
}
|
||||
|
||||
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
|
||||
implicit := tag == ""
|
||||
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
|
||||
e.emit()
|
||||
}
|
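The encoder's structv/mappingv path is driven by the struct tags read through getStructInfo: omitempty (the isZero check), flow (the e.flow toggle) and inline (the InlineMap handling). A small sketch of those options through the public yaml.Marshal, given as an illustration rather than code from this commit:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Server struct {
	Host  string            `yaml:"host"`
	Port  int               `yaml:"port,omitempty"` // skipped when zero (isZero check in structv)
	Tags  []string          `yaml:"tags,flow"`      // emitted in flow style via e.flow
	Extra map[string]string `yaml:",inline"`        // merged into the parent mapping (InlineMap)
}

func main() {
	s := Server{
		Host:  "localhost",
		Tags:  []string{"web", "api"},
		Extra: map[string]string{"region": "eu"},
	}
	out, err := yaml.Marshal(s)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// host: localhost
	// tags: [web, api]
	// region: eu
}
```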
5	vendor/gopkg.in/yaml.v2/go.mod (generated, vendored)
@@ -1,5 +0,0 @@
module "gopkg.in/yaml.v2"

require (
	"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)
1095	vendor/gopkg.in/yaml.v2/parserc.go (generated, vendored): file diff suppressed because it is too large
412	vendor/gopkg.in/yaml.v2/readerc.go (generated, vendored)
@@ -1,412 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Set the reader error and return 0.
|
||||
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
|
||||
parser.error = yaml_READER_ERROR
|
||||
parser.problem = problem
|
||||
parser.problem_offset = offset
|
||||
parser.problem_value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Byte order marks.
|
||||
const (
|
||||
bom_UTF8 = "\xef\xbb\xbf"
|
||||
bom_UTF16LE = "\xff\xfe"
|
||||
bom_UTF16BE = "\xfe\xff"
|
||||
)
|
||||
|
||||
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
|
||||
// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
|
||||
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
|
||||
// Ensure that we had enough bytes in the raw buffer.
|
||||
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Determine the encoding.
|
||||
buf := parser.raw_buffer
|
||||
pos := parser.raw_buffer_pos
|
||||
avail := len(buf) - pos
|
||||
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
|
||||
parser.encoding = yaml_UTF16LE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
|
||||
parser.encoding = yaml_UTF16BE_ENCODING
|
||||
parser.raw_buffer_pos += 2
|
||||
parser.offset += 2
|
||||
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
parser.raw_buffer_pos += 3
|
||||
parser.offset += 3
|
||||
} else {
|
||||
parser.encoding = yaml_UTF8_ENCODING
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Update the raw buffer.
|
||||
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
|
||||
size_read := 0
|
||||
|
||||
// Return if the raw buffer is full.
|
||||
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Return on EOF.
|
||||
if parser.eof {
|
||||
return true
|
||||
}
|
||||
|
||||
// Move the remaining bytes in the raw buffer to the beginning.
|
||||
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
|
||||
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
|
||||
}
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
|
||||
parser.raw_buffer_pos = 0
|
||||
|
||||
// Call the read handler to fill the buffer.
|
||||
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
|
||||
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
|
||||
if err == io.EOF {
|
||||
parser.eof = true
|
||||
} else if err != nil {
|
||||
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure that the buffer contains at least `length` characters.
|
||||
// Return true on success, false on failure.
|
||||
//
|
||||
// The length is supposed to be significantly less that the buffer size.
|
||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
||||
if parser.read_handler == nil {
|
||||
panic("read handler must be set")
|
||||
}
|
||||
|
||||
// [Go] This function was changed to guarantee the requested length size at EOF.
|
||||
// The fact we need to do this is pretty awful, but the description above implies
|
||||
// for that to be the case, and there are tests
|
||||
|
||||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
// [Go] ACTUALLY! Read the documentation of this function above.
|
||||
// This is just broken. To return true, we need to have the
|
||||
// given length in the buffer. Not doing that means every single
|
||||
// check that calls this function to make sure the buffer has a
|
||||
// given length is Go) panicking; or C) accessing invalid memory.
|
||||
//return true
|
||||
}
|
||||
|
||||
// Return if the buffer contains enough characters.
|
||||
if parser.unread >= length {
|
||||
return true
|
||||
}
|
||||
|
||||
// Determine the input encoding if it is not known yet.
|
||||
if parser.encoding == yaml_ANY_ENCODING {
|
||||
if !yaml_parser_determine_encoding(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Move the unread characters to the beginning of the buffer.
|
||||
buffer_len := len(parser.buffer)
|
||||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
||||
buffer_len -= parser.buffer_pos
|
||||
parser.buffer_pos = 0
|
||||
} else if parser.buffer_pos == buffer_len {
|
||||
buffer_len = 0
|
||||
parser.buffer_pos = 0
|
||||
}
|
||||
|
||||
// Open the whole buffer for writing, and cut it before returning.
|
||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
||||
|
||||
// Fill the buffer until it has enough characters.
|
||||
first := true
|
||||
for parser.unread < length {
|
||||
|
||||
// Fill the raw buffer if necessary.
|
||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return false
|
||||
}
|
||||
}
|
||||
first = false
|
||||
|
||||
// Decode the raw buffer.
|
||||
inner:
|
||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
||||
var value rune
|
||||
var width int
|
||||
|
||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
||||
|
||||
// Decode the next character.
|
||||
switch parser.encoding {
|
||||
case yaml_UTF8_ENCODING:
|
||||
// Decode a UTF-8 character. Check RFC 3629
|
||||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
||||
//
|
||||
// The following table (taken from the RFC) is used for
|
||||
// decoding.
|
||||
//
|
||||
// Char. number range | UTF-8 octet sequence
|
||||
// (hexadecimal) | (binary)
|
||||
// --------------------+------------------------------------
|
||||
// 0000 0000-0000 007F | 0xxxxxxx
|
||||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
||||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
||||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
//
|
||||
// Additionally, the characters in the range 0xD800-0xDFFF
|
||||
// are prohibited as they are reserved for use with UTF-16
|
||||
// surrogate pairs.
|
||||
|
||||
// Determine the length of the UTF-8 sequence.
|
||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
width = 1
|
||||
case octet&0xE0 == 0xC0:
|
||||
width = 2
|
||||
case octet&0xF0 == 0xE0:
|
||||
width = 3
|
||||
case octet&0xF8 == 0xF0:
|
||||
width = 4
|
||||
default:
|
||||
// The leading octet is invalid.
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid leading UTF-8 octet",
|
||||
parser.offset, int(octet))
|
||||
}
|
||||
|
||||
// Check if the raw buffer contains an incomplete character.
|
||||
if width > raw_unread {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-8 octet sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Decode the leading octet.
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
value = rune(octet & 0x7F)
|
||||
case octet&0xE0 == 0xC0:
|
||||
value = rune(octet & 0x1F)
|
||||
case octet&0xF0 == 0xE0:
|
||||
value = rune(octet & 0x0F)
|
||||
case octet&0xF8 == 0xF0:
|
||||
value = rune(octet & 0x07)
|
||||
default:
|
||||
value = 0
|
||||
}
|
||||
|
||||
// Check and decode the trailing octets.
|
||||
for k := 1; k < width; k++ {
|
||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
||||
|
||||
// Check if the octet is valid.
|
||||
if (octet & 0xC0) != 0x80 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid trailing UTF-8 octet",
|
||||
parser.offset+k, int(octet))
|
||||
}
|
||||
|
||||
// Decode the octet.
|
||||
value = (value << 6) + rune(octet&0x3F)
|
||||
}
|
||||
|
||||
// Check the length of the sequence against the value.
|
||||
switch {
|
||||
case width == 1:
|
||||
case width == 2 && value >= 0x80:
|
||||
case width == 3 && value >= 0x800:
|
||||
case width == 4 && value >= 0x10000:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid length of a UTF-8 sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
|
||||
// Check the range of the value.
|
||||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid Unicode character",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
||||
var low, high int
|
||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
||||
low, high = 0, 1
|
||||
} else {
|
||||
low, high = 1, 0
|
||||
}
|
||||
|
||||
// The UTF-16 encoding is not as simple as one might
|
||||
// naively think. Check RFC 2781
|
||||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
||||
//
|
||||
// Normally, two subsequent bytes describe a Unicode
|
||||
// character. However a special technique (called a
|
||||
// surrogate pair) is used for specifying character
|
||||
// values larger than 0xFFFF.
|
||||
//
|
||||
// A surrogate pair consists of two pseudo-characters:
|
||||
// high surrogate area (0xD800-0xDBFF)
|
||||
// low surrogate area (0xDC00-0xDFFF)
|
||||
//
|
||||
// The following formulas are used for decoding
|
||||
// and encoding characters using surrogate pairs:
|
||||
//
|
||||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
||||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
||||
// W1 = 110110yyyyyyyyyy
|
||||
// W2 = 110111xxxxxxxxxx
|
||||
//
|
||||
// where U is the character value, W1 is the high surrogate
|
||||
// area, W2 is the low surrogate area.
|
||||
|
||||
// Check for incomplete UTF-16 character.
|
||||
if raw_unread < 2 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 character",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the character.
|
||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
||||
|
||||
// Check for unexpected low surrogate area.
|
||||
if value&0xFC00 == 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"unexpected low surrogate area",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Check for a high surrogate area.
|
||||
if value&0xFC00 == 0xD800 {
|
||||
width = 4
|
||||
|
||||
// Check for incomplete surrogate pair.
|
||||
if raw_unread < 4 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 surrogate pair",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the next character.
|
||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
||||
|
||||
// Check for a low surrogate area.
|
||||
if value2&0xFC00 != 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"expected low surrogate area",
|
||||
parser.offset+2, int(value2))
|
||||
}
|
||||
|
||||
// Generate the value of the surrogate pair.
|
||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
|
||||
default:
|
||||
panic("impossible")
|
||||
}
|
||||
|
||||
// Check if the character is in the allowed range:
|
||||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
||||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
||||
// | [#x10000-#x10FFFF] (32 bit)
|
||||
switch {
|
||||
case value == 0x09:
|
||||
case value == 0x0A:
|
||||
case value == 0x0D:
|
||||
case value >= 0x20 && value <= 0x7E:
|
||||
case value == 0x85:
|
||||
case value >= 0xA0 && value <= 0xD7FF:
|
||||
case value >= 0xE000 && value <= 0xFFFD:
|
||||
case value >= 0x10000 && value <= 0x10FFFF:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"control characters are not allowed",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Move the raw pointers.
|
||||
parser.raw_buffer_pos += width
|
||||
parser.offset += width
|
||||
|
||||
// Finally put the character into the buffer.
|
||||
if value <= 0x7F {
|
||||
// 0000 0000-0000 007F . 0xxxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(value)
|
||||
buffer_len += 1
|
||||
} else if value <= 0x7FF {
|
||||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 2
|
||||
} else if value <= 0xFFFF {
|
||||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 3
|
||||
} else {
|
||||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 4
|
||||
}
|
||||
|
||||
parser.unread++
|
||||
}
|
||||
|
||||
// On EOF, put NUL into the buffer and return.
|
||||
if parser.eof {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
parser.unread++
|
||||
break
|
||||
}
|
||||
}
|
||||
// [Go] Read the documentation of this function above. To return true,
|
||||
// we need to have the given length in the buffer. Not doing that means
|
||||
// every single check that calls this function to make sure the buffer
|
||||
// has a given length is Go) panicking; or C) accessing invalid memory.
|
||||
// This happens here due to the EOF above breaking early.
|
||||
for buffer_len < length {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
}
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return true
|
||||
}
|
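The UTF-16 branch above quotes the RFC 2781 surrogate-pair formula (U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)) in its comments before applying it inline. The standalone Go sketch below reproduces just that arithmetic as a sanity check; the helper name is made up for illustration.

```go
package main

import "fmt"

// decodeSurrogatePair applies the RFC 2781 formula used by the reader:
//   U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF)
// where W1 lies in the high surrogate area (0xD800-0xDBFF) and W2 in the
// low surrogate area (0xDC00-0xDFFF).
func decodeSurrogatePair(w1, w2 uint16) (rune, bool) {
	if w1&0xFC00 != 0xD800 || w2&0xFC00 != 0xDC00 {
		return 0, false
	}
	return 0x10000 + (rune(w1&0x3FF) << 10) + rune(w2&0x3FF), true
}

func main() {
	// U+1F600 is encoded in UTF-16 as the surrogate pair D83D DE00.
	r, ok := decodeSurrogatePair(0xD83D, 0xDE00)
	fmt.Printf("%t %U\n", ok, r) // true U+1F600
}
```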
258	vendor/gopkg.in/yaml.v2/resolve.go (generated, vendored)
@@ -1,258 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type resolveMapItem struct {
|
||||
value interface{}
|
||||
tag string
|
||||
}
|
||||
|
||||
var resolveTable = make([]byte, 256)
|
||||
var resolveMap = make(map[string]resolveMapItem)
|
||||
|
||||
func init() {
|
||||
t := resolveTable
|
||||
t[int('+')] = 'S' // Sign
|
||||
t[int('-')] = 'S'
|
||||
for _, c := range "0123456789" {
|
||||
t[int(c)] = 'D' // Digit
|
||||
}
|
||||
for _, c := range "yYnNtTfFoO~" {
|
||||
t[int(c)] = 'M' // In map
|
||||
}
|
||||
t[int('.')] = '.' // Float (potentially in map)
|
||||
|
||||
var resolveMapList = []struct {
|
||||
v interface{}
|
||||
tag string
|
||||
l []string
|
||||
}{
|
||||
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
|
||||
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
|
||||
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
|
||||
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
|
||||
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
|
||||
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
|
||||
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
|
||||
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
|
||||
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||
{"<<", yaml_MERGE_TAG, []string{"<<"}},
|
||||
}
|
||||
|
||||
m := resolveMap
|
||||
for _, item := range resolveMapList {
|
||||
for _, s := range item.l {
|
||||
m[s] = resolveMapItem{item.v, item.tag}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const longTagPrefix = "tag:yaml.org,2002:"
|
||||
|
||||
func shortTag(tag string) string {
|
||||
// TODO This can easily be made faster and produce less garbage.
|
||||
if strings.HasPrefix(tag, longTagPrefix) {
|
||||
return "!!" + tag[len(longTagPrefix):]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func longTag(tag string) string {
|
||||
if strings.HasPrefix(tag, "!!") {
|
||||
return longTagPrefix + tag[2:]
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func resolvableTag(tag string) bool {
|
||||
switch tag {
|
||||
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
|
||||
|
||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
||||
if !resolvableTag(tag) {
|
||||
return tag, in
|
||||
}
|
||||
|
||||
defer func() {
|
||||
switch tag {
|
||||
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
|
||||
return
|
||||
case yaml_FLOAT_TAG:
|
||||
if rtag == yaml_INT_TAG {
|
||||
switch v := out.(type) {
|
||||
case int64:
|
||||
rtag = yaml_FLOAT_TAG
|
||||
out = float64(v)
|
||||
return
|
||||
case int:
|
||||
rtag = yaml_FLOAT_TAG
|
||||
out = float64(v)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
|
||||
}()
|
||||
|
||||
// Any data is accepted as a !!str or !!binary.
|
||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||
hint := byte('N')
|
||||
if in != "" {
|
||||
hint = resolveTable[in[0]]
|
||||
}
|
||||
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
|
||||
// Handle things we can lookup in a map.
|
||||
if item, ok := resolveMap[in]; ok {
|
||||
return item.tag, item.value
|
||||
}
|
||||
|
||||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
||||
// are purposefully unsupported here. They're still quoted on
|
||||
// the way out for compatibility with other parsers, though.
|
||||
|
||||
switch hint {
|
||||
case 'M':
|
||||
// We've already checked the map above.
|
||||
|
||||
case '.':
|
||||
// Not in the map, so maybe a normal float.
|
||||
floatv, err := strconv.ParseFloat(in, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
|
||||
case 'D', 'S':
|
||||
// Int, float, or timestamp.
|
||||
// Only try values as a timestamp if the value is unquoted or there's an explicit
|
||||
// !!timestamp tag.
|
||||
if tag == "" || tag == yaml_TIMESTAMP_TAG {
|
||||
t, ok := parseTimestamp(in)
|
||||
if ok {
|
||||
return yaml_TIMESTAMP_TAG, t
|
||||
}
|
||||
}
|
||||
|
||||
plain := strings.Replace(in, "_", "", -1)
|
||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain, 0, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, uintv
|
||||
}
|
||||
if yamlStyleFloat.MatchString(plain) {
|
||||
floatv, err := strconv.ParseFloat(plain, 64)
|
||||
if err == nil {
|
||||
return yaml_FLOAT_TAG, floatv
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return yaml_INT_TAG, uintv
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0b") {
|
||||
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
|
||||
if err == nil {
|
||||
if true || intv == int64(int(intv)) {
|
||||
return yaml_INT_TAG, int(intv)
|
||||
} else {
|
||||
return yaml_INT_TAG, intv
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
|
||||
}
|
||||
}
|
||||
return yaml_STR_TAG, in
|
||||
}
|
||||
|
||||
// encodeBase64 encodes s as base64 that is broken up into multiple lines
|
||||
// as appropriate for the resulting length.
|
||||
func encodeBase64(s string) string {
|
||||
const lineLen = 70
|
||||
encLen := base64.StdEncoding.EncodedLen(len(s))
|
||||
lines := encLen/lineLen + 1
|
||||
buf := make([]byte, encLen*2+lines)
|
||||
in := buf[0:encLen]
|
||||
out := buf[encLen:]
|
||||
base64.StdEncoding.Encode(in, []byte(s))
|
||||
k := 0
|
||||
for i := 0; i < len(in); i += lineLen {
|
||||
j := i + lineLen
|
||||
if j > len(in) {
|
||||
j = len(in)
|
||||
}
|
||||
k += copy(out[k:], in[i:j])
|
||||
if lines > 1 {
|
||||
out[k] = '\n'
|
||||
k++
|
||||
}
|
||||
}
|
||||
return string(out[:k])
|
||||
}
|
||||
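encodeBase64 is used when a scalar cannot be emitted as plain UTF-8 text: Marshal then falls back to the !!binary tag and wraps the base64 payload at 70 columns. A hedged sketch of that round trip (the map and its contents are invented, and the exact quoting of the emitted scalar may differ):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// A value that is not valid UTF-8 forces the !!binary fallback.
	raw := map[string]string{"blob": string([]byte{0xff, 0xfe, 0x00, 0x01})}

	out, err := yaml.Marshal(raw)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // something like: blob: !!binary //4AAQ==

	// Unmarshal decodes the base64 payload back into the original bytes.
	var back map[string]string
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(len(back["blob"])) // 4
}
```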
|
||||
// This is a subset of the formats allowed by the regular expression
|
||||
// defined at http://yaml.org/type/timestamp.html.
|
||||
var allowedTimestampFormats = []string{
|
||||
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
|
||||
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
|
||||
"2006-1-2 15:4:5.999999999", // space separated with no time zone
|
||||
"2006-1-2", // date only
|
||||
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
|
||||
// from the set of examples.
|
||||
}
|
||||
|
||||
// parseTimestamp parses s as a timestamp string and
|
||||
// returns the timestamp and reports whether it succeeded.
|
||||
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
|
||||
func parseTimestamp(s string) (time.Time, bool) {
|
||||
// TODO write code to check all the formats supported by
|
||||
// http://yaml.org/type/timestamp.html instead of using time.Parse.
|
||||
|
||||
// Quick check: all date formats start with YYYY-.
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if c := s[i]; c < '0' || c > '9' {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i != 4 || i == len(s) || s[i] != '-' {
|
||||
return time.Time{}, false
|
||||
}
|
||||
for _, format := range allowedTimestampFormats {
|
||||
if t, err := time.Parse(format, s); err == nil {
|
||||
return t, true
|
||||
}
|
||||
}
|
||||
return time.Time{}, false
|
||||
}
|
2696
vendor/gopkg.in/yaml.v2/scannerc.go
generated
vendored
File diff suppressed because it is too large
113
vendor/gopkg.in/yaml.v2/sorter.go
generated
vendored
@ -1,113 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
type keyList []reflect.Value
|
||||
|
||||
func (l keyList) Len() int { return len(l) }
|
||||
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||
func (l keyList) Less(i, j int) bool {
|
||||
a := l[i]
|
||||
b := l[j]
|
||||
ak := a.Kind()
|
||||
bk := b.Kind()
|
||||
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
|
||||
a = a.Elem()
|
||||
ak = a.Kind()
|
||||
}
|
||||
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
|
||||
b = b.Elem()
|
||||
bk = b.Kind()
|
||||
}
|
||||
af, aok := keyFloat(a)
|
||||
bf, bok := keyFloat(b)
|
||||
if aok && bok {
|
||||
if af != bf {
|
||||
return af < bf
|
||||
}
|
||||
if ak != bk {
|
||||
return ak < bk
|
||||
}
|
||||
return numLess(a, b)
|
||||
}
|
||||
if ak != reflect.String || bk != reflect.String {
|
||||
return ak < bk
|
||||
}
|
||||
ar, br := []rune(a.String()), []rune(b.String())
|
||||
for i := 0; i < len(ar) && i < len(br); i++ {
|
||||
if ar[i] == br[i] {
|
||||
continue
|
||||
}
|
||||
al := unicode.IsLetter(ar[i])
|
||||
bl := unicode.IsLetter(br[i])
|
||||
if al && bl {
|
||||
return ar[i] < br[i]
|
||||
}
|
||||
if al || bl {
|
||||
return bl
|
||||
}
|
||||
var ai, bi int
|
||||
var an, bn int64
|
||||
if ar[i] == '0' || br[i] == '0' {
|
||||
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
|
||||
if ar[j] != '0' {
|
||||
an = 1
|
||||
bn = 1
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
|
||||
an = an*10 + int64(ar[ai]-'0')
|
||||
}
|
||||
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
|
||||
bn = bn*10 + int64(br[bi]-'0')
|
||||
}
|
||||
if an != bn {
|
||||
return an < bn
|
||||
}
|
||||
if ai != bi {
|
||||
return ai < bi
|
||||
}
|
||||
return ar[i] < br[i]
|
||||
}
|
||||
return len(ar) < len(br)
|
||||
}
|
||||
|
||||
// keyFloat returns a float value for v if it is a number/bool
|
||||
// and whether it is a number/bool or not.
|
||||
func keyFloat(v reflect.Value) (f float64, ok bool) {
|
||||
switch v.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return float64(v.Int()), true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float(), true
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return float64(v.Uint()), true
|
||||
case reflect.Bool:
|
||||
if v.Bool() {
|
||||
return 1, true
|
||||
}
|
||||
return 0, true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// numLess returns whether a < b.
|
||||
// a and b must necessarily have the same kind.
|
||||
func numLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
}
|
||||
panic("not a number")
|
||||
}
|
26
vendor/gopkg.in/yaml.v2/writerc.go
generated
vendored
@ -1,26 +0,0 @@
|
||||
package yaml
|
||||
|
||||
// Set the writer error and return false.
|
||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
||||
emitter.error = yaml_WRITER_ERROR
|
||||
emitter.problem = problem
|
||||
return false
|
||||
}
|
||||
|
||||
// Flush the output buffer.
|
||||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
||||
if emitter.write_handler == nil {
|
||||
panic("write handler not set")
|
||||
}
|
||||
|
||||
// Check if the buffer is empty.
|
||||
if emitter.buffer_pos == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
|
||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||
}
|
||||
emitter.buffer_pos = 0
|
||||
return true
|
||||
}
|
466
vendor/gopkg.in/yaml.v2/yaml.go
generated
vendored
@ -1,466 +0,0 @@
|
||||
// Package yaml implements YAML support for the Go language.
|
||||
//
|
||||
// Source code and other details for the project are available at GitHub:
|
||||
//
|
||||
// https://github.com/go-yaml/yaml
|
||||
//
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MapSlice encodes and decodes as a YAML map.
|
||||
// The order of keys is preserved when encoding and decoding.
|
||||
type MapSlice []MapItem
|
||||
|
||||
// MapItem is an item in a MapSlice.
|
||||
type MapItem struct {
|
||||
Key, Value interface{}
|
||||
}
|
||||
|
||||
// The Unmarshaler interface may be implemented by types to customize their
|
||||
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
|
||||
// method receives a function that may be called to unmarshal the original
|
||||
// YAML value into a field or variable. It is safe to call the unmarshal
|
||||
// function parameter more than once if necessary.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalYAML(unmarshal func(interface{}) error) error
|
||||
}
|
||||
|
||||
// The Marshaler interface may be implemented by types to customize their
|
||||
// behavior when being marshaled into a YAML document. The returned value
|
||||
// is marshaled in place of the original value implementing Marshaler.
|
||||
//
|
||||
// If an error is returned by MarshalYAML, the marshaling procedure stops
|
||||
// and returns with the provided error.
|
||||
type Marshaler interface {
|
||||
MarshalYAML() (interface{}, error)
|
||||
}
|
||||
|
||||
// Unmarshal decodes the first document found within the in byte slice
|
||||
// and assigns decoded values into the out value.
|
||||
//
|
||||
// Maps and pointers (to a struct, string, int, etc) are accepted as out
|
||||
// values. If an internal pointer within a struct is not initialized,
|
||||
// the yaml package will initialize it if necessary for unmarshalling
|
||||
// the provided data. The out parameter must not be nil.
|
||||
//
|
||||
// The type of the decoded values should be compatible with the respective
|
||||
// values in out. If one or more values cannot be decoded due to type
|
||||
// mismatches, decoding continues partially until the end of the YAML
|
||||
// content, and a *yaml.TypeError is returned with details for all
|
||||
// missed values.
|
||||
//
|
||||
// Struct fields are only unmarshalled if they are exported (have an
|
||||
// upper case first letter), and are unmarshalled using the field name
|
||||
// lowercased as the default key. Custom keys may be defined via the
|
||||
// "yaml" name in the field tag: the content preceding the first comma
|
||||
// is used as the key, and the following comma-separated options are
|
||||
// used to tweak the marshalling process (see Marshal).
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// var t T
|
||||
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
|
||||
//
|
||||
// See the documentation of Marshal for the format of tags and a list of
|
||||
// supported tag options.
|
||||
//
|
||||
func Unmarshal(in []byte, out interface{}) (err error) {
|
||||
return unmarshal(in, out, false)
|
||||
}
|
||||
|
||||
// UnmarshalStrict is like Unmarshal except that any fields that are found
|
||||
// in the data that do not have corresponding struct members, or mapping
|
||||
// keys that are duplicates, will result in
|
||||
// an error.
|
||||
func UnmarshalStrict(in []byte, out interface{}) (err error) {
|
||||
return unmarshal(in, out, true)
|
||||
}
|
||||
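A short usage sketch of Unmarshal and UnmarshalStrict as documented above; the Config type and the YAML snippets are invented for illustration:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Config struct {
	Name  string `yaml:"name"`
	Port  int    `yaml:"port"`
	Debug bool   // no tag: the lowercased field name "debug" becomes the key
}

func main() {
	var c Config
	if err := yaml.Unmarshal([]byte("name: api\nport: 8080\ndebug: true\n"), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c) // {Name:api Port:8080 Debug:true}

	// UnmarshalStrict additionally rejects keys without a matching field.
	err := yaml.UnmarshalStrict([]byte("name: api\nunknown: 1\n"), &c)
	fmt.Println(err) // reports the unknown key
}
```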
|
||||
// A Decoder reads and decodes YAML values from an input stream.
|
||||
type Decoder struct {
|
||||
strict bool
|
||||
parser *parser
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
//
|
||||
// The decoder introduces its own buffering and may read
|
||||
// data from r beyond the YAML values requested.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{
|
||||
parser: newParserFromReader(r),
|
||||
}
|
||||
}
|
||||
|
||||
// SetStrict sets whether strict decoding behaviour is enabled when
|
||||
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
|
||||
func (dec *Decoder) SetStrict(strict bool) {
|
||||
dec.strict = strict
|
||||
}
|
||||
|
||||
// Decode reads the next YAML-encoded value from its input
|
||||
// and stores it in the value pointed to by v.
|
||||
//
|
||||
// See the documentation for Unmarshal for details about the
|
||||
// conversion of YAML into a Go value.
|
||||
func (dec *Decoder) Decode(v interface{}) (err error) {
|
||||
d := newDecoder(dec.strict)
|
||||
defer handleErr(&err)
|
||||
node := dec.parser.parse()
|
||||
if node == nil {
|
||||
return io.EOF
|
||||
}
|
||||
out := reflect.ValueOf(v)
|
||||
if out.Kind() == reflect.Ptr && !out.IsNil() {
|
||||
out = out.Elem()
|
||||
}
|
||||
d.unmarshal(node, out)
|
||||
if len(d.terrors) > 0 {
|
||||
return &TypeError{d.terrors}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
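A minimal sketch of the Decoder reading a multi-document stream until io.EOF, per the Decode documentation above; the stream contents are invented:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v2"
)

func main() {
	// Two documents in one stream, separated by "---".
	stream := "name: first\n---\nname: second\n"

	dec := yaml.NewDecoder(strings.NewReader(stream))
	dec.SetStrict(true) // behave like UnmarshalStrict

	for {
		var doc map[string]string
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // no more documents
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc["name"])
	}
}
```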
|
||||
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
|
||||
defer handleErr(&err)
|
||||
d := newDecoder(strict)
|
||||
p := newParser(in)
|
||||
defer p.destroy()
|
||||
node := p.parse()
|
||||
if node != nil {
|
||||
v := reflect.ValueOf(out)
|
||||
if v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
d.unmarshal(node, v)
|
||||
}
|
||||
if len(d.terrors) > 0 {
|
||||
return &TypeError{d.terrors}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshal serializes the value provided into a YAML document. The structure
|
||||
// of the generated document will reflect the structure of the value itself.
|
||||
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
|
||||
//
|
||||
// Struct fields are only marshalled if they are exported (have an upper case
|
||||
// first letter), and are marshalled using the field name lowercased as the
|
||||
// default key. Custom keys may be defined via the "yaml" name in the field
|
||||
// tag: the content preceding the first comma is used as the key, and the
|
||||
// following comma-separated options are used to tweak the marshalling process.
|
||||
// Conflicting names result in a runtime error.
|
||||
//
|
||||
// The field tag format accepted is:
|
||||
//
|
||||
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
|
||||
//
|
||||
// The following flags are currently supported:
|
||||
//
|
||||
// omitempty Only include the field if it's not set to the zero
|
||||
// value for the type or to empty slices or maps.
|
||||
// Zero valued structs will be omitted if all their public
|
||||
// fields are zero, unless they implement an IsZero
|
||||
// method (see the IsZeroer interface type), in which
|
||||
// case the field will be included if that method returns true.
|
||||
//
|
||||
// flow Marshal using a flow style (useful for structs,
|
||||
// sequences and maps).
|
||||
//
|
||||
// inline Inline the field, which must be a struct or a map,
|
||||
// causing all of its fields or keys to be processed as if
|
||||
// they were part of the outer struct. For maps, keys must
|
||||
// not conflict with the yaml keys of other struct fields.
|
||||
//
|
||||
// In addition, if the key is "-", the field is ignored.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// type T struct {
|
||||
// F int `yaml:"a,omitempty"`
|
||||
// B int
|
||||
// }
|
||||
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
|
||||
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
|
||||
//
|
||||
func Marshal(in interface{}) (out []byte, err error) {
|
||||
defer handleErr(&err)
|
||||
e := newEncoder()
|
||||
defer e.destroy()
|
||||
e.marshalDoc("", reflect.ValueOf(in))
|
||||
e.finish()
|
||||
out = e.out
|
||||
return
|
||||
}
|
||||
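The tag flags described above (omitempty, flow, inline) in one hedged sketch; the Service and Meta types are invented:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Service struct {
	Name  string   `yaml:"name"`
	Tags  []string `yaml:"tags,flow"`       // emitted as a flow sequence: [a, b]
	Notes string   `yaml:"notes,omitempty"` // dropped while empty
	Meta  `yaml:",inline"`                  // keys promoted into the outer mapping
}

func main() {
	out, err := yaml.Marshal(Service{
		Name: "api",
		Tags: []string{"web", "go"},
		Meta: Meta{Owner: "ops"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: api
	// tags: [web, go]
	// owner: ops
}
```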
|
||||
// An Encoder writes YAML values to an output stream.
|
||||
type Encoder struct {
|
||||
encoder *encoder
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
// The Encoder should be closed after use to flush all data
|
||||
// to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
encoder: newEncoderWithWriter(w),
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes the YAML encoding of v to the stream.
|
||||
// If multiple items are encoded to the stream, the
|
||||
// second and subsequent document will be preceded
|
||||
// with a "---" document separator, but the first will not.
|
||||
//
|
||||
// See the documentation for Marshal for details about the conversion of Go
|
||||
// values to YAML.
|
||||
func (e *Encoder) Encode(v interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
e.encoder.marshalDoc("", reflect.ValueOf(v))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the encoder by writing any remaining data.
|
||||
// It does not write a stream terminating string "...".
|
||||
func (e *Encoder) Close() (err error) {
|
||||
defer handleErr(&err)
|
||||
e.encoder.finish()
|
||||
return nil
|
||||
}
|
||||
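A short sketch of the Encoder writing two documents to one stream, with the "---" separator and the explicit Close described above; the payloads are invented:

```go
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)

	// The second and later documents are preceded by a "---" separator.
	if err := enc.Encode(map[string]int{"first": 1}); err != nil {
		panic(err)
	}
	if err := enc.Encode(map[string]int{"second": 2}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush the remaining buffered output
		panic(err)
	}

	fmt.Print(buf.String())
	// first: 1
	// ---
	// second: 2
}
```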
|
||||
func handleErr(err *error) {
|
||||
if v := recover(); v != nil {
|
||||
if e, ok := v.(yamlError); ok {
|
||||
*err = e.err
|
||||
} else {
|
||||
panic(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type yamlError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func fail(err error) {
|
||||
panic(yamlError{err})
|
||||
}
|
||||
|
||||
func failf(format string, args ...interface{}) {
|
||||
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
|
||||
}
|
||||
|
||||
// A TypeError is returned by Unmarshal when one or more fields in
|
||||
// the YAML document cannot be properly decoded into the requested
|
||||
// types. When this error is returned, the value is still
|
||||
// unmarshaled partially.
|
||||
type TypeError struct {
|
||||
Errors []string
|
||||
}
|
||||
|
||||
func (e *TypeError) Error() string {
|
||||
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Maintain a mapping of keys to structure field indexes
|
||||
|
||||
// The code in this section was copied from mgo/bson.
|
||||
|
||||
// structInfo holds details for the serialization of fields of
|
||||
// a given struct.
|
||||
type structInfo struct {
|
||||
FieldsMap map[string]fieldInfo
|
||||
FieldsList []fieldInfo
|
||||
|
||||
// InlineMap is the number of the field in the struct that
|
||||
// contains an ,inline map, or -1 if there's none.
|
||||
InlineMap int
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
Key string
|
||||
Num int
|
||||
OmitEmpty bool
|
||||
Flow bool
|
||||
// Id holds the unique field identifier, so we can cheaply
|
||||
// check for field duplicates without maintaining an extra map.
|
||||
Id int
|
||||
|
||||
// Inline holds the field index if the field is part of an inlined struct.
|
||||
Inline []int
|
||||
}
|
||||
|
||||
var structMap = make(map[reflect.Type]*structInfo)
|
||||
var fieldMapMutex sync.RWMutex
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
fieldMapMutex.RUnlock()
|
||||
if found {
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
n := st.NumField()
|
||||
fieldsMap := make(map[string]fieldInfo)
|
||||
fieldsList := make([]fieldInfo, 0, n)
|
||||
inlineMap := -1
|
||||
for i := 0; i != n; i++ {
|
||||
field := st.Field(i)
|
||||
if field.PkgPath != "" && !field.Anonymous {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
info := fieldInfo{Num: i}
|
||||
|
||||
tag := field.Tag.Get("yaml")
|
||||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||
tag = string(field.Tag)
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
inline := false
|
||||
fields := strings.Split(tag, ",")
|
||||
if len(fields) > 1 {
|
||||
for _, flag := range fields[1:] {
|
||||
switch flag {
|
||||
case "omitempty":
|
||||
info.OmitEmpty = true
|
||||
case "flow":
|
||||
info.Flow = true
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
}
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if inlineMap >= 0 {
|
||||
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
|
||||
}
|
||||
if field.Type.Key() != reflect.TypeOf("") {
|
||||
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
|
||||
}
|
||||
inlineMap = info.Num
|
||||
case reflect.Struct:
|
||||
sinfo, err := getStructInfo(field.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, finfo := range sinfo.FieldsList {
|
||||
if _, found := fieldsMap[finfo.Key]; found {
|
||||
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if finfo.Inline == nil {
|
||||
finfo.Inline = []int{i, finfo.Num}
|
||||
} else {
|
||||
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||
}
|
||||
finfo.Id = len(fieldsList)
|
||||
fieldsMap[finfo.Key] = finfo
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
default:
|
||||
//return nil, errors.New("Option ,inline needs a struct value or map field")
|
||||
return nil, errors.New("Option ,inline needs a struct value field")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != "" {
|
||||
info.Key = tag
|
||||
} else {
|
||||
info.Key = strings.ToLower(field.Name)
|
||||
}
|
||||
|
||||
if _, found = fieldsMap[info.Key]; found {
|
||||
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
info.Id = len(fieldsList)
|
||||
fieldsList = append(fieldsList, info)
|
||||
fieldsMap[info.Key] = info
|
||||
}
|
||||
|
||||
sinfo = &structInfo{
|
||||
FieldsMap: fieldsMap,
|
||||
FieldsList: fieldsList,
|
||||
InlineMap: inlineMap,
|
||||
}
|
||||
|
||||
fieldMapMutex.Lock()
|
||||
structMap[st] = sinfo
|
||||
fieldMapMutex.Unlock()
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
// IsZeroer is used to check whether an object is zero to
|
||||
// determine whether it should be omitted when marshaling
|
||||
// with the omitempty flag. One notable implementation
|
||||
// is time.Time.
|
||||
type IsZeroer interface {
|
||||
IsZero() bool
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
kind := v.Kind()
|
||||
if z, ok := v.Interface().(IsZeroer); ok {
|
||||
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
|
||||
return true
|
||||
}
|
||||
return z.IsZero()
|
||||
}
|
||||
switch kind {
|
||||
case reflect.String:
|
||||
return len(v.String()) == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflect.Slice:
|
||||
return v.Len() == 0
|
||||
case reflect.Map:
|
||||
return v.Len() == 0
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Struct:
|
||||
vt := v.Type()
|
||||
for i := v.NumField() - 1; i >= 0; i-- {
|
||||
if vt.Field(i).PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
if !isZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
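How the IsZeroer hook interacts with omitempty, in a hedged sketch; the Window and Job types are invented, and time.Time is a real-world implementer of IsZero:

```go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

// Window reports itself as zero when both endpoints are unset, so a
// field of this type marked omitempty can be dropped entirely.
type Window struct {
	From time.Time `yaml:"from"`
	To   time.Time `yaml:"to"`
}

func (w Window) IsZero() bool { return w.From.IsZero() && w.To.IsZero() }

type Job struct {
	Name   string `yaml:"name"`
	Window Window `yaml:"window,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Job{Name: "backup"})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints only "name: backup"; the zero Window is omitted
}
```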
738
vendor/gopkg.in/yaml.v2/yamlh.go
generated
vendored
@ -1,738 +0,0 @@
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// The version directive data.
|
||||
type yaml_version_directive_t struct {
|
||||
major int8 // The major version number.
|
||||
minor int8 // The minor version number.
|
||||
}
|
||||
|
||||
// The tag directive data.
|
||||
type yaml_tag_directive_t struct {
|
||||
handle []byte // The tag handle.
|
||||
prefix []byte // The tag prefix.
|
||||
}
|
||||
|
||||
type yaml_encoding_t int
|
||||
|
||||
// The stream encoding.
|
||||
const (
|
||||
// Let the parser choose the encoding.
|
||||
yaml_ANY_ENCODING yaml_encoding_t = iota
|
||||
|
||||
yaml_UTF8_ENCODING // The default UTF-8 encoding.
|
||||
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
|
||||
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
|
||||
)
|
||||
|
||||
type yaml_break_t int
|
||||
|
||||
// Line break types.
|
||||
const (
|
||||
// Let the parser choose the break type.
|
||||
yaml_ANY_BREAK yaml_break_t = iota
|
||||
|
||||
yaml_CR_BREAK // Use CR for line breaks (Mac style).
|
||||
yaml_LN_BREAK // Use LN for line breaks (Unix style).
|
||||
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
|
||||
)
|
||||
|
||||
type yaml_error_type_t int
|
||||
|
||||
// Many bad things could happen with the parser and emitter.
|
||||
const (
|
||||
// No error is produced.
|
||||
yaml_NO_ERROR yaml_error_type_t = iota
|
||||
|
||||
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
|
||||
yaml_READER_ERROR // Cannot read or decode the input stream.
|
||||
yaml_SCANNER_ERROR // Cannot scan the input stream.
|
||||
yaml_PARSER_ERROR // Cannot parse the input stream.
|
||||
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
|
||||
yaml_WRITER_ERROR // Cannot write to the output stream.
|
||||
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
|
||||
)
|
||||
|
||||
// The pointer position.
|
||||
type yaml_mark_t struct {
|
||||
index int // The position index.
|
||||
line int // The position line.
|
||||
column int // The position column.
|
||||
}
|
||||
|
||||
// Node Styles
|
||||
|
||||
type yaml_style_t int8
|
||||
|
||||
type yaml_scalar_style_t yaml_style_t
|
||||
|
||||
// Scalar styles.
|
||||
const (
|
||||
// Let the emitter choose the style.
|
||||
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
|
||||
|
||||
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
|
||||
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
|
||||
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
|
||||
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
|
||||
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
|
||||
)
|
||||
|
||||
type yaml_sequence_style_t yaml_style_t
|
||||
|
||||
// Sequence styles.
|
||||
const (
|
||||
// Let the emitter choose the style.
|
||||
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
|
||||
|
||||
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
|
||||
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
|
||||
)
|
||||
|
||||
type yaml_mapping_style_t yaml_style_t
|
||||
|
||||
// Mapping styles.
|
||||
const (
|
||||
// Let the emitter choose the style.
|
||||
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
|
||||
|
||||
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
|
||||
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
|
||||
)
|
||||
|
||||
// Tokens
|
||||
|
||||
type yaml_token_type_t int
|
||||
|
||||
// Token types.
|
||||
const (
|
||||
// An empty token.
|
||||
yaml_NO_TOKEN yaml_token_type_t = iota
|
||||
|
||||
yaml_STREAM_START_TOKEN // A STREAM-START token.
|
||||
yaml_STREAM_END_TOKEN // A STREAM-END token.
|
||||
|
||||
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
|
||||
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
|
||||
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
|
||||
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
|
||||
|
||||
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
|
||||
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
|
||||
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
|
||||
|
||||
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
|
||||
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
|
||||
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
|
||||
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
|
||||
|
||||
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
|
||||
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
|
||||
yaml_KEY_TOKEN // A KEY token.
|
||||
yaml_VALUE_TOKEN // A VALUE token.
|
||||
|
||||
yaml_ALIAS_TOKEN // An ALIAS token.
|
||||
yaml_ANCHOR_TOKEN // An ANCHOR token.
|
||||
yaml_TAG_TOKEN // A TAG token.
|
||||
yaml_SCALAR_TOKEN // A SCALAR token.
|
||||
)
|
||||
|
||||
func (tt yaml_token_type_t) String() string {
|
||||
switch tt {
|
||||
case yaml_NO_TOKEN:
|
||||
return "yaml_NO_TOKEN"
|
||||
case yaml_STREAM_START_TOKEN:
|
||||
return "yaml_STREAM_START_TOKEN"
|
||||
case yaml_STREAM_END_TOKEN:
|
||||
return "yaml_STREAM_END_TOKEN"
|
||||
case yaml_VERSION_DIRECTIVE_TOKEN:
|
||||
return "yaml_VERSION_DIRECTIVE_TOKEN"
|
||||
case yaml_TAG_DIRECTIVE_TOKEN:
|
||||
return "yaml_TAG_DIRECTIVE_TOKEN"
|
||||
case yaml_DOCUMENT_START_TOKEN:
|
||||
return "yaml_DOCUMENT_START_TOKEN"
|
||||
case yaml_DOCUMENT_END_TOKEN:
|
||||
return "yaml_DOCUMENT_END_TOKEN"
|
||||
case yaml_BLOCK_SEQUENCE_START_TOKEN:
|
||||
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
|
||||
case yaml_BLOCK_MAPPING_START_TOKEN:
|
||||
return "yaml_BLOCK_MAPPING_START_TOKEN"
|
||||
case yaml_BLOCK_END_TOKEN:
|
||||
return "yaml_BLOCK_END_TOKEN"
|
||||
case yaml_FLOW_SEQUENCE_START_TOKEN:
|
||||
return "yaml_FLOW_SEQUENCE_START_TOKEN"
|
||||
case yaml_FLOW_SEQUENCE_END_TOKEN:
|
||||
return "yaml_FLOW_SEQUENCE_END_TOKEN"
|
||||
case yaml_FLOW_MAPPING_START_TOKEN:
|
||||
return "yaml_FLOW_MAPPING_START_TOKEN"
|
||||
case yaml_FLOW_MAPPING_END_TOKEN:
|
||||
return "yaml_FLOW_MAPPING_END_TOKEN"
|
||||
case yaml_BLOCK_ENTRY_TOKEN:
|
||||
return "yaml_BLOCK_ENTRY_TOKEN"
|
||||
case yaml_FLOW_ENTRY_TOKEN:
|
||||
return "yaml_FLOW_ENTRY_TOKEN"
|
||||
case yaml_KEY_TOKEN:
|
||||
return "yaml_KEY_TOKEN"
|
||||
case yaml_VALUE_TOKEN:
|
||||
return "yaml_VALUE_TOKEN"
|
||||
case yaml_ALIAS_TOKEN:
|
||||
return "yaml_ALIAS_TOKEN"
|
||||
case yaml_ANCHOR_TOKEN:
|
||||
return "yaml_ANCHOR_TOKEN"
|
||||
case yaml_TAG_TOKEN:
|
||||
return "yaml_TAG_TOKEN"
|
||||
case yaml_SCALAR_TOKEN:
|
||||
return "yaml_SCALAR_TOKEN"
|
||||
}
|
||||
return "<unknown token>"
|
||||
}
|
||||
|
||||
// The token structure.
|
||||
type yaml_token_t struct {
|
||||
// The token type.
|
||||
typ yaml_token_type_t
|
||||
|
||||
// The start/end of the token.
|
||||
start_mark, end_mark yaml_mark_t
|
||||
|
||||
// The stream encoding (for yaml_STREAM_START_TOKEN).
|
||||
encoding yaml_encoding_t
|
||||
|
||||
// The alias/anchor/scalar value or tag/tag directive handle
|
||||
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
|
||||
value []byte
|
||||
|
||||
// The tag suffix (for yaml_TAG_TOKEN).
|
||||
suffix []byte
|
||||
|
||||
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
|
||||
prefix []byte
|
||||
|
||||
// The scalar style (for yaml_SCALAR_TOKEN).
|
||||
style yaml_scalar_style_t
|
||||
|
||||
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
|
||||
major, minor int8
|
||||
}
|
||||
|
||||
// Events
|
||||
|
||||
type yaml_event_type_t int8
|
||||
|
||||
// Event types.
|
||||
const (
|
||||
// An empty event.
|
||||
yaml_NO_EVENT yaml_event_type_t = iota
|
||||
|
||||
yaml_STREAM_START_EVENT // A STREAM-START event.
|
||||
yaml_STREAM_END_EVENT // A STREAM-END event.
|
||||
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
|
||||
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
|
||||
yaml_ALIAS_EVENT // An ALIAS event.
|
||||
yaml_SCALAR_EVENT // A SCALAR event.
|
||||
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
|
||||
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
|
||||
yaml_MAPPING_START_EVENT // A MAPPING-START event.
|
||||
yaml_MAPPING_END_EVENT // A MAPPING-END event.
|
||||
)
|
||||
|
||||
var eventStrings = []string{
|
||||
yaml_NO_EVENT: "none",
|
||||
yaml_STREAM_START_EVENT: "stream start",
|
||||
yaml_STREAM_END_EVENT: "stream end",
|
||||
yaml_DOCUMENT_START_EVENT: "document start",
|
||||
yaml_DOCUMENT_END_EVENT: "document end",
|
||||
yaml_ALIAS_EVENT: "alias",
|
||||
yaml_SCALAR_EVENT: "scalar",
|
||||
yaml_SEQUENCE_START_EVENT: "sequence start",
|
||||
yaml_SEQUENCE_END_EVENT: "sequence end",
|
||||
yaml_MAPPING_START_EVENT: "mapping start",
|
||||
yaml_MAPPING_END_EVENT: "mapping end",
|
||||
}
|
||||
|
||||
func (e yaml_event_type_t) String() string {
|
||||
if e < 0 || int(e) >= len(eventStrings) {
|
||||
return fmt.Sprintf("unknown event %d", e)
|
||||
}
|
||||
return eventStrings[e]
|
||||
}
|
||||
|
||||
// The event structure.
|
||||
type yaml_event_t struct {
|
||||
|
||||
// The event type.
|
||||
typ yaml_event_type_t
|
||||
|
||||
// The start and end of the event.
|
||||
start_mark, end_mark yaml_mark_t
|
||||
|
||||
// The document encoding (for yaml_STREAM_START_EVENT).
|
||||
encoding yaml_encoding_t
|
||||
|
||||
// The version directive (for yaml_DOCUMENT_START_EVENT).
|
||||
version_directive *yaml_version_directive_t
|
||||
|
||||
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
|
||||
tag_directives []yaml_tag_directive_t
|
||||
|
||||
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
|
||||
anchor []byte
|
||||
|
||||
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
||||
tag []byte
|
||||
|
||||
// The scalar value (for yaml_SCALAR_EVENT).
|
||||
value []byte
|
||||
|
||||
// Is the document start/end indicator implicit, or the tag optional?
|
||||
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
|
||||
implicit bool
|
||||
|
||||
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
|
||||
quoted_implicit bool
|
||||
|
||||
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
|
||||
style yaml_style_t
|
||||
}
|
||||
|
||||
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
|
||||
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
|
||||
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
|
||||
|
||||
// Nodes
|
||||
|
||||
const (
|
||||
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
|
||||
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
|
||||
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
|
||||
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
|
||||
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
|
||||
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
|
||||
|
||||
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
|
||||
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
|
||||
|
||||
// Not in original libyaml.
|
||||
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
|
||||
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
|
||||
|
||||
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
|
||||
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
|
||||
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
|
||||
)
|
||||
|
||||
type yaml_node_type_t int
|
||||
|
||||
// Node types.
|
||||
const (
|
||||
// An empty node.
|
||||
yaml_NO_NODE yaml_node_type_t = iota
|
||||
|
||||
yaml_SCALAR_NODE // A scalar node.
|
||||
yaml_SEQUENCE_NODE // A sequence node.
|
||||
yaml_MAPPING_NODE // A mapping node.
|
||||
)
|
||||
|
||||
// An element of a sequence node.
|
||||
type yaml_node_item_t int
|
||||
|
||||
// An element of a mapping node.
|
||||
type yaml_node_pair_t struct {
|
||||
key int // The key of the element.
|
||||
value int // The value of the element.
|
||||
}
|
||||
|
||||
// The node structure.
|
||||
type yaml_node_t struct {
|
||||
typ yaml_node_type_t // The node type.
|
||||
tag []byte // The node tag.
|
||||
|
||||
// The node data.
|
||||
|
||||
// The scalar parameters (for yaml_SCALAR_NODE).
|
||||
scalar struct {
|
||||
value []byte // The scalar value.
|
||||
length int // The length of the scalar value.
|
||||
style yaml_scalar_style_t // The scalar style.
|
||||
}
|
||||
|
||||
// The sequence parameters (for yaml_SEQUENCE_NODE).
|
||||
sequence struct {
|
||||
items_data []yaml_node_item_t // The stack of sequence items.
|
||||
style yaml_sequence_style_t // The sequence style.
|
||||
}
|
||||
|
||||
// The mapping parameters (for yaml_MAPPING_NODE).
|
||||
mapping struct {
|
||||
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
|
||||
pairs_start *yaml_node_pair_t // The beginning of the stack.
|
||||
pairs_end *yaml_node_pair_t // The end of the stack.
|
||||
pairs_top *yaml_node_pair_t // The top of the stack.
|
||||
style yaml_mapping_style_t // The mapping style.
|
||||
}
|
||||
|
||||
start_mark yaml_mark_t // The beginning of the node.
|
||||
end_mark yaml_mark_t // The end of the node.
|
||||
|
||||
}
|
||||
|
||||
// The document structure.
|
||||
type yaml_document_t struct {
|
||||
|
||||
// The document nodes.
|
||||
nodes []yaml_node_t
|
||||
|
||||
// The version directive.
|
||||
version_directive *yaml_version_directive_t
|
||||
|
||||
// The list of tag directives.
|
||||
tag_directives_data []yaml_tag_directive_t
|
||||
tag_directives_start int // The beginning of the tag directives list.
|
||||
tag_directives_end int // The end of the tag directives list.
|
||||
|
||||
start_implicit int // Is the document start indicator implicit?
|
||||
end_implicit int // Is the document end indicator implicit?
|
||||
|
||||
// The start/end of the document.
|
||||
start_mark, end_mark yaml_mark_t
|
||||
}
|
||||
|
||||
// The prototype of a read handler.
|
||||
//
|
||||
// The read handler is called when the parser needs to read more bytes from the
|
||||
// source. The handler should write not more than size bytes to the buffer.
|
||||
// The number of written bytes should be set to the size_read variable.
|
||||
//
|
||||
// [in,out] data A pointer to an application data specified by
|
||||
// yaml_parser_set_input().
|
||||
// [out] buffer The buffer to write the data from the source.
|
||||
// [in] size The size of the buffer.
|
||||
// [out] size_read The actual number of bytes read from the source.
|
||||
//
|
||||
// On success, the handler should return 1. If the handler failed,
|
||||
// the returned value should be 0. On EOF, the handler should set the
|
||||
// size_read to 0 and return 1.
|
||||
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
||||
|
||||
// This structure holds information about a potential simple key.
|
||||
type yaml_simple_key_t struct {
|
||||
possible bool // Is a simple key possible?
|
||||
required bool // Is a simple key required?
|
||||
token_number int // The number of the token.
|
||||
mark yaml_mark_t // The position mark.
|
||||
}
|
||||
|
||||
// The states of the parser.
|
||||
type yaml_parser_state_t int
|
||||
|
||||
const (
|
||||
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
|
||||
|
||||
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
|
||||
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
|
||||
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
||||
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
||||
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
|
||||
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
|
||||
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
|
||||
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
|
||||
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
|
||||
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
|
||||
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
||||
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
|
||||
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
|
||||
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
|
||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
|
||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
|
||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
|
||||
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
|
||||
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
||||
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
||||
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
||||
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
|
||||
yaml_PARSE_END_STATE // Expect nothing.
|
||||
)
|
||||
|
||||
func (ps yaml_parser_state_t) String() string {
|
||||
switch ps {
|
||||
case yaml_PARSE_STREAM_START_STATE:
|
||||
return "yaml_PARSE_STREAM_START_STATE"
|
||||
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
|
||||
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
|
||||
case yaml_PARSE_DOCUMENT_START_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_START_STATE"
|
||||
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
|
||||
case yaml_PARSE_DOCUMENT_END_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_END_STATE"
|
||||
case yaml_PARSE_BLOCK_NODE_STATE:
|
||||
return "yaml_PARSE_BLOCK_NODE_STATE"
|
||||
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
|
||||
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
|
||||
case yaml_PARSE_FLOW_NODE_STATE:
|
||||
return "yaml_PARSE_FLOW_NODE_STATE"
|
||||
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
|
||||
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
|
||||
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
|
||||
case yaml_PARSE_END_STATE:
|
||||
return "yaml_PARSE_END_STATE"
|
||||
}
|
||||
return "<unknown parser state>"
|
||||
}
|
||||
|
||||
// This structure holds aliases data.
|
||||
type yaml_alias_data_t struct {
|
||||
anchor []byte // The anchor.
|
||||
index int // The node id.
|
||||
mark yaml_mark_t // The anchor mark.
|
||||
}
|
||||
|
||||
// The parser structure.
|
||||
//
|
||||
// All members are internal. Manage the structure using the
|
||||
// yaml_parser_ family of functions.
|
||||
type yaml_parser_t struct {
|
||||
|
||||
// Error handling
|
||||
|
||||
error yaml_error_type_t // Error type.
|
||||
|
||||
problem string // Error description.
|
||||
|
||||
// The byte about which the problem occurred.
|
||||
problem_offset int
|
||||
problem_value int
|
||||
problem_mark yaml_mark_t
|
||||
|
||||
// The error context.
|
||||
context string
|
||||
context_mark yaml_mark_t
|
||||
|
||||
// Reader stuff
|
||||
|
||||
read_handler yaml_read_handler_t // Read handler.
|
||||
|
||||
input_reader io.Reader // File input data.
|
||||
input []byte // String input data.
|
||||
input_pos int
|
||||
|
||||
eof bool // EOF flag
|
||||
|
||||
buffer []byte // The working buffer.
|
||||
buffer_pos int // The current position of the buffer.
|
||||
|
||||
unread int // The number of unread characters in the buffer.
|
||||
|
||||
raw_buffer []byte // The raw buffer.
|
||||
raw_buffer_pos int // The current position of the buffer.
|
||||
|
||||
encoding yaml_encoding_t // The input encoding.
|
||||
|
||||
offset int // The offset of the current position (in bytes).
|
||||
mark yaml_mark_t // The mark of the current position.
|
||||
|
||||
// Scanner stuff
|
||||
|
||||
stream_start_produced bool // Have we started to scan the input stream?
|
||||
stream_end_produced bool // Have we reached the end of the input stream?
|
||||
|
||||
flow_level int // The number of unclosed '[' and '{' indicators.
|
||||
|
||||
tokens []yaml_token_t // The tokens queue.
|
||||
tokens_head int // The head of the tokens queue.
|
||||
tokens_parsed int // The number of tokens fetched from the queue.
|
||||
token_available bool // Does the tokens queue contain a token ready for dequeueing.
|
||||
|
||||
indent int // The current indentation level.
|
||||
indents []int // The indentation levels stack.
|
||||
|
||||
simple_key_allowed bool // May a simple key occur at the current position?
|
||||
simple_keys []yaml_simple_key_t // The stack of simple keys.
|
||||
|
||||
// Parser stuff
|
||||
|
||||
state yaml_parser_state_t // The current parser state.
|
||||
states []yaml_parser_state_t // The parser states stack.
|
||||
marks []yaml_mark_t // The stack of marks.
|
||||
tag_directives []yaml_tag_directive_t // The list of TAG directives.
|
||||
|
||||
// Dumper stuff
|
||||
|
||||
aliases []yaml_alias_data_t // The alias data.
|
||||
|
||||
document *yaml_document_t // The currently parsed document.
|
||||
}
|
||||
|
||||
// Emitter Definitions
|
||||
|
||||
// The prototype of a write handler.
|
||||
//
|
||||
// The write handler is called when the emitter needs to flush the accumulated
|
||||
// characters to the output. The handler should write @a size bytes of the
|
||||
// @a buffer to the output.
|
||||
//
|
||||
// @param[in,out] data A pointer to an application data specified by
|
||||
// yaml_emitter_set_output().
|
||||
// @param[in] buffer The buffer with bytes to be written.
|
||||
// @param[in] size The size of the buffer.
|
||||
//
|
||||
// @returns On success, the handler should return @c 1. If the handler failed,
|
||||
// the returned value should be @c 0.
|
||||
//
|
||||
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
||||
|
||||
type yaml_emitter_state_t int
|
||||
|
||||
// The emitter states.
|
||||
const (
|
||||
// Expect STREAM-START.
|
||||
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
|
||||
|
||||
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
|
||||
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
|
||||
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
|
||||
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
|
||||
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
|
||||
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
|
||||
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
|
||||
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
|
||||
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
|
||||
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
|
||||
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
|
||||
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
|
||||
yaml_EMIT_END_STATE // Expect nothing.
|
||||
)
|
||||
|
||||
// The emitter structure.
|
||||
//
|
||||
// All members are internal. Manage the structure using the @c yaml_emitter_
|
||||
// family of functions.
|
||||
type yaml_emitter_t struct {
|
||||
|
||||
// Error handling
|
||||
|
||||
error yaml_error_type_t // Error type.
|
||||
problem string // Error description.
|
||||
|
||||
// Writer stuff
|
||||
|
||||
write_handler yaml_write_handler_t // Write handler.
|
||||
|
||||
output_buffer *[]byte // String output data.
|
||||
output_writer io.Writer // File output data.
|
||||
|
||||
buffer []byte // The working buffer.
|
||||
buffer_pos int // The current position of the buffer.
|
||||
|
||||
raw_buffer []byte // The raw buffer.
|
||||
raw_buffer_pos int // The current position of the buffer.
|
||||
|
||||
encoding yaml_encoding_t // The stream encoding.
|
||||
|
||||
// Emitter stuff
|
||||
|
||||
canonical bool // If the output is in the canonical style?
|
||||
best_indent int // The number of indentation spaces.
|
||||
best_width int // The preferred width of the output lines.
|
||||
unicode bool // Allow unescaped non-ASCII characters?
|
||||
line_break yaml_break_t // The preferred line break.
|
||||
|
||||
state yaml_emitter_state_t // The current emitter state.
|
||||
states []yaml_emitter_state_t // The stack of states.
|
||||
|
||||
events []yaml_event_t // The event queue.
|
||||
events_head int // The head of the event queue.
|
||||
|
||||
indents []int // The stack of indentation levels.
|
||||
|
||||
tag_directives []yaml_tag_directive_t // The list of tag directives.
|
||||
|
||||
indent int // The current indentation level.
|
||||
|
||||
flow_level int // The current flow level.
|
||||
|
||||
root_context bool // Is it the document root context?
|
||||
sequence_context bool // Is it a sequence context?
|
||||
mapping_context bool // Is it a mapping context?
|
||||
simple_key_context bool // Is it a simple mapping key context?
|
||||
|
||||
line int // The current line.
|
||||
column int // The current column.
|
||||
whitespace bool // If the last character was a whitespace?
|
||||
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
|
||||
open_ended bool // If an explicit document end is required?
|
||||
|
||||
// Anchor analysis.
|
||||
anchor_data struct {
|
||||
anchor []byte // The anchor value.
|
||||
alias bool // Is it an alias?
|
||||
}
|
||||
|
||||
// Tag analysis.
|
||||
tag_data struct {
|
||||
handle []byte // The tag handle.
|
||||
suffix []byte // The tag suffix.
|
||||
}
|
||||
|
||||
// Scalar analysis.
|
||||
scalar_data struct {
|
||||
value []byte // The scalar value.
|
||||
multiline bool // Does the scalar contain line breaks?
|
||||
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
|
||||
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
|
||||
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
|
||||
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
|
||||
style yaml_scalar_style_t // The output style.
|
||||
}
|
||||
|
||||
// Dumper stuff
|
||||
|
||||
opened bool // If the stream was already opened?
|
||||
closed bool // If the stream was already closed?
|
||||
|
||||
// The information associated with the document nodes.
|
||||
anchors *struct {
|
||||
references int // The number of references.
|
||||
anchor int // The anchor id.
|
||||
serialized bool // If the node has been emitted?
|
||||
}
|
||||
|
||||
last_anchor_id int // The last assigned anchor id.
|
||||
|
||||
document *yaml_document_t // The currently emitted document.
|
||||
}
|
173
vendor/gopkg.in/yaml.v2/yamlprivateh.go
generated
vendored
@ -1,173 +0,0 @@
|
||||
package yaml
|
||||
|
||||
const (
|
||||
// The size of the input raw buffer.
|
||||
input_raw_buffer_size = 512
|
||||
|
||||
// The size of the input buffer.
|
||||
// It should be possible to decode the whole raw buffer.
|
||||
input_buffer_size = input_raw_buffer_size * 3
|
||||
|
||||
// The size of the output buffer.
|
||||
output_buffer_size = 128
|
||||
|
||||
// The size of the output raw buffer.
|
||||
// It should be possible to encode the whole output buffer.
|
||||
output_raw_buffer_size = (output_buffer_size*2 + 2)
|
||||
|
||||
// The size of other stacks and queues.
|
||||
initial_stack_size = 16
|
||||
initial_queue_size = 16
|
||||
initial_string_size = 16
|
||||
)
|
||||
|
||||
// Check if the character at the specified position is an alphabetical
|
||||
// character, a digit, '_', or '-'.
|
||||
func is_alpha(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a digit.
|
||||
func is_digit(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9'
|
||||
}
|
||||
|
||||
// Get the value of a digit.
|
||||
func as_digit(b []byte, i int) int {
|
||||
return int(b[i]) - '0'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a hex-digit.
|
||||
func is_hex(b []byte, i int) bool {
|
||||
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
|
||||
}
|
||||
|
||||
// Get the value of a hex-digit.
|
||||
func as_hex(b []byte, i int) int {
|
||||
bi := b[i]
|
||||
if bi >= 'A' && bi <= 'F' {
|
||||
return int(bi) - 'A' + 10
|
||||
}
|
||||
if bi >= 'a' && bi <= 'f' {
|
||||
return int(bi) - 'a' + 10
|
||||
}
|
||||
return int(bi) - '0'
|
||||
}
|
||||
|
||||
// Check if the character is ASCII.
|
||||
func is_ascii(b []byte, i int) bool {
|
||||
return b[i] <= 0x7F
|
||||
}
|
||||
|
||||
// Check if the character at the start of the buffer can be printed unescaped.
|
||||
func is_printable(b []byte, i int) bool {
|
||||
return ((b[i] == 0x0A) || // . == #x0A
|
||||
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
|
||||
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
|
||||
(b[i] > 0xC2 && b[i] < 0xED) ||
|
||||
(b[i] == 0xED && b[i+1] < 0xA0) ||
|
||||
(b[i] == 0xEE) ||
|
||||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
|
||||
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
|
||||
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is NUL.
|
||||
func is_z(b []byte, i int) bool {
|
||||
return b[i] == 0x00
|
||||
}
|
||||
|
||||
// Check if the beginning of the buffer is a BOM.
|
||||
func is_bom(b []byte, i int) bool {
|
||||
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is space.
|
||||
func is_space(b []byte, i int) bool {
|
||||
return b[i] == ' '
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is tab.
|
||||
func is_tab(b []byte, i int) bool {
|
||||
return b[i] == '\t'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is blank (space or tab).
|
||||
func is_blank(b []byte, i int) bool {
|
||||
//return is_space(b, i) || is_tab(b, i)
|
||||
return b[i] == ' ' || b[i] == '\t'
|
||||
}
|
||||
|
||||
// Check if the character at the specified position is a line break.
|
||||
func is_break(b []byte, i int) bool {
|
||||
return (b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
|
||||
}
|
||||
|
||||
func is_crlf(b []byte, i int) bool {
|
||||
return b[i] == '\r' && b[i+1] == '\n'
|
||||
}
|
||||
|
||||
// Check if the character is a line break or NUL.
|
||||
func is_breakz(b []byte, i int) bool {
|
||||
//return is_break(b, i) || is_z(b, i)
|
||||
return ( // is_break:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
// is_z:
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Check if the character is a line break, space, or NUL.
|
||||
func is_spacez(b []byte, i int) bool {
|
||||
//return is_space(b, i) || is_breakz(b, i)
|
||||
return ( // is_space:
|
||||
b[i] == ' ' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Check if the character is a line break, space, tab, or NUL.
|
||||
func is_blankz(b []byte, i int) bool {
|
||||
//return is_blank(b, i) || is_breakz(b, i)
|
||||
return ( // is_blank:
|
||||
b[i] == ' ' || b[i] == '\t' ||
|
||||
// is_breakz:
|
||||
b[i] == '\r' || // CR (#xD)
|
||||
b[i] == '\n' || // LF (#xA)
|
||||
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
|
||||
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
|
||||
b[i] == 0)
|
||||
}
|
||||
|
||||
// Determine the width of the character.
|
||||
func width(b byte) int {
|
||||
// Don't replace these by a switch without first
|
||||
// confirming that it is being inlined.
|
||||
if b&0x80 == 0x00 {
|
||||
return 1
|
||||
}
|
||||
if b&0xE0 == 0xC0 {
|
||||
return 2
|
||||
}
|
||||
if b&0xF0 == 0xE0 {
|
||||
return 3
|
||||
}
|
||||
if b&0xF8 == 0xF0 {
|
||||
return 4
|
||||
}
|
||||
return 0
|
||||
|
||||
}
|
31
vendor/vendor.json
vendored
@ -1,31 +0,0 @@
|
||||
{
|
||||
"comment": "",
|
||||
"ignore": "test",
|
||||
"package": [
|
||||
{
|
||||
"checksumSHA1": "J6lNRPdrYhKft6S8x33K9brxyhE=",
|
||||
"path": "golang.org/x/crypto/acme",
|
||||
"revision": "c126467f60eb25f8f27e5a981f32a87e3965053f",
|
||||
"revisionTime": "2018-07-23T15:26:11Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "EFjIi/zCZ1Cte0MQtyxGCTgSzk8=",
|
||||
"path": "golang.org/x/crypto/acme/autocert",
|
||||
"revision": "c126467f60eb25f8f27e5a981f32a87e3965053f",
|
||||
"revisionTime": "2018-07-23T15:26:11Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=",
|
||||
"path": "golang.org/x/crypto/pbkdf2",
|
||||
"revision": "c126467f60eb25f8f27e5a981f32a87e3965053f",
|
||||
"revisionTime": "2018-07-23T15:26:11Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "ZSWoOPUNRr5+3dhkLK3C4cZAQPk=",
|
||||
"path": "gopkg.in/yaml.v2",
|
||||
"revision": "5420a8b6744d3b0345ab293f6fcba19c978f1183",
|
||||
"revisionTime": "2018-03-28T19:50:20Z"
|
||||
}
|
||||
],
|
||||
"rootPath": "github.com/astaxie/beego"
|
||||
}
|