", result)
- }
- }
-
- func() {
- ctrl.TplName = "file2.tpl"
- defer func() {
- if r := recover(); r == nil {
- t.Fatal("TestAdditionalViewPaths expected error")
- }
- }()
- ctrl.RenderString()
- }()
-
- ctrl.TplName = "file2.tpl"
- ctrl.ViewPath = dir2
- ctrl.RenderString()
-}
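
For orientation, a minimal sketch of the view-path API exercised by the deleted test above (AddViewPath, Controller.ViewPath, TplName and RenderString from the deprecated v1 package). The directory names and route are hypothetical:

    package main

    import "github.com/astaxie/beego"

    type MainController struct {
        beego.Controller
    }

    func (c *MainController) Get() {
        // Render a template from an additional view path instead of the default one.
        c.ViewPath = "views/extra" // hypothetical directory, registered in main()
        c.TplName = "file.tpl"
        html, err := c.RenderString()
        if err != nil {
            c.Abort("500") // rendering from an unregistered path fails, as the test expects
        }
        c.Ctx.WriteString(html)
    }

    func main() {
        // Additional view paths must be registered before the server starts.
        beego.AddViewPath("views/extra")
        beego.Router("/", &MainController{})
        beego.Run()
    }
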
diff --git a/doc.go b/doc.go
deleted file mode 100644
index 72284c67..00000000
--- a/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package beego provides an MVC framework
-beego: an open-source, high-performance, modular, full-stack web framework
-
-It is used for rapid development of RESTful APIs, web apps and backend services in Go.
-beego is inspired by Tornado, Sinatra and Flask with the added benefit of some Go-specific features such as interfaces and struct embedding.
-
- package main
- import "github.com/astaxie/beego"
-
- func main() {
- beego.Run()
- }
-
-more information: http://beego.me
-
-Deprecated: using pkg/, we will delete this in v2.1.0
-*/
-package beego
diff --git a/error.go b/error.go
deleted file mode 100644
index 40eea5fa..00000000
--- a/error.go
+++ /dev/null
@@ -1,492 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "fmt"
- "html/template"
- "net/http"
- "reflect"
- "runtime"
- "strconv"
- "strings"
-
- "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/utils"
-)
-
-const (
- errorTypeHandler = iota
- errorTypeController
-)
-
-var errtpl = `
-<!DOCTYPE html>
-<html>
-<head>
- <title>beego application error</title>
-</head>
-<body>
- <div id="content">
- {{.Content}}
- <a href="/" title="Home" id="home">Go Home</a>
- </div>
- <div id="footer">
- Powered by beego {{.BeegoVersion}}
- </div>
-</body>
-</html>
-`
-
-type errorInfo struct {
- controllerType reflect.Type
- handler http.HandlerFunc
- method string
- errorType int
-}
-
-// ErrorMaps holds a map of HTTP handlers keyed by error code string.
-// By default, handlers for the common 4xx and 5xx errors are registered.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-var ErrorMaps = make(map[string]*errorInfo, 10)
-
-// show 401 unauthorized error.
-func unauthorized(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 401,
- " The page you have requested can't be authorized."+
- " Perhaps you are here because:"+
- "
"+
- " The credentials you supplied are incorrect"+
- " There are errors in the website address"+
- "
",
- )
-}
-
-// show 402 Payment Required
-func paymentRequired(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 402,
- " The page you have requested Payment Required."+
- " Perhaps you are here because:"+
- "
"+
- " The credentials you supplied are incorrect"+
- " There are errors in the website address"+
- "
",
- )
-}
-
-// show 403 forbidden error.
-func forbidden(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 403,
- " The page you have requested is forbidden."+
- " Perhaps you are here because:"+
- "
"+
- " Your address may be blocked"+
- " The site may be disabled"+
- " You need to log in"+
- "
",
- )
-}
-
-// show 422 missing xsrf token
-func missingxsrf(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 422,
- " The page you have requested is forbidden."+
- " Perhaps you are here because:"+
- "
"+
- " '_xsrf' argument missing from POST"+
- "
",
- )
-}
-
-// show 417 invalid xsrf token
-func invalidxsrf(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 417,
- " The page you have requested is forbidden."+
- " Perhaps you are here because:"+
- "
"+
- " expected XSRF not found"+
- "
",
- )
-}
-
-// show 404 not found error.
-func notFound(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 404,
- " The page you have requested has flown the coop."+
- " Perhaps you are here because:"+
- "
"+
- " The page has moved"+
- " The page no longer exists"+
- " You were looking for your puppy and got lost"+
- " You like 404 pages"+
- "
",
- )
-}
-
-// show 405 Method Not Allowed
-func methodNotAllowed(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 405,
- " The method you have requested Not Allowed."+
- " Perhaps you are here because:"+
- "
"+
- " The method specified in the Request-Line is not allowed for the resource identified by the Request-URI"+
- " The response MUST include an Allow header containing a list of valid methods for the requested resource."+
- "
",
- )
-}
-
-// show 500 internal server error.
-func internalServerError(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 500,
- " The page you have requested is down right now."+
- "
"+
- " Please try again later and report the error to the website administrator"+
- "
",
- )
-}
-
-// show 501 Not Implemented.
-func notImplemented(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 501,
- " The page you have requested is Not Implemented."+
- "
"+
- " Please try again later and report the error to the website administrator"+
- "
",
- )
-}
-
-// show 502 Bad Gateway.
-func badGateway(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 502,
- " The page you have requested is down right now."+
- "
"+
- " The server, while acting as a gateway or proxy, received an invalid response from the upstream server it accessed in attempting to fulfill the request."+
- " Please try again later and report the error to the website administrator"+
- "
",
- )
-}
-
-// show 503 service unavailable error.
-func serviceUnavailable(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 503,
- " The page you have requested is unavailable."+
- " Perhaps you are here because:"+
- "
"+
- "
The page is overloaded"+
- " Please try again later."+
- "
",
- )
-}
-
-// show 504 Gateway Timeout.
-func gatewayTimeout(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 504,
- " The page you have requested is unavailable"+
- " Perhaps you are here because:"+
- "
"+
- "
The server, while acting as a gateway or proxy, did not receive a timely response from the upstream server specified by the URI."+
- " Please try again later."+
- "
",
- )
-}
-
-// show 413 Payload Too Large
-func payloadTooLarge(rw http.ResponseWriter, r *http.Request) {
- responseError(rw, r,
- 413,
- `<br>The page you have requested is unavailable.
- <br>Perhaps you are here because:<br><br>
- <ul>
- <br><li>The request entity is larger than limits defined by server.
- <br><li>Please change the request entity and try again.
- </ul>
- `,
- )
-}
-
-func responseError(rw http.ResponseWriter, r *http.Request, errCode int, errContent string) {
- t, _ := template.New("beegoerrortemp").Parse(errtpl)
- data := M{
- "Title": http.StatusText(errCode),
- "BeegoVersion": VERSION,
- "Content": template.HTML(errContent),
- }
- t.Execute(rw, data)
-}
-
-// ErrorHandler registers http.HandlerFunc to each http err code string.
-// usage:
-// beego.ErrorHandler("404",NotFound)
-// beego.ErrorHandler("500",InternalServerError)
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ErrorHandler(code string, h http.HandlerFunc) *App {
- ErrorMaps[code] = &errorInfo{
- errorType: errorTypeHandler,
- handler: h,
- method: code,
- }
- return BeeApp
-}
-
-// ErrorController registers ControllerInterface to each http err code string.
-// usage:
-// beego.ErrorController(&controllers.ErrorController{})
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ErrorController(c ControllerInterface) *App {
- reflectVal := reflect.ValueOf(c)
- rt := reflectVal.Type()
- ct := reflect.Indirect(reflectVal).Type()
- for i := 0; i < rt.NumMethod(); i++ {
- methodName := rt.Method(i).Name
- if !utils.InSlice(methodName, exceptMethod) && strings.HasPrefix(methodName, "Error") {
- errName := strings.TrimPrefix(methodName, "Error")
- ErrorMaps[errName] = &errorInfo{
- errorType: errorTypeController,
- controllerType: ct,
- method: methodName,
- }
- }
- }
- return BeeApp
-}
-
-// Exception writes the HTTP status for errCode and executes the matching error handler if one exists.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func Exception(errCode uint64, ctx *context.Context) {
- exception(strconv.FormatUint(errCode, 10), ctx)
-}
-
-// show error string as simple text message.
-// if error string is empty, show 503 or 500 error as default.
-func exception(errCode string, ctx *context.Context) {
- atoi := func(code string) int {
- v, err := strconv.Atoi(code)
- if err == nil {
- return v
- }
- if ctx.Output.Status == 0 {
- return 503
- }
- return ctx.Output.Status
- }
-
- for _, ec := range []string{errCode, "503", "500"} {
- if h, ok := ErrorMaps[ec]; ok {
- executeError(h, ctx, atoi(ec))
- return
- }
- }
- //if 50x error has been removed from errorMap
- ctx.ResponseWriter.WriteHeader(atoi(errCode))
- ctx.WriteString(errCode)
-}
-
-func executeError(err *errorInfo, ctx *context.Context, code int) {
- //make sure to log the error in the access log
- LogAccess(ctx, nil, code)
-
- if err.errorType == errorTypeHandler {
- ctx.ResponseWriter.WriteHeader(code)
- err.handler(ctx.ResponseWriter, ctx.Request)
- return
- }
- if err.errorType == errorTypeController {
- ctx.Output.SetStatus(code)
- //Invoke the request handler
- vc := reflect.New(err.controllerType)
- execController, ok := vc.Interface().(ControllerInterface)
- if !ok {
- panic("controller is not ControllerInterface")
- }
- //call the controller init function
- execController.Init(ctx, err.controllerType.Name(), err.method, vc.Interface())
-
- //call prepare function
- execController.Prepare()
-
- execController.URLMapping()
-
- method := vc.MethodByName(err.method)
- method.Call([]reflect.Value{})
-
- //render template
- if BConfig.WebConfig.AutoRender {
- if err := execController.Render(); err != nil {
- panic(err)
- }
- }
-
- // finish all runrouter. release resource
- execController.Finish()
- }
-}
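
As a reference for the registration API removed above, here is a minimal sketch of wiring custom error pages with the deprecated top-level functions, following the usage shown in the ErrorHandler/ErrorController doc comments. The handler body and template names are illustrative only:

    package main

    import (
        "net/http"

        "github.com/astaxie/beego"
    )

    // pageNotFound is a plain handler bound to the "404" key; executeError
    // writes the status code before invoking it.
    func pageNotFound(rw http.ResponseWriter, r *http.Request) {
        rw.Write([]byte("custom 404 page"))
    }

    // ErrorController covers several codes via Error<code> methods.
    type ErrorController struct {
        beego.Controller
    }

    func (c *ErrorController) Error404() {
        c.Data["content"] = "page not found"
        c.TplName = "404.tpl" // hypothetical template
    }

    func (c *ErrorController) Error500() {
        c.Data["content"] = "internal server error"
        c.TplName = "500.tpl" // hypothetical template
    }

    func main() {
        beego.ErrorHandler("404", pageNotFound)   // handler-style registration
        beego.ErrorController(&ErrorController{}) // controller-style, overrides "404" above
        beego.Run()
    }
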
diff --git a/error_test.go b/error_test.go
deleted file mode 100644
index 378aa953..00000000
--- a/error_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2016 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "net/http"
- "net/http/httptest"
- "strconv"
- "strings"
- "testing"
-)
-
-type errorTestController struct {
- Controller
-}
-
-const parseCodeError = "parse code error"
-
-func (ec *errorTestController) Get() {
- errorCode, err := ec.GetInt("code")
- if err != nil {
- ec.Abort(parseCodeError)
- }
- if errorCode != 0 {
- ec.CustomAbort(errorCode, ec.GetString("code"))
- }
- ec.Abort("404")
-}
-
-func TestErrorCode_01(t *testing.T) {
- registerDefaultErrorHandler()
- for k := range ErrorMaps {
- r, _ := http.NewRequest("GET", "/error?code="+k, nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/error", &errorTestController{})
- handler.ServeHTTP(w, r)
- code, _ := strconv.Atoi(k)
- if w.Code != code {
- t.Fail()
- }
- if !strings.Contains(w.Body.String(), http.StatusText(code)) {
- t.Fail()
- }
- }
-}
-
-func TestErrorCode_02(t *testing.T) {
- registerDefaultErrorHandler()
- r, _ := http.NewRequest("GET", "/error?code=0", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/error", &errorTestController{})
- handler.ServeHTTP(w, r)
- if w.Code != 404 {
- t.Fail()
- }
-}
-
-func TestErrorCode_03(t *testing.T) {
- registerDefaultErrorHandler()
- r, _ := http.NewRequest("GET", "/error?code=panic", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/error", &errorTestController{})
- handler.ServeHTTP(w, r)
- if w.Code != 200 {
- t.Fail()
- }
- if w.Body.String() != parseCodeError {
- t.Fail()
- }
-}
diff --git a/filter.go b/filter.go
deleted file mode 100644
index 8596d288..00000000
--- a/filter.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import "github.com/astaxie/beego/context"
-
-// FilterFunc defines a filter function which is invoked before the controller handler is executed.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type FilterFunc func(*context.Context)
-
-// FilterRouter defines a filter operation which is invoked before the controller handler is executed.
-// It can match the URL against a pattern, and execute a filter function
-// when a request with a matching URL arrives.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type FilterRouter struct {
- filterFunc FilterFunc
- tree *Tree
- pattern string
- returnOnOutput bool
- resetParams bool
-}
-
-// ValidRouter checks if the current request is matched by this filter.
-// If the request is matched, the values of the URL parameters defined
-// by the filter pattern are also returned.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (f *FilterRouter) ValidRouter(url string, ctx *context.Context) bool {
- isOk := f.tree.Match(url, ctx)
- if isOk != nil {
- if b, ok := isOk.(bool); ok {
- return b
- }
- }
- return false
-}
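
The tests in filter_test.go below exercise the pattern matching; for completeness, here is a sketch of registering a FilterFunc at the BeforeRouter position with the deprecated API. The /api route and token check are hypothetical:

    package main

    import (
        "github.com/astaxie/beego"
        "github.com/astaxie/beego/context"
    )

    // authFilter rejects requests that lack a token query parameter.
    var authFilter = func(ctx *context.Context) {
        if ctx.Input.Query("token") == "" {
            ctx.Output.SetStatus(401)
            ctx.Output.Body([]byte("missing token"))
        }
    }

    func main() {
        // Because the filter writes a body on failure, the default
        // returnOnOutput behaviour stops the request before routing.
        beego.InsertFilter("/api/*", beego.BeforeRouter, authFilter)
        beego.Run()
    }
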
diff --git a/filter_test.go b/filter_test.go
deleted file mode 100644
index 4ca4d2b8..00000000
--- a/filter_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego/context"
-)
-
-var FilterUser = func(ctx *context.Context) {
- ctx.Output.Body([]byte("i am " + ctx.Input.Param(":last") + ctx.Input.Param(":first")))
-}
-
-func TestFilter(t *testing.T) {
- r, _ := http.NewRequest("GET", "/person/asta/Xie", nil)
- w := httptest.NewRecorder()
- handler := NewControllerRegister()
- handler.InsertFilter("/person/:last/:first", BeforeRouter, FilterUser)
- handler.Add("/person/:last/:first", &TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am astaXie" {
- t.Errorf("user define func can't run")
- }
-}
-
-var FilterAdminUser = func(ctx *context.Context) {
- ctx.Output.Body([]byte("i am admin"))
-}
-
-// Filter pattern /admin/:all
-// all url like /admin/ /admin/xie will all get filter
-
-func TestPatternTwo(t *testing.T) {
- r, _ := http.NewRequest("GET", "/admin/", nil)
- w := httptest.NewRecorder()
- handler := NewControllerRegister()
- handler.InsertFilter("/admin/?:all", BeforeRouter, FilterAdminUser)
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am admin" {
- t.Errorf("filter /admin/ can't run")
- }
-}
-
-func TestPatternThree(t *testing.T) {
- r, _ := http.NewRequest("GET", "/admin/astaxie", nil)
- w := httptest.NewRecorder()
- handler := NewControllerRegister()
- handler.InsertFilter("/admin/:all", BeforeRouter, FilterAdminUser)
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am admin" {
- t.Errorf("filter /admin/astaxie can't run")
- }
-}
diff --git a/flash.go b/flash.go
deleted file mode 100644
index fe3fb974..00000000
--- a/flash.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "fmt"
- "net/url"
- "strings"
-)
-
-// FlashData is a tool for maintaining data across requests.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type FlashData struct {
- Data map[string]string
-}
-
-// NewFlash return a new empty FlashData struct.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NewFlash() *FlashData {
- return &FlashData{
- Data: make(map[string]string),
- }
-}
-
-// Set message to flash
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Set(key string, msg string, args ...interface{}) {
- if len(args) == 0 {
- fd.Data[key] = msg
- } else {
- fd.Data[key] = fmt.Sprintf(msg, args...)
- }
-}
-
-// Success writes success message to flash.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Success(msg string, args ...interface{}) {
- if len(args) == 0 {
- fd.Data["success"] = msg
- } else {
- fd.Data["success"] = fmt.Sprintf(msg, args...)
- }
-}
-
-// Notice writes notice message to flash.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Notice(msg string, args ...interface{}) {
- if len(args) == 0 {
- fd.Data["notice"] = msg
- } else {
- fd.Data["notice"] = fmt.Sprintf(msg, args...)
- }
-}
-
-// Warning writes warning message to flash.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Warning(msg string, args ...interface{}) {
- if len(args) == 0 {
- fd.Data["warning"] = msg
- } else {
- fd.Data["warning"] = fmt.Sprintf(msg, args...)
- }
-}
-
-// Error writes error message to flash.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Error(msg string, args ...interface{}) {
- if len(args) == 0 {
- fd.Data["error"] = msg
- } else {
- fd.Data["error"] = fmt.Sprintf(msg, args...)
- }
-}
-
-// Store does the saving operation of flash data.
-// the data are encoded and saved in cookie.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (fd *FlashData) Store(c *Controller) {
- c.Data["flash"] = fd.Data
- var flashValue string
- for key, value := range fd.Data {
- flashValue += "\x00" + key + "\x23" + BConfig.WebConfig.FlashSeparator + "\x23" + value + "\x00"
- }
- c.Ctx.SetCookie(BConfig.WebConfig.FlashName, url.QueryEscape(flashValue), 0, "/")
-}
-
-// ReadFromRequest parsed flash data from encoded values in cookie.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ReadFromRequest(c *Controller) *FlashData {
- flash := NewFlash()
- if cookie, err := c.Ctx.Request.Cookie(BConfig.WebConfig.FlashName); err == nil {
- v, _ := url.QueryUnescape(cookie.Value)
- vals := strings.Split(v, "\x00")
- for _, v := range vals {
- if len(v) > 0 {
- kv := strings.Split(v, "\x23"+BConfig.WebConfig.FlashSeparator+"\x23")
- if len(kv) == 2 {
- flash.Data[kv[0]] = kv[1]
- }
- }
- }
- //read one time then delete it
- c.Ctx.SetCookie(BConfig.WebConfig.FlashName, "", -1, "/")
- }
- c.Data["flash"] = flash.Data
- return flash
-}
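
A short sketch of how the flash helpers removed above were typically used from a controller; the route and template name are hypothetical:

    package main

    import "github.com/astaxie/beego"

    type SettingsController struct {
        beego.Controller
    }

    // Post stores a flash message in the cookie and redirects.
    func (c *SettingsController) Post() {
        flash := beego.NewFlash()
        flash.Success("settings saved")
        flash.Store(&c.Controller) // encodes Data into the flash cookie
        c.Redirect("/settings", 302)
    }

    // Get reads (and clears) the flash cookie; values land in c.Data["flash"].
    func (c *SettingsController) Get() {
        beego.ReadFromRequest(&c.Controller)
        c.TplName = "settings.tpl" // hypothetical template that ranges over .flash
    }
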
diff --git a/flash_test.go b/flash_test.go
deleted file mode 100644
index d5e9608d..00000000
--- a/flash_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-)
-
-type TestFlashController struct {
- Controller
-}
-
-func (t *TestFlashController) TestWriteFlash() {
- flash := NewFlash()
- flash.Notice("TestFlashString")
- flash.Store(&t.Controller)
- // we choose to serve json because we don't want to load a template html file
- t.ServeJSON(true)
-}
-
-func TestFlashHeader(t *testing.T) {
- // create fake GET request
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
-
- // setup the handler
- handler := NewControllerRegister()
- handler.Add("/", &TestFlashController{}, "get:TestWriteFlash")
- handler.ServeHTTP(w, r)
-
- // get the Set-Cookie value
- sc := w.Header().Get("Set-Cookie")
- // match for the expected header
- res := strings.Contains(sc, "BEEGO_FLASH=%00notice%23BEEGOFLASH%23TestFlashString%00")
- // validate the assertion
- if !res {
- t.Errorf("TestFlashHeader() unable to validate flash message")
- }
-}
diff --git a/fs.go b/fs.go
deleted file mode 100644
index 3300813d..00000000
--- a/fs.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package beego
-
-import (
- "net/http"
- "os"
- "path/filepath"
-)
-
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type FileSystem struct {
-}
-
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (d FileSystem) Open(name string) (http.File, error) {
- return os.Open(name)
-}
-
-// Walk walks the file tree rooted at root in filesystem, calling walkFn for each file or
-// directory in the tree, including root. All errors that arise visiting files
-// and directories are filtered by walkFn.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func Walk(fs http.FileSystem, root string, walkFn filepath.WalkFunc) error {
-
- f, err := fs.Open(root)
- if err != nil {
- return err
- }
- info, err := f.Stat()
- if err != nil {
- err = walkFn(root, nil, err)
- } else {
- err = walk(fs, root, info, walkFn)
- }
- if err == filepath.SkipDir {
- return nil
- }
- return err
-}
-
-// walk recursively descends path, calling walkFn.
-func walk(fs http.FileSystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
- var err error
- if !info.IsDir() {
- return walkFn(path, info, nil)
- }
-
- dir, err := fs.Open(path)
- if err != nil {
- if err1 := walkFn(path, info, err); err1 != nil {
- return err1
- }
- return err
- }
- defer dir.Close()
- dirs, err := dir.Readdir(-1)
- err1 := walkFn(path, info, err)
- // If err != nil, walk can't walk into this directory.
- // err1 != nil means walkFn want walk to skip this directory or stop walking.
- // Therefore, if one of err and err1 isn't nil, walk will return.
- if err != nil || err1 != nil {
- // The caller's behavior is controlled by the return value, which is decided
- // by walkFn. walkFn may ignore err and return nil.
- // If walkFn returns SkipDir, it will be handled by the caller.
- // So walk should return whatever walkFn returns.
- return err1
- }
-
- for _, fileInfo := range dirs {
- filename := filepath.Join(path, fileInfo.Name())
- if err = walk(fs, filename, fileInfo, walkFn); err != nil {
- if !fileInfo.IsDir() || err != filepath.SkipDir {
- return err
- }
- }
- }
- return nil
-}
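
A sketch of walking an http.FileSystem with the deprecated helper deleted above; beego.FileSystem simply wraps os.Open, and the ./static directory is a placeholder:

    package main

    import (
        "fmt"
        "os"

        "github.com/astaxie/beego"
    )

    func main() {
        // List regular files under ./static using any http.FileSystem.
        err := beego.Walk(beego.FileSystem{}, "static", func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            if !info.IsDir() {
                fmt.Println(path)
            }
            return nil
        })
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
        }
    }
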
diff --git a/go.mod b/go.mod
index e1b9fcc2..91bd9aef 100644
--- a/go.mod
+++ b/go.mod
@@ -36,6 +36,7 @@ require (
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c
google.golang.org/grpc v1.31.0 // indirect
gopkg.in/yaml.v2 v2.2.8
+ honnef.co/go/tools v0.0.1-2020.1.5 // indirect
)
replace golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 => github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85
diff --git a/go.sum b/go.sum
index 1666981d..95babc92 100644
--- a/go.sum
+++ b/go.sum
@@ -93,6 +93,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -100,6 +101,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -159,6 +161,7 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 h1:X+yvsM2yrEktyI+b2qND5gpH8YhURn0k8OCaeRnkINo=
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0 h1:QIF48X1cihydXibm+4wfAc0r/qyPyuFiPFRNphdMpEE=
@@ -182,16 +185,23 @@ github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2K
github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8=
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -204,12 +214,15 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -220,6 +233,7 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
@@ -231,11 +245,18 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c h1:FodBYPZKH5tAN2O60HlglMwXGAeV/4k+NKbli79M/2c=
golang.org/x/tools v0.0.0-20200117065230-39095c1d176c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200815165600-90abf76919f3 h1:0aScV/0rLmANzEYIhjCOi2pTvDyhZNduBUMD2q3iqs4=
+golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
@@ -254,9 +275,11 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
@@ -268,4 +291,7 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2020.1.5 h1:nI5egYTGJakVyOryqLs1cQO5dO0ksin5XXs2pspk75k=
+honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
diff --git a/grace/grace.go b/grace/grace.go
deleted file mode 100644
index 39d067fd..00000000
--- a/grace/grace.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package grace is used for hot reload (graceful restart).
-// Description: http://grisha.org/blog/2014/06/03/graceful-restart-in-golang/
-//
-// Usage:
-//
-// import(
-// "log"
-// "net/http"
-// "os"
-//
-// "github.com/astaxie/beego/grace"
-// )
-//
-// func handler(w http.ResponseWriter, r *http.Request) {
-// w.Write([]byte("WORLD!"))
-// }
-//
-// func main() {
-// mux := http.NewServeMux()
-// mux.HandleFunc("/hello", handler)
-//
-// err := grace.ListenAndServe("localhost:8080", mux)
-// if err != nil {
-// log.Println(err)
-// }
-// log.Println("Server on 8080 stopped")
-// os.Exit(0)
-// }
-package grace
-
-import (
- "flag"
- "net/http"
- "os"
- "strings"
- "sync"
- "syscall"
- "time"
-)
-
-const (
- // PreSignal is the position to add filter before signal
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- PreSignal = iota
- // PostSignal is the position to add filter after signal
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- PostSignal
- // StateInit represent the application inited
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- StateInit
- // StateRunning represent the application is running
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- StateRunning
- // StateShuttingDown represent the application is shutting down
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- StateShuttingDown
- // StateTerminate represent the application is killed
- // Deprecated: using pkg/grace, we will delete this in v2.1.0
- StateTerminate
-)
-
-var (
- regLock *sync.Mutex
- runningServers map[string]*Server
- runningServersOrder []string
- socketPtrOffsetMap map[string]uint
- runningServersForked bool
-
- // DefaultReadTimeOut is the HTTP read timeout
- DefaultReadTimeOut time.Duration
- // DefaultWriteTimeOut is the HTTP Write timeout
- DefaultWriteTimeOut time.Duration
- // DefaultMaxHeaderBytes is the Max HTTP Header size, default is 0, no limit
- DefaultMaxHeaderBytes int
- // DefaultTimeout is the shutdown server's timeout. default is 60s
- DefaultTimeout = 60 * time.Second
-
- isChild bool
- socketOrder string
-
- hookableSignals []os.Signal
-)
-
-func init() {
- flag.BoolVar(&isChild, "graceful", false, "listen on open fd (after forking)")
- flag.StringVar(&socketOrder, "socketorder", "", "previous initialization order - used when more than one listener was started")
-
- regLock = &sync.Mutex{}
- runningServers = make(map[string]*Server)
- runningServersOrder = []string{}
- socketPtrOffsetMap = make(map[string]uint)
-
- hookableSignals = []os.Signal{
- syscall.SIGHUP,
- syscall.SIGINT,
- syscall.SIGTERM,
- }
-}
-
-// NewServer returns a new graceServer.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func NewServer(addr string, handler http.Handler) (srv *Server) {
- regLock.Lock()
- defer regLock.Unlock()
-
- if !flag.Parsed() {
- flag.Parse()
- }
- if len(socketOrder) > 0 {
- for i, addr := range strings.Split(socketOrder, ",") {
- socketPtrOffsetMap[addr] = uint(i)
- }
- } else {
- socketPtrOffsetMap[addr] = uint(len(runningServersOrder))
- }
-
- srv = &Server{
- sigChan: make(chan os.Signal),
- isChild: isChild,
- SignalHooks: map[int]map[os.Signal][]func(){
- PreSignal: {
- syscall.SIGHUP: {},
- syscall.SIGINT: {},
- syscall.SIGTERM: {},
- },
- PostSignal: {
- syscall.SIGHUP: {},
- syscall.SIGINT: {},
- syscall.SIGTERM: {},
- },
- },
- state: StateInit,
- Network: "tcp",
- terminalChan: make(chan error), //no cache channel
- }
- srv.Server = &http.Server{
- Addr: addr,
- ReadTimeout: DefaultReadTimeOut,
- WriteTimeout: DefaultWriteTimeOut,
- MaxHeaderBytes: DefaultMaxHeaderBytes,
- Handler: handler,
- }
-
- runningServersOrder = append(runningServersOrder, addr)
- runningServers[addr] = srv
- return srv
-}
-
-// ListenAndServe refer http.ListenAndServe
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func ListenAndServe(addr string, handler http.Handler) error {
- server := NewServer(addr, handler)
- return server.ListenAndServe()
-}
-
-// ListenAndServeTLS refer http.ListenAndServeTLS
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func ListenAndServeTLS(addr string, certFile string, keyFile string, handler http.Handler) error {
- server := NewServer(addr, handler)
- return server.ListenAndServeTLS(certFile, keyFile)
-}
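
A variant of the package-doc example above that also sets the exported timeout knobs. The values are illustrative; they must be assigned before the server is created:

    package main

    import (
        "log"
        "net/http"
        "time"

        "github.com/astaxie/beego/grace"
    )

    func main() {
        mux := http.NewServeMux()
        mux.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("WORLD!"))
        })

        grace.DefaultReadTimeOut = 10 * time.Second
        grace.DefaultWriteTimeOut = 10 * time.Second
        grace.DefaultTimeout = 30 * time.Second // graceful-shutdown deadline

        if err := grace.ListenAndServe("localhost:8080", mux); err != nil {
            log.Println(err)
        }
    }
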
diff --git a/grace/server.go b/grace/server.go
deleted file mode 100644
index cd659f82..00000000
--- a/grace/server.go
+++ /dev/null
@@ -1,362 +0,0 @@
-package grace
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "os"
- "os/exec"
- "os/signal"
- "strings"
- "syscall"
- "time"
-)
-
-// Server embedded http.Server
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-type Server struct {
- *http.Server
- ln net.Listener
- SignalHooks map[int]map[os.Signal][]func()
- sigChan chan os.Signal
- isChild bool
- state uint8
- Network string
- terminalChan chan error
-}
-
-// Serve accepts incoming connections on the Listener l,
-// creating a new service goroutine for each.
-// The service goroutines read requests and then call srv.Handler to reply to them.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func (srv *Server) Serve() (err error) {
- srv.state = StateRunning
- defer func() { srv.state = StateTerminate }()
-
- // When Shutdown is called, Serve, ListenAndServe, and ListenAndServeTLS
- // immediately return ErrServerClosed. Make sure the program doesn't exit
- // and waits instead for Shutdown to return.
- if err = srv.Server.Serve(srv.ln); err != nil && err != http.ErrServerClosed {
- log.Println(syscall.Getpid(), "Server.Serve() error:", err)
- return err
- }
-
- log.Println(syscall.Getpid(), srv.ln.Addr(), "Listener closed.")
- // wait for Shutdown to return
- if shutdownErr := <-srv.terminalChan; shutdownErr != nil {
- return shutdownErr
- }
- return
-}
-
-// ListenAndServe listens on the TCP network address srv.Addr and then calls Serve
-// to handle requests on incoming connections. If srv.Addr is blank, ":http" is
-// used.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func (srv *Server) ListenAndServe() (err error) {
- addr := srv.Addr
- if addr == "" {
- addr = ":http"
- }
-
- go srv.handleSignals()
-
- srv.ln, err = srv.getListener(addr)
- if err != nil {
- log.Println(err)
- return err
- }
-
- if srv.isChild {
- process, err := os.FindProcess(os.Getppid())
- if err != nil {
- log.Println(err)
- return err
- }
- err = process.Signal(syscall.SIGTERM)
- if err != nil {
- return err
- }
- }
-
- log.Println(os.Getpid(), srv.Addr)
- return srv.Serve()
-}
-
-// ListenAndServeTLS listens on the TCP network address srv.Addr and then calls
-// Serve to handle requests on incoming TLS connections.
-//
-// Filenames containing a certificate and matching private key for the server must
-// be provided. If the certificate is signed by a certificate authority, the
-// certFile should be the concatenation of the server's certificate followed by the
-// CA's certificate.
-//
-// If srv.Addr is blank, ":https" is used.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func (srv *Server) ListenAndServeTLS(certFile, keyFile string) (err error) {
- addr := srv.Addr
- if addr == "" {
- addr = ":https"
- }
-
- if srv.TLSConfig == nil {
- srv.TLSConfig = &tls.Config{}
- }
- if srv.TLSConfig.NextProtos == nil {
- srv.TLSConfig.NextProtos = []string{"http/1.1"}
- }
-
- srv.TLSConfig.Certificates = make([]tls.Certificate, 1)
- srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return
- }
-
- go srv.handleSignals()
-
- ln, err := srv.getListener(addr)
- if err != nil {
- log.Println(err)
- return err
- }
- srv.ln = tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)
-
- if srv.isChild {
- process, err := os.FindProcess(os.Getppid())
- if err != nil {
- log.Println(err)
- return err
- }
- err = process.Signal(syscall.SIGTERM)
- if err != nil {
- return err
- }
- }
-
- log.Println(os.Getpid(), srv.Addr)
- return srv.Serve()
-}
-
-// ListenAndServeMutualTLS listens on the TCP network address srv.Addr and then calls
-// Serve to handle requests on incoming mutual TLS connections.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func (srv *Server) ListenAndServeMutualTLS(certFile, keyFile, trustFile string) (err error) {
- addr := srv.Addr
- if addr == "" {
- addr = ":https"
- }
-
- if srv.TLSConfig == nil {
- srv.TLSConfig = &tls.Config{}
- }
- if srv.TLSConfig.NextProtos == nil {
- srv.TLSConfig.NextProtos = []string{"http/1.1"}
- }
-
- srv.TLSConfig.Certificates = make([]tls.Certificate, 1)
- srv.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return
- }
- srv.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
- pool := x509.NewCertPool()
- data, err := ioutil.ReadFile(trustFile)
- if err != nil {
- log.Println(err)
- return err
- }
- pool.AppendCertsFromPEM(data)
- srv.TLSConfig.ClientCAs = pool
- log.Println("Mutual HTTPS")
- go srv.handleSignals()
-
- ln, err := srv.getListener(addr)
- if err != nil {
- log.Println(err)
- return err
- }
- srv.ln = tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig)
-
- if srv.isChild {
- process, err := os.FindProcess(os.Getppid())
- if err != nil {
- log.Println(err)
- return err
- }
- err = process.Signal(syscall.SIGTERM)
- if err != nil {
- return err
- }
- }
-
- log.Println(os.Getpid(), srv.Addr)
- return srv.Serve()
-}
-
-// getListener either opens a new socket to listen on, or takes the acceptor socket
-// it got passed when restarted.
-func (srv *Server) getListener(laddr string) (l net.Listener, err error) {
- if srv.isChild {
- var ptrOffset uint
- if len(socketPtrOffsetMap) > 0 {
- ptrOffset = socketPtrOffsetMap[laddr]
- log.Println("laddr", laddr, "ptr offset", socketPtrOffsetMap[laddr])
- }
-
- f := os.NewFile(uintptr(3+ptrOffset), "")
- l, err = net.FileListener(f)
- if err != nil {
- err = fmt.Errorf("net.FileListener error: %v", err)
- return
- }
- } else {
- l, err = net.Listen(srv.Network, laddr)
- if err != nil {
- err = fmt.Errorf("net.Listen error: %v", err)
- return
- }
- }
- return
-}
-
-type tcpKeepAliveListener struct {
- *net.TCPListener
-}
-
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
-
-// handleSignals listens for os Signals and calls any hooked in function that the
-// user had registered with the signal.
-func (srv *Server) handleSignals() {
- var sig os.Signal
-
- signal.Notify(
- srv.sigChan,
- hookableSignals...,
- )
-
- pid := syscall.Getpid()
- for {
- sig = <-srv.sigChan
- srv.signalHooks(PreSignal, sig)
- switch sig {
- case syscall.SIGHUP:
- log.Println(pid, "Received SIGHUP. forking.")
- err := srv.fork()
- if err != nil {
- log.Println("Fork err:", err)
- }
- case syscall.SIGINT:
- log.Println(pid, "Received SIGINT.")
- srv.shutdown()
- case syscall.SIGTERM:
- log.Println(pid, "Received SIGTERM.")
- srv.shutdown()
- default:
- log.Printf("Received %v: nothing i care about...\n", sig)
- }
- srv.signalHooks(PostSignal, sig)
- }
-}
-
-func (srv *Server) signalHooks(ppFlag int, sig os.Signal) {
- if _, notSet := srv.SignalHooks[ppFlag][sig]; !notSet {
- return
- }
- for _, f := range srv.SignalHooks[ppFlag][sig] {
- f()
- }
-}
-
-// shutdown closes the listener so that no new connections are accepted. it also
-// starts a goroutine that will serverTimeout (stop all running requests) the server
-// after DefaultTimeout.
-func (srv *Server) shutdown() {
- if srv.state != StateRunning {
- return
- }
-
- srv.state = StateShuttingDown
- log.Println(syscall.Getpid(), "Waiting for connections to finish...")
- ctx := context.Background()
- if DefaultTimeout >= 0 {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(context.Background(), DefaultTimeout)
- defer cancel()
- }
- srv.terminalChan <- srv.Server.Shutdown(ctx)
-}
-
-func (srv *Server) fork() (err error) {
- regLock.Lock()
- defer regLock.Unlock()
- if runningServersForked {
- return
- }
- runningServersForked = true
-
- var files = make([]*os.File, len(runningServers))
- var orderArgs = make([]string, len(runningServers))
- for _, srvPtr := range runningServers {
- f, _ := srvPtr.ln.(*net.TCPListener).File()
- files[socketPtrOffsetMap[srvPtr.Server.Addr]] = f
- orderArgs[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.Server.Addr
- }
-
- log.Println(files)
- path := os.Args[0]
- var args []string
- if len(os.Args) > 1 {
- for _, arg := range os.Args[1:] {
- if arg == "-graceful" {
- break
- }
- args = append(args, arg)
- }
- }
- args = append(args, "-graceful")
- if len(runningServers) > 1 {
- args = append(args, fmt.Sprintf(`-socketorder=%s`, strings.Join(orderArgs, ",")))
- log.Println(args)
- }
- cmd := exec.Command(path, args...)
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- cmd.ExtraFiles = files
- err = cmd.Start()
- if err != nil {
- log.Fatalf("Restart: Failed to launch, error: %v", err)
- }
-
- return
-}
-
-// RegisterSignalHook registers a function to be run PreSignal or PostSignal for a given signal.
-// Deprecated: using pkg/grace, we will delete this in v2.1.0
-func (srv *Server) RegisterSignalHook(ppFlag int, sig os.Signal, f func()) (err error) {
- if ppFlag != PreSignal && ppFlag != PostSignal {
- err = fmt.Errorf("Invalid ppFlag argument. Must be either grace.PreSignal or grace.PostSignal")
- return
- }
- for _, s := range hookableSignals {
- if s == sig {
- srv.SignalHooks[ppFlag][sig] = append(srv.SignalHooks[ppFlag][sig], f)
- return
- }
- }
- err = fmt.Errorf("Signal '%v' is not supported", sig)
- return
-}
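
A minimal sketch of the signal-hook API documented above, registering callbacks around SIGTERM handling; the log messages are illustrative:

    package main

    import (
        "log"
        "net/http"
        "syscall"

        "github.com/astaxie/beego/grace"
    )

    func main() {
        srv := grace.NewServer("localhost:8080", http.DefaultServeMux)

        srv.RegisterSignalHook(grace.PreSignal, syscall.SIGTERM, func() {
            log.Println("SIGTERM received, draining connections")
        })
        srv.RegisterSignalHook(grace.PostSignal, syscall.SIGTERM, func() {
            log.Println("shutdown handled")
        })

        if err := srv.ListenAndServe(); err != nil {
            log.Println(err)
        }
    }
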
diff --git a/hooks.go b/hooks.go
deleted file mode 100644
index 49c42d5a..00000000
--- a/hooks.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package beego
-
-import (
- "encoding/json"
- "mime"
- "net/http"
- "path/filepath"
-
- "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/logs"
- "github.com/astaxie/beego/session"
-)
-
-// register MIME type with content type
-func registerMime() error {
- for k, v := range mimemaps {
- mime.AddExtensionType(k, v)
- }
- return nil
-}
-
-// register default error http handlers, 404,401,403,500 and 503.
-func registerDefaultErrorHandler() error {
- m := map[string]func(http.ResponseWriter, *http.Request){
- "401": unauthorized,
- "402": paymentRequired,
- "403": forbidden,
- "404": notFound,
- "405": methodNotAllowed,
- "500": internalServerError,
- "501": notImplemented,
- "502": badGateway,
- "503": serviceUnavailable,
- "504": gatewayTimeout,
- "417": invalidxsrf,
- "422": missingxsrf,
- "413": payloadTooLarge,
- }
- for e, h := range m {
- if _, ok := ErrorMaps[e]; !ok {
- ErrorHandler(e, h)
- }
- }
- return nil
-}
-
-func registerSession() error {
- if BConfig.WebConfig.Session.SessionOn {
- var err error
- sessionConfig := AppConfig.String("sessionConfig")
- conf := new(session.ManagerConfig)
- if sessionConfig == "" {
- conf.CookieName = BConfig.WebConfig.Session.SessionName
- conf.EnableSetCookie = BConfig.WebConfig.Session.SessionAutoSetCookie
- conf.Gclifetime = BConfig.WebConfig.Session.SessionGCMaxLifetime
- conf.Secure = BConfig.Listen.EnableHTTPS
- conf.CookieLifeTime = BConfig.WebConfig.Session.SessionCookieLifeTime
- conf.ProviderConfig = filepath.ToSlash(BConfig.WebConfig.Session.SessionProviderConfig)
- conf.DisableHTTPOnly = BConfig.WebConfig.Session.SessionDisableHTTPOnly
- conf.Domain = BConfig.WebConfig.Session.SessionDomain
- conf.EnableSidInHTTPHeader = BConfig.WebConfig.Session.SessionEnableSidInHTTPHeader
- conf.SessionNameInHTTPHeader = BConfig.WebConfig.Session.SessionNameInHTTPHeader
- conf.EnableSidInURLQuery = BConfig.WebConfig.Session.SessionEnableSidInURLQuery
- } else {
- if err = json.Unmarshal([]byte(sessionConfig), conf); err != nil {
- return err
- }
- }
- if GlobalSessions, err = session.NewManager(BConfig.WebConfig.Session.SessionProvider, conf); err != nil {
- return err
- }
- go GlobalSessions.GC()
- }
- return nil
-}
-
-func registerTemplate() error {
- defer lockViewPaths()
- if err := AddViewPath(BConfig.WebConfig.ViewsPath); err != nil {
- if BConfig.RunMode == DEV {
- logs.Warn(err)
- }
- return err
- }
- return nil
-}
-
-func registerAdmin() error {
- if BConfig.Listen.EnableAdmin {
- go beeAdminApp.Run()
- }
- return nil
-}
-
-func registerGzip() error {
- if BConfig.EnableGzip {
- context.InitGzip(
- AppConfig.DefaultInt("gzipMinLength", -1),
- AppConfig.DefaultInt("gzipCompressLevel", -1),
- AppConfig.DefaultStrings("includedMethods", []string{"GET"}),
- )
- }
- return nil
-}
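
registerSession above accepts the session configuration either from individual settings or as one JSON blob in the sessionConfig app setting. Below is a sketch of the same unmarshalling path with illustrative values and the in-memory provider:

    package main

    import (
        "encoding/json"
        "log"

        "github.com/astaxie/beego/session"
    )

    func main() {
        // Example of the JSON shape registerSession unmarshals into ManagerConfig.
        sessionConfig := `{
            "cookieName": "gosessionid",
            "enableSetCookie": true,
            "gclifetime": 3600,
            "providerConfig": ""
        }`

        conf := new(session.ManagerConfig)
        if err := json.Unmarshal([]byte(sessionConfig), conf); err != nil {
            log.Fatal(err)
        }
        manager, err := session.NewManager("memory", conf)
        if err != nil {
            log.Fatal(err)
        }
        go manager.GC() // mirrors the GC goroutine started by registerSession
    }
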
diff --git a/httplib/README.md b/httplib/README.md
deleted file mode 100644
index 97df8e6b..00000000
--- a/httplib/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# httplib
-httplib is a library that helps you fetch remote URLs, much like curl.
-
-# How to use?
-
-## GET
-You can use Get to fetch data.
-
- import "github.com/astaxie/beego/httplib"
-
- str, err := httplib.Get("http://beego.me/").String()
- if err != nil {
- // error
- }
- fmt.Println(str)
-
-## POST
-POST data to remote url
-
- req := httplib.Post("http://beego.me/")
- req.Param("username","astaxie")
- req.Param("password","123456")
- str, err := req.String()
- if err != nil {
- // error
- }
- fmt.Println(str)
-
-## Set timeout
-
-The default timeout is `60` seconds, function prototype:
-
- SetTimeout(connectTimeout, readWriteTimeout time.Duration)
-
-Example:
-
- // GET
- httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second)
-
- // POST
- httplib.Post("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second)
-
-
-## Debug
-
-If you want to debug the request info, set the debug on
-
- httplib.Get("http://beego.me/").Debug(true)
-
-## Set HTTP Basic Auth
-
- str, err := Get("http://beego.me/").SetBasicAuth("user", "passwd").String()
- if err != nil {
- // error
- }
- fmt.Println(str)
-
-## Set HTTPS
-
-If the request URL is HTTPS, you can configure the client to support TLS:
-
- httplib.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
-
-More info about the `tls.Config` please visit http://golang.org/pkg/crypto/tls/#Config
-
-## Set HTTP Version
-
-Some servers require you to specify the HTTP protocol version:
-
- httplib.Get("http://beego.me/").SetProtocolVersion("HTTP/1.1")
-
-## Set Cookie
-
-Some HTTP requests need cookies. Set them like this:
-
- cookie := &http.Cookie{}
- cookie.Name = "username"
- cookie.Value = "astaxie"
- httplib.Get("http://beego.me/").SetCookie(cookie)
-
-## Upload file
-
-httplib supports multi-file upload; use `req.PostFile()`:
-
- req := httplib.Post("http://beego.me/")
- req.Param("username","astaxie")
- req.PostFile("uploadfile1", "httplib.pdf")
- str, err := req.String()
- if err != nil {
- // error
- }
- fmt.Println(str)
-
-
-See godoc for further documentation and examples.
-
-* [godoc.org/github.com/astaxie/beego/httplib](https://godoc.org/github.com/astaxie/beego/httplib)
diff --git a/httplib/httplib.go b/httplib/httplib.go
deleted file mode 100644
index 8ae95641..00000000
--- a/httplib/httplib.go
+++ /dev/null
@@ -1,697 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package httplib is used as http.Client
-// Usage:
-//
-// import "github.com/astaxie/beego/httplib"
-//
-// b := httplib.Post("http://beego.me/")
-// b.Param("username","astaxie")
-// b.Param("password","123456")
-// b.PostFile("uploadfile1", "httplib.pdf")
-// b.PostFile("uploadfile2", "httplib.txt")
-// str, err := b.String()
-// if err != nil {
-// t.Fatal(err)
-// }
-// fmt.Println(str)
-//
-// more docs http://beego.me/docs/module/httplib.md
-package httplib
-
-import (
- "bytes"
- "compress/gzip"
- "crypto/tls"
- "encoding/json"
- "encoding/xml"
- "io"
- "io/ioutil"
- "log"
- "mime/multipart"
- "net"
- "net/http"
- "net/http/cookiejar"
- "net/http/httputil"
- "net/url"
- "os"
- "path"
- "strings"
- "sync"
- "time"
-
- "gopkg.in/yaml.v2"
-)
-
-var defaultSetting = BeegoHTTPSettings{
- UserAgent: "beegoServer",
- ConnectTimeout: 60 * time.Second,
- ReadWriteTimeout: 60 * time.Second,
- Gzip: true,
- DumpBody: true,
-}
-
-var defaultCookieJar http.CookieJar
-var settingMutex sync.Mutex
-
-// createDefaultCookie creates a global cookiejar to store cookies.
-func createDefaultCookie() {
- settingMutex.Lock()
- defer settingMutex.Unlock()
- defaultCookieJar, _ = cookiejar.New(nil)
-}
-
-// SetDefaultSetting Overwrite default settings
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func SetDefaultSetting(setting BeegoHTTPSettings) {
- settingMutex.Lock()
- defer settingMutex.Unlock()
- defaultSetting = setting
-}
-
-// NewBeegoRequest returns a *BeegoHTTPRequest with the specified method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func NewBeegoRequest(rawurl, method string) *BeegoHTTPRequest {
- var resp http.Response
- u, err := url.Parse(rawurl)
- if err != nil {
- log.Println("Httplib:", err)
- }
- req := http.Request{
- URL: u,
- Method: method,
- Header: make(http.Header),
- Proto: "HTTP/1.1",
- ProtoMajor: 1,
- ProtoMinor: 1,
- }
- return &BeegoHTTPRequest{
- url: rawurl,
- req: &req,
- params: map[string][]string{},
- files: map[string]string{},
- setting: defaultSetting,
- resp: &resp,
- }
-}
-
-// Get returns *BeegoHttpRequest with GET method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func Get(url string) *BeegoHTTPRequest {
- return NewBeegoRequest(url, "GET")
-}
-
-// Post returns *BeegoHttpRequest with POST method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func Post(url string) *BeegoHTTPRequest {
- return NewBeegoRequest(url, "POST")
-}
-
-// Put returns *BeegoHttpRequest with PUT method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func Put(url string) *BeegoHTTPRequest {
- return NewBeegoRequest(url, "PUT")
-}
-
-// Delete returns *BeegoHttpRequest with DELETE method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func Delete(url string) *BeegoHTTPRequest {
- return NewBeegoRequest(url, "DELETE")
-}
-
-// Head returns *BeegoHttpRequest with HEAD method.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func Head(url string) *BeegoHTTPRequest {
- return NewBeegoRequest(url, "HEAD")
-}
-
-// BeegoHTTPSettings is the http.Client setting
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-type BeegoHTTPSettings struct {
- ShowDebug bool
- UserAgent string
- ConnectTimeout time.Duration
- ReadWriteTimeout time.Duration
- TLSClientConfig *tls.Config
- Proxy func(*http.Request) (*url.URL, error)
- Transport http.RoundTripper
- CheckRedirect func(req *http.Request, via []*http.Request) error
- EnableCookie bool
- Gzip bool
- DumpBody bool
- Retries int // if set to -1 means will retry forever
- RetryDelay time.Duration
-}
-
-// BeegoHTTPRequest provides more useful methods for requesting one url than http.Request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-type BeegoHTTPRequest struct {
- url string
- req *http.Request
- params map[string][]string
- files map[string]string
- setting BeegoHTTPSettings
- resp *http.Response
- body []byte
- dump []byte
-}
-
-// GetRequest returns the underlying request object.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) GetRequest() *http.Request {
- return b.req
-}
-
-// Setting changes the request settings.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Setting(setting BeegoHTTPSettings) *BeegoHTTPRequest {
- b.setting = setting
- return b
-}
-
-// SetBasicAuth sets the request's Authorization header to use HTTP Basic Authentication with the provided username and password.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetBasicAuth(username, password string) *BeegoHTTPRequest {
- b.req.SetBasicAuth(username, password)
- return b
-}
-
-// SetEnableCookie enables or disables the cookiejar.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetEnableCookie(enable bool) *BeegoHTTPRequest {
- b.setting.EnableCookie = enable
- return b
-}
-
-// SetUserAgent sets User-Agent header field
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetUserAgent(useragent string) *BeegoHTTPRequest {
- b.setting.UserAgent = useragent
- return b
-}
-
-// Debug sets whether to show debug information when executing the request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Debug(isdebug bool) *BeegoHTTPRequest {
- b.setting.ShowDebug = isdebug
- return b
-}
-
-// Retries sets the number of retries.
-// The default is 0, meaning no retries.
-// -1 means retry forever.
-// Any other value means that fixed number of retries.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Retries(times int) *BeegoHTTPRequest {
- b.setting.Retries = times
- return b
-}
-
-// RetryDelay sets the delay between retries.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) RetryDelay(delay time.Duration) *BeegoHTTPRequest {
- b.setting.RetryDelay = delay
- return b
-}
-
-// DumpBody sets whether to dump the request body.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) DumpBody(isdump bool) *BeegoHTTPRequest {
- b.setting.DumpBody = isdump
- return b
-}
-
-// DumpRequest returns the dumped request bytes.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) DumpRequest() []byte {
- return b.dump
-}
-
-// SetTimeout sets the connect timeout and read-write timeout for the request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetTimeout(connectTimeout, readWriteTimeout time.Duration) *BeegoHTTPRequest {
- b.setting.ConnectTimeout = connectTimeout
- b.setting.ReadWriteTimeout = readWriteTimeout
- return b
-}
-
-// SetTLSClientConfig sets the TLS connection configuration used when visiting an HTTPS URL.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetTLSClientConfig(config *tls.Config) *BeegoHTTPRequest {
- b.setting.TLSClientConfig = config
- return b
-}
-
-// Header sets a header field on the request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Header(key, value string) *BeegoHTTPRequest {
- b.req.Header.Set(key, value)
- return b
-}
-
-// SetHost sets the request host.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetHost(host string) *BeegoHTTPRequest {
- b.req.Host = host
- return b
-}
-
-// SetProtocolVersion sets the HTTP protocol version of the request.
-// If vers is empty, HTTP/1.1 is used.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetProtocolVersion(vers string) *BeegoHTTPRequest {
- if len(vers) == 0 {
- vers = "HTTP/1.1"
- }
-
- major, minor, ok := http.ParseHTTPVersion(vers)
- if ok {
- b.req.Proto = vers
- b.req.ProtoMajor = major
- b.req.ProtoMinor = minor
- }
-
- return b
-}
-
-// SetCookie adds a cookie to the request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetCookie(cookie *http.Cookie) *BeegoHTTPRequest {
- b.req.Header.Add("Cookie", cookie.String())
- return b
-}
-
-// SetTransport sets the transport used by the client.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetTransport(transport http.RoundTripper) *BeegoHTTPRequest {
- b.setting.Transport = transport
- return b
-}
-
-// SetProxy sets the HTTP proxy.
-// example:
-//
-// func(req *http.Request) (*url.URL, error) {
-// u, _ := url.ParseRequestURI("http://127.0.0.1:8118")
-// return u, nil
-// }
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetProxy(proxy func(*http.Request) (*url.URL, error)) *BeegoHTTPRequest {
- b.setting.Proxy = proxy
- return b
-}
-
-// SetCheckRedirect specifies the policy for handling redirects.
-//
-// If CheckRedirect is nil, the Client uses its default policy,
-// which is to stop after 10 consecutive requests.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) SetCheckRedirect(redirect func(req *http.Request, via []*http.Request) error) *BeegoHTTPRequest {
- b.setting.CheckRedirect = redirect
- return b
-}
-
-// Param adds a query parameter to the request.
-// Params build a query string like ?key1=value1&key2=value2...
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Param(key, value string) *BeegoHTTPRequest {
- if param, ok := b.params[key]; ok {
- b.params[key] = append(param, value)
- } else {
- b.params[key] = []string{value}
- }
- return b
-}
-
-// PostFile adds a file to be posted with the request.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) PostFile(formname, filename string) *BeegoHTTPRequest {
- b.files[formname] = filename
- return b
-}
-
-// Body sets the raw request body.
-// It supports string and []byte.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Body(data interface{}) *BeegoHTTPRequest {
- switch t := data.(type) {
- case string:
- bf := bytes.NewBufferString(t)
- b.req.Body = ioutil.NopCloser(bf)
- b.req.ContentLength = int64(len(t))
- case []byte:
- bf := bytes.NewBuffer(t)
- b.req.Body = ioutil.NopCloser(bf)
- b.req.ContentLength = int64(len(t))
- }
- return b
-}
-
-// XMLBody sets the raw request body, encoded as XML.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) XMLBody(obj interface{}) (*BeegoHTTPRequest, error) {
- if b.req.Body == nil && obj != nil {
- byts, err := xml.Marshal(obj)
- if err != nil {
- return b, err
- }
- b.req.Body = ioutil.NopCloser(bytes.NewReader(byts))
- b.req.ContentLength = int64(len(byts))
- b.req.Header.Set("Content-Type", "application/xml")
- }
- return b, nil
-}
-
-// YAMLBody sets the raw request body, encoded as YAML.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) YAMLBody(obj interface{}) (*BeegoHTTPRequest, error) {
- if b.req.Body == nil && obj != nil {
- byts, err := yaml.Marshal(obj)
- if err != nil {
- return b, err
- }
- b.req.Body = ioutil.NopCloser(bytes.NewReader(byts))
- b.req.ContentLength = int64(len(byts))
- b.req.Header.Set("Content-Type", "application/x+yaml")
- }
- return b, nil
-}
-
-// JSONBody sets the raw request body, encoded as JSON.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error) {
- if b.req.Body == nil && obj != nil {
- byts, err := json.Marshal(obj)
- if err != nil {
- return b, err
- }
- b.req.Body = ioutil.NopCloser(bytes.NewReader(byts))
- b.req.ContentLength = int64(len(byts))
- b.req.Header.Set("Content-Type", "application/json")
- }
- return b, nil
-}
-
-func (b *BeegoHTTPRequest) buildURL(paramBody string) {
- // build GET url with query string
- if b.req.Method == "GET" && len(paramBody) > 0 {
- if strings.Contains(b.url, "?") {
- b.url += "&" + paramBody
- } else {
- b.url = b.url + "?" + paramBody
- }
- return
- }
-
- // build POST/PUT/PATCH url and body
- if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH" || b.req.Method == "DELETE") && b.req.Body == nil {
- // with files
- if len(b.files) > 0 {
- pr, pw := io.Pipe()
- bodyWriter := multipart.NewWriter(pw)
- go func() {
- for formname, filename := range b.files {
- fileWriter, err := bodyWriter.CreateFormFile(formname, filename)
- if err != nil {
- log.Println("Httplib:", err)
- }
- fh, err := os.Open(filename)
- if err != nil {
- log.Println("Httplib:", err)
- }
- //iocopy
- _, err = io.Copy(fileWriter, fh)
- fh.Close()
- if err != nil {
- log.Println("Httplib:", err)
- }
- }
- for k, v := range b.params {
- for _, vv := range v {
- bodyWriter.WriteField(k, vv)
- }
- }
- bodyWriter.Close()
- pw.Close()
- }()
- b.Header("Content-Type", bodyWriter.FormDataContentType())
- b.req.Body = ioutil.NopCloser(pr)
- b.Header("Transfer-Encoding", "chunked")
- return
- }
-
- // with params
- if len(paramBody) > 0 {
- b.Header("Content-Type", "application/x-www-form-urlencoded")
- b.Body(paramBody)
- }
- }
-}
-
-func (b *BeegoHTTPRequest) getResponse() (*http.Response, error) {
- if b.resp.StatusCode != 0 {
- return b.resp, nil
- }
- resp, err := b.DoRequest()
- if err != nil {
- return nil, err
- }
- b.resp = resp
- return resp, nil
-}
-
-// DoRequest executes the request with client.Do.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) DoRequest() (resp *http.Response, err error) {
- var paramBody string
- if len(b.params) > 0 {
- var buf bytes.Buffer
- for k, v := range b.params {
- for _, vv := range v {
- buf.WriteString(url.QueryEscape(k))
- buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(vv))
- buf.WriteByte('&')
- }
- }
- paramBody = buf.String()
- paramBody = paramBody[0 : len(paramBody)-1]
- }
-
- b.buildURL(paramBody)
- urlParsed, err := url.Parse(b.url)
- if err != nil {
- return nil, err
- }
-
- b.req.URL = urlParsed
-
- trans := b.setting.Transport
-
- if trans == nil {
- // create default transport
- trans = &http.Transport{
- TLSClientConfig: b.setting.TLSClientConfig,
- Proxy: b.setting.Proxy,
- Dial: TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout),
- MaxIdleConnsPerHost: 100,
- }
- } else {
- // if b.transport is *http.Transport then set the settings.
- if t, ok := trans.(*http.Transport); ok {
- if t.TLSClientConfig == nil {
- t.TLSClientConfig = b.setting.TLSClientConfig
- }
- if t.Proxy == nil {
- t.Proxy = b.setting.Proxy
- }
- if t.Dial == nil {
- t.Dial = TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout)
- }
- }
- }
-
- var jar http.CookieJar
- if b.setting.EnableCookie {
- if defaultCookieJar == nil {
- createDefaultCookie()
- }
- jar = defaultCookieJar
- }
-
- client := &http.Client{
- Transport: trans,
- Jar: jar,
- }
-
- if b.setting.UserAgent != "" && b.req.Header.Get("User-Agent") == "" {
- b.req.Header.Set("User-Agent", b.setting.UserAgent)
- }
-
- if b.setting.CheckRedirect != nil {
- client.CheckRedirect = b.setting.CheckRedirect
- }
-
- if b.setting.ShowDebug {
- dump, err := httputil.DumpRequest(b.req, b.setting.DumpBody)
- if err != nil {
- log.Println(err.Error())
- }
- b.dump = dump
- }
-	// Retries defaults to 0, so the request runs once.
-	// If Retries is -1, it retries forever until success.
-	// Otherwise it retries the configured number of times.
-	// Sleeps for RetryDelay between attempts to reduce spam.
- for i := 0; b.setting.Retries == -1 || i <= b.setting.Retries; i++ {
- resp, err = client.Do(b.req)
- if err == nil {
- break
- }
- time.Sleep(b.setting.RetryDelay)
- }
- return resp, err
-}
-
-// String returns the response body as a string.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) String() (string, error) {
- data, err := b.Bytes()
- if err != nil {
- return "", err
- }
-
- return string(data), nil
-}
-
-// Bytes returns the response body as a []byte.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Bytes() ([]byte, error) {
- if b.body != nil {
- return b.body, nil
- }
- resp, err := b.getResponse()
- if err != nil {
- return nil, err
- }
- if resp.Body == nil {
- return nil, nil
- }
- defer resp.Body.Close()
- if b.setting.Gzip && resp.Header.Get("Content-Encoding") == "gzip" {
- reader, err := gzip.NewReader(resp.Body)
- if err != nil {
- return nil, err
- }
- b.body, err = ioutil.ReadAll(reader)
- return b.body, err
- }
- b.body, err = ioutil.ReadAll(resp.Body)
- return b.body, err
-}
-
-// ToFile saves the response body to a file.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) ToFile(filename string) error {
- resp, err := b.getResponse()
- if err != nil {
- return err
- }
- if resp.Body == nil {
- return nil
- }
- defer resp.Body.Close()
- err = pathExistAndMkdir(filename)
- if err != nil {
- return err
- }
- f, err := os.Create(filename)
- if err != nil {
- return err
- }
- defer f.Close()
- _, err = io.Copy(f, resp.Body)
- return err
-}
-
-// pathExistAndMkdir checks that the file's directory exists and creates it if it does not.
-func pathExistAndMkdir(filename string) (err error) {
- filename = path.Dir(filename)
- _, err = os.Stat(filename)
- if err == nil {
- return nil
- }
- if os.IsNotExist(err) {
- err = os.MkdirAll(filename, os.ModePerm)
- if err == nil {
- return nil
- }
- }
- return err
-}
-
-// ToJSON unmarshals the response body as JSON into v.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) ToJSON(v interface{}) error {
- data, err := b.Bytes()
- if err != nil {
- return err
- }
- return json.Unmarshal(data, v)
-}
-
-// ToXML unmarshals the response body as XML into v.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) ToXML(v interface{}) error {
- data, err := b.Bytes()
- if err != nil {
- return err
- }
- return xml.Unmarshal(data, v)
-}
-
-// ToYAML unmarshals the response body as YAML into v.
-// It calls Response internally.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) ToYAML(v interface{}) error {
- data, err := b.Bytes()
- if err != nil {
- return err
- }
- return yaml.Unmarshal(data, v)
-}
-
-// Response executes the request and returns the response manually.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func (b *BeegoHTTPRequest) Response() (*http.Response, error) {
- return b.getResponse()
-}
-
-// TimeoutDialer returns a dial function with connect and read-write timeout settings for the http.Transport Dial field.
-// Deprecated: using pkg/httplib, we will delete this in v2.1.0
-func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) {
- return func(netw, addr string) (net.Conn, error) {
- conn, err := net.DialTimeout(netw, addr, cTimeout)
- if err != nil {
- return nil, err
- }
- err = conn.SetDeadline(time.Now().Add(rwTimeout))
- return conn, err
- }
-}
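The retry loop at the end of `DoRequest` runs the request once by default, forever when `Retries` is -1, and otherwise `Retries` additional times, sleeping `RetryDelay` between attempts. A small sketch of wiring those settings together (the URL is only a placeholder):

```golang
package main

import (
	"fmt"
	"time"

	"github.com/astaxie/beego/httplib"
)

func main() {
	// Up to 3 extra attempts, one second apart, mirroring the loop in DoRequest.
	req := httplib.Get("http://beego.me/").
		Retries(3).
		RetryDelay(time.Second).
		Debug(true)

	body, err := req.String()
	if err != nil {
		fmt.Println("all attempts failed:", err)
		return
	}
	fmt.Println(len(body), "bytes received")

	// Debug(true) makes DoRequest capture the dump returned by DumpRequest.
	fmt.Printf("%s\n", req.DumpRequest())
}
```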
diff --git a/httplib/httplib_test.go b/httplib/httplib_test.go
deleted file mode 100644
index f6be8571..00000000
--- a/httplib/httplib_test.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httplib
-
-import (
- "errors"
- "io/ioutil"
- "net"
- "net/http"
- "os"
- "strings"
- "testing"
- "time"
-)
-
-func TestResponse(t *testing.T) {
- req := Get("http://httpbin.org/get")
- resp, err := req.Response()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(resp)
-}
-
-func TestDoRequest(t *testing.T) {
- req := Get("https://goolnk.com/33BD2j")
- retryAmount := 1
- req.Retries(1)
- req.RetryDelay(1400 * time.Millisecond)
- retryDelay := 1400 * time.Millisecond
-
- req.setting.CheckRedirect = func(redirectReq *http.Request, redirectVia []*http.Request) error {
- return errors.New("Redirect triggered")
- }
-
- startTime := time.Now().UnixNano() / int64(time.Millisecond)
-
- _, err := req.Response()
- if err == nil {
- t.Fatal("Response should have yielded an error")
- }
-
- endTime := time.Now().UnixNano() / int64(time.Millisecond)
- elapsedTime := endTime - startTime
- delayedTime := int64(retryAmount) * retryDelay.Milliseconds()
-
- if elapsedTime < delayedTime {
- t.Errorf("Not enough retries. Took %dms. Delay was meant to take %dms", elapsedTime, delayedTime)
- }
-
-}
-
-func TestGet(t *testing.T) {
- req := Get("http://httpbin.org/get")
- b, err := req.Bytes()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(b)
-
- s, err := req.String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(s)
-
- if string(b) != s {
- t.Fatal("request data not match")
- }
-}
-
-func TestSimplePost(t *testing.T) {
- v := "smallfish"
- req := Post("http://httpbin.org/post")
- req.Param("username", v)
-
- str, err := req.String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-
- n := strings.Index(str, v)
- if n == -1 {
- t.Fatal(v + " not found in post")
- }
-}
-
-//func TestPostFile(t *testing.T) {
-// v := "smallfish"
-// req := Post("http://httpbin.org/post")
-// req.Debug(true)
-// req.Param("username", v)
-// req.PostFile("uploadfile", "httplib_test.go")
-
-// str, err := req.String()
-// if err != nil {
-// t.Fatal(err)
-// }
-// t.Log(str)
-
-// n := strings.Index(str, v)
-// if n == -1 {
-// t.Fatal(v + " not found in post")
-// }
-//}
-
-func TestSimplePut(t *testing.T) {
- str, err := Put("http://httpbin.org/put").String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-}
-
-func TestSimpleDelete(t *testing.T) {
- str, err := Delete("http://httpbin.org/delete").String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-}
-
-func TestSimpleDeleteParam(t *testing.T) {
- str, err := Delete("http://httpbin.org/delete").Param("key", "val").String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-}
-
-func TestWithCookie(t *testing.T) {
- v := "smallfish"
- str, err := Get("http://httpbin.org/cookies/set?k1=" + v).SetEnableCookie(true).String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-
- str, err = Get("http://httpbin.org/cookies").SetEnableCookie(true).String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-
- n := strings.Index(str, v)
- if n == -1 {
- t.Fatal(v + " not found in cookie")
- }
-}
-
-func TestWithBasicAuth(t *testing.T) {
- str, err := Get("http://httpbin.org/basic-auth/user/passwd").SetBasicAuth("user", "passwd").String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
- n := strings.Index(str, "authenticated")
- if n == -1 {
- t.Fatal("authenticated not found in response")
- }
-}
-
-func TestWithUserAgent(t *testing.T) {
- v := "beego"
- str, err := Get("http://httpbin.org/headers").SetUserAgent(v).String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-
- n := strings.Index(str, v)
- if n == -1 {
- t.Fatal(v + " not found in user-agent")
- }
-}
-
-func TestWithSetting(t *testing.T) {
- v := "beego"
- var setting BeegoHTTPSettings
- setting.EnableCookie = true
- setting.UserAgent = v
- setting.Transport = &http.Transport{
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- MaxIdleConns: 50,
- IdleConnTimeout: 90 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }
- setting.ReadWriteTimeout = 5 * time.Second
- SetDefaultSetting(setting)
-
- str, err := Get("http://httpbin.org/get").String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-
- n := strings.Index(str, v)
- if n == -1 {
- t.Fatal(v + " not found in user-agent")
- }
-}
-
-func TestToJson(t *testing.T) {
- req := Get("http://httpbin.org/ip")
- resp, err := req.Response()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(resp)
-
- // httpbin will return http remote addr
- type IP struct {
- Origin string `json:"origin"`
- }
- var ip IP
- err = req.ToJSON(&ip)
- if err != nil {
- t.Fatal(err)
- }
- t.Log(ip.Origin)
- ips := strings.Split(ip.Origin, ",")
- if len(ips) == 0 {
- t.Fatal("response is not valid ip")
- }
- for i := range ips {
- if net.ParseIP(strings.TrimSpace(ips[i])).To4() == nil {
- t.Fatal("response is not valid ip")
- }
- }
-
-}
-
-func TestToFile(t *testing.T) {
- f := "beego_testfile"
- req := Get("http://httpbin.org/ip")
- err := req.ToFile(f)
- if err != nil {
- t.Fatal(err)
- }
- defer os.Remove(f)
- b, err := ioutil.ReadFile(f)
- if n := strings.Index(string(b), "origin"); n == -1 {
- t.Fatal(err)
- }
-}
-
-func TestToFileDir(t *testing.T) {
- f := "./files/beego_testfile"
- req := Get("http://httpbin.org/ip")
- err := req.ToFile(f)
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll("./files")
- b, err := ioutil.ReadFile(f)
- if n := strings.Index(string(b), "origin"); n == -1 {
- t.Fatal(err)
- }
-}
-
-func TestHeader(t *testing.T) {
- req := Get("http://httpbin.org/headers")
- req.Header("User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36")
- str, err := req.String()
- if err != nil {
- t.Fatal(err)
- }
- t.Log(str)
-}
diff --git a/logs/README.md b/logs/README.md
deleted file mode 100644
index c05bcc04..00000000
--- a/logs/README.md
+++ /dev/null
@@ -1,72 +0,0 @@
-## logs
-logs is a Go log manager. It can use many log adapters. The package is inspired by `database/sql`.
-
-
-## How to install?
-
- go get github.com/astaxie/beego/logs
-
-
-## What adapters are supported?
-
-As of now this package supports the console, file, smtp and conn adapters.
-
-
-## How to use it?
-
-First you must import it:
-
-```golang
-import (
- "github.com/astaxie/beego/logs"
-)
-```
-
-Then initialize a logger (example with the console adapter):
-
-```golang
-log := logs.NewLogger(10000)
-log.SetLogger("console", "")
-```
-
-> the first parameter is the size of the buffered message channel
-
-Use it like this:
-
-```golang
-log.Trace("trace")
-log.Info("info")
-log.Warn("warning")
-log.Debug("debug")
-log.Critical("critical")
-```
-
-## File adapter
-
-Configure file adapter like this:
-
-```golang
-log := NewLogger(10000)
-log.SetLogger("file", `{"filename":"test.log"}`)
-```
-
-## Conn adapter
-
-Configure like this:
-
-```golang
-log := NewLogger(1000)
-log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
-log.Info("info")
-```
-
-## Smtp adapter
-
-Configure like this:
-
-```golang
-log := NewLogger(10000)
-log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
-log.Critical("sendmail critical")
-time.Sleep(time.Second * 30)
-```
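A self-contained sketch that combines the snippets above into one program; the adapter names and config strings are the same ones shown in this README, and registering more than one adapter on the same logger is assumed to be supported.

```golang
package main

import (
	"github.com/astaxie/beego/logs"
)

func main() {
	// 10000 is the buffered channel size, as in the examples above.
	log := logs.NewLogger(10000)

	// Console and file adapters side by side (assumed to coexist on one logger).
	log.SetLogger("console", "")
	log.SetLogger("file", `{"filename":"test.log"}`)

	log.Trace("trace")
	log.Info("info")
	log.Warn("warning")
	log.Debug("debug")
	log.Critical("critical")
}
```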
diff --git a/logs/accesslog.go b/logs/accesslog.go
deleted file mode 100644
index 9011b602..00000000
--- a/logs/accesslog.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "strings"
- "time"
-)
-
-const (
- apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s"
- apacheFormat = "APACHE_FORMAT"
- jsonFormat = "JSON_FORMAT"
-)
-
-// AccessLogRecord struct for holding access log data.
-type AccessLogRecord struct {
- RemoteAddr string `json:"remote_addr"`
- RequestTime time.Time `json:"request_time"`
- RequestMethod string `json:"request_method"`
- Request string `json:"request"`
- ServerProtocol string `json:"server_protocol"`
- Host string `json:"host"`
- Status int `json:"status"`
- BodyBytesSent int64 `json:"body_bytes_sent"`
- ElapsedTime time.Duration `json:"elapsed_time"`
- HTTPReferrer string `json:"http_referrer"`
- HTTPUserAgent string `json:"http_user_agent"`
- RemoteUser string `json:"remote_user"`
-}
-
-func (r *AccessLogRecord) json() ([]byte, error) {
- buffer := &bytes.Buffer{}
- encoder := json.NewEncoder(buffer)
- disableEscapeHTML(encoder)
-
- err := encoder.Encode(r)
- return buffer.Bytes(), err
-}
-
-func disableEscapeHTML(i interface{}) {
- if e, ok := i.(interface {
- SetEscapeHTML(bool)
- }); ok {
- e.SetEscapeHTML(false)
- }
-}
-
-// AccessLog formats and prints an access log record.
-func AccessLog(r *AccessLogRecord, format string) {
- var msg string
- switch format {
- case apacheFormat:
- timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05")
- msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent,
- r.ElapsedTime.Seconds(), r.HTTPReferrer, r.HTTPUserAgent)
- case jsonFormat:
- fallthrough
- default:
- jsonData, err := r.json()
- if err != nil {
- msg = fmt.Sprintf(`{"Error": "%s"}`, err)
- } else {
- msg = string(jsonData)
- }
- }
- beeLogger.writeMsg(levelLoggerImpl, strings.TrimSpace(msg))
-}
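A sketch of feeding the `AccessLog` helper above: it fills an `AccessLogRecord` with made-up values and emits it in both supported formats, whose string names match the `apacheFormat` and `jsonFormat` constants.

```golang
package main

import (
	"time"

	"github.com/astaxie/beego/logs"
)

func main() {
	// Illustrative values only; every field mirrors the AccessLogRecord struct above.
	record := &logs.AccessLogRecord{
		RemoteAddr:     "127.0.0.1",
		RequestTime:    time.Now(),
		RequestMethod:  "GET",
		Request:        "GET /health HTTP/1.1",
		ServerProtocol: "HTTP/1.1",
		Host:           "example.local",
		Status:         200,
		BodyBytesSent:  512,
		ElapsedTime:    3 * time.Millisecond,
		HTTPReferrer:   "-",
		HTTPUserAgent:  "curl/7.68.0",
		RemoteUser:     "-",
	}

	logs.AccessLog(record, "APACHE_FORMAT") // Apache-style line
	logs.AccessLog(record, "JSON_FORMAT")   // JSON, the default
}
```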
diff --git a/logs/alils/alils.go b/logs/alils/alils.go
deleted file mode 100644
index 867ff4cb..00000000
--- a/logs/alils/alils.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package alils
-
-import (
- "encoding/json"
- "strings"
- "sync"
- "time"
-
- "github.com/astaxie/beego/logs"
- "github.com/gogo/protobuf/proto"
-)
-
-const (
-	// CacheSize is the flush size
-	CacheSize int = 64
-	// Delimiter defines the topic delimiter
- Delimiter string = "##"
-)
-
-// Config is the Config for Ali Log
-type Config struct {
- Project string `json:"project"`
- Endpoint string `json:"endpoint"`
- KeyID string `json:"key_id"`
- KeySecret string `json:"key_secret"`
- LogStore string `json:"log_store"`
- Topics []string `json:"topics"`
- Source string `json:"source"`
- Level int `json:"level"`
- FlushWhen int `json:"flush_when"`
-}
-
-// aliLSWriter implements LoggerInterface.
-// It writes messages to the Ali Log Service over a keep-alive connection.
-type aliLSWriter struct {
- store *LogStore
- group []*LogGroup
- withMap bool
- groupMap map[string]*LogGroup
- lock *sync.Mutex
- Config
-}
-
-// NewAliLS creates a new Logger.
-func NewAliLS() logs.Logger {
- alils := new(aliLSWriter)
- alils.Level = logs.LevelTrace
- return alils
-}
-
-// Init parses the config and initializes the struct.
-func (c *aliLSWriter) Init(jsonConfig string) (err error) {
-
-	err = json.Unmarshal([]byte(jsonConfig), c)
-	if err != nil {
-		return err
-	}
-
- if c.FlushWhen > CacheSize {
- c.FlushWhen = CacheSize
- }
-
- prj := &LogProject{
- Name: c.Project,
- Endpoint: c.Endpoint,
- AccessKeyID: c.KeyID,
- AccessKeySecret: c.KeySecret,
- }
-
- c.store, err = prj.GetLogStore(c.LogStore)
- if err != nil {
- return err
- }
-
- // Create default Log Group
- c.group = append(c.group, &LogGroup{
- Topic: proto.String(""),
- Source: proto.String(c.Source),
- Logs: make([]*Log, 0, c.FlushWhen),
- })
-
- // Create other Log Group
- c.groupMap = make(map[string]*LogGroup)
- for _, topic := range c.Topics {
-
- lg := &LogGroup{
- Topic: proto.String(topic),
- Source: proto.String(c.Source),
- Logs: make([]*Log, 0, c.FlushWhen),
- }
-
- c.group = append(c.group, lg)
- c.groupMap[topic] = lg
- }
-
- if len(c.group) == 1 {
- c.withMap = false
- } else {
- c.withMap = true
- }
-
- c.lock = &sync.Mutex{}
-
- return nil
-}
-
-// WriteMsg writes a message to the log store.
-// If the connection is down, it tries to reconnect.
-func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error) {
-
- if level > c.Level {
- return nil
- }
-
- var topic string
- var content string
- var lg *LogGroup
- if c.withMap {
-
- // Topic,LogGroup
- strs := strings.SplitN(msg, Delimiter, 2)
- if len(strs) == 2 {
- pos := strings.LastIndex(strs[0], " ")
- topic = strs[0][pos+1 : len(strs[0])]
- content = strs[0][0:pos] + strs[1]
- lg = c.groupMap[topic]
- }
-
- // send to empty Topic
- if lg == nil {
- content = msg
- lg = c.group[0]
- }
- } else {
- content = msg
- lg = c.group[0]
- }
-
- c1 := &LogContent{
- Key: proto.String("msg"),
- Value: proto.String(content),
- }
-
- l := &Log{
- Time: proto.Uint32(uint32(when.Unix())),
- Contents: []*LogContent{
- c1,
- },
- }
-
- c.lock.Lock()
- lg.Logs = append(lg.Logs, l)
- c.lock.Unlock()
-
- if len(lg.Logs) >= c.FlushWhen {
- c.flush(lg)
- }
-
- return nil
-}
-
-// Flush flushes all log groups to the store.
-func (c *aliLSWriter) Flush() {
-
- // flush all group
- for _, lg := range c.group {
- c.flush(lg)
- }
-}
-
-// Destroy destroys the connection writer; it is a no-op for this adapter.
-func (c *aliLSWriter) Destroy() {
-}
-
-func (c *aliLSWriter) flush(lg *LogGroup) {
-
- c.lock.Lock()
- defer c.lock.Unlock()
- err := c.store.PutLogs(lg)
- if err != nil {
- return
- }
-
- lg.Logs = make([]*Log, 0, c.FlushWhen)
-}
-
-func init() {
- logs.Register(logs.AdapterAliLS, NewAliLS)
-}
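A sketch of the JSON configuration that `Init` above unmarshals, wrapped in the usual `logs.SetLogger` call; the keys follow the json tags on `Config`, and all values are placeholders.

```golang
package main

import (
	"github.com/astaxie/beego/logs"

	// The blank import runs the adapter's init(), which registers AdapterAliLS.
	_ "github.com/astaxie/beego/logs/alils"
)

func main() {
	log := logs.NewLogger(10000)

	// Placeholder credentials and names; keys mirror the Config struct's json tags.
	log.SetLogger(logs.AdapterAliLS, `{
		"project":    "my-project",
		"endpoint":   "cn-hangzhou.log.aliyuncs.com",
		"key_id":     "<access-key-id>",
		"key_secret": "<access-key-secret>",
		"log_store":  "my-store",
		"topics":     ["web", "worker"],
		"source":     "app-01",
		"level":      7,
		"flush_when": 32
	}`)

	log.Info("hello from the alils adapter")
}
```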
diff --git a/logs/alils/config.go b/logs/alils/config.go
deleted file mode 100755
index e8c24448..00000000
--- a/logs/alils/config.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package alils
-
-const (
- version = "0.5.0" // SDK version
- signatureMethod = "hmac-sha1" // Signature method
-
- // OffsetNewest stands for the log head offset, i.e. the offset that will be
- // assigned to the next message that will be produced to the shard.
- OffsetNewest = "end"
- // OffsetOldest stands for the oldest offset available on the logstore for a
- // shard.
- OffsetOldest = "begin"
-)
diff --git a/logs/alils/log.pb.go b/logs/alils/log.pb.go
deleted file mode 100755
index 601b0d78..00000000
--- a/logs/alils/log.pb.go
+++ /dev/null
@@ -1,1038 +0,0 @@
-package alils
-
-import (
- "fmt"
- "io"
- "math"
-
- "github.com/gogo/protobuf/proto"
- github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-var (
- // ErrInvalidLengthLog invalid proto
- ErrInvalidLengthLog = fmt.Errorf("proto: negative length found during unmarshaling")
- // ErrIntOverflowLog overflow
- ErrIntOverflowLog = fmt.Errorf("proto: integer overflow")
-)
-
-// Log define the proto Log
-type Log struct {
- Time *uint32 `protobuf:"varint,1,req,name=Time" json:"Time,omitempty"`
- Contents []*LogContent `protobuf:"bytes,2,rep,name=Contents" json:"Contents,omitempty"`
- XXXUnrecognized []byte `json:"-"`
-}
-
-// Reset the Log
-func (m *Log) Reset() { *m = Log{} }
-
-// String return the Compact Log
-func (m *Log) String() string { return proto.CompactTextString(m) }
-
-// ProtoMessage not implemented
-func (*Log) ProtoMessage() {}
-
-// GetTime return the Log's Time
-func (m *Log) GetTime() uint32 {
- if m != nil && m.Time != nil {
- return *m.Time
- }
- return 0
-}
-
-// GetContents return the Log's Contents
-func (m *Log) GetContents() []*LogContent {
- if m != nil {
- return m.Contents
- }
- return nil
-}
-
-// LogContent define the Log content struct
-type LogContent struct {
- Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"`
- XXXUnrecognized []byte `json:"-"`
-}
-
-// Reset LogContent
-func (m *LogContent) Reset() { *m = LogContent{} }
-
-// String return the compact text
-func (m *LogContent) String() string { return proto.CompactTextString(m) }
-
-// ProtoMessage not implemented
-func (*LogContent) ProtoMessage() {}
-
-// GetKey return the Key
-func (m *LogContent) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-// GetValue return the Value
-func (m *LogContent) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-// LogGroup define the logs struct
-type LogGroup struct {
- Logs []*Log `protobuf:"bytes,1,rep,name=Logs" json:"Logs,omitempty"`
- Reserved *string `protobuf:"bytes,2,opt,name=Reserved" json:"Reserved,omitempty"`
- Topic *string `protobuf:"bytes,3,opt,name=Topic" json:"Topic,omitempty"`
- Source *string `protobuf:"bytes,4,opt,name=Source" json:"Source,omitempty"`
- XXXUnrecognized []byte `json:"-"`
-}
-
-// Reset LogGroup
-func (m *LogGroup) Reset() { *m = LogGroup{} }
-
-// String return the compact text
-func (m *LogGroup) String() string { return proto.CompactTextString(m) }
-
-// ProtoMessage not implemented
-func (*LogGroup) ProtoMessage() {}
-
-// GetLogs return the loggroup logs
-func (m *LogGroup) GetLogs() []*Log {
- if m != nil {
- return m.Logs
- }
- return nil
-}
-
-// GetReserved return Reserved
-func (m *LogGroup) GetReserved() string {
- if m != nil && m.Reserved != nil {
- return *m.Reserved
- }
- return ""
-}
-
-// GetTopic return Topic
-func (m *LogGroup) GetTopic() string {
- if m != nil && m.Topic != nil {
- return *m.Topic
- }
- return ""
-}
-
-// GetSource return Source
-func (m *LogGroup) GetSource() string {
- if m != nil && m.Source != nil {
- return *m.Source
- }
- return ""
-}
-
-// LogGroupList define the LogGroups
-type LogGroupList struct {
- LogGroups []*LogGroup `protobuf:"bytes,1,rep,name=logGroups" json:"logGroups,omitempty"`
- XXXUnrecognized []byte `json:"-"`
-}
-
-// Reset LogGroupList
-func (m *LogGroupList) Reset() { *m = LogGroupList{} }
-
-// String return compact text
-func (m *LogGroupList) String() string { return proto.CompactTextString(m) }
-
-// ProtoMessage not implemented
-func (*LogGroupList) ProtoMessage() {}
-
-// GetLogGroups return the LogGroups
-func (m *LogGroupList) GetLogGroups() []*LogGroup {
- if m != nil {
- return m.LogGroups
- }
- return nil
-}
-
-// Marshal the logs to byte slice
-func (m *Log) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-// MarshalTo data
-func (m *Log) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Time == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time")
- }
- data[i] = 0x8
- i++
- i = encodeVarintLog(data, i, uint64(*m.Time))
- if len(m.Contents) > 0 {
- for _, msg := range m.Contents {
- data[i] = 0x12
- i++
- i = encodeVarintLog(data, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.XXXUnrecognized != nil {
- i += copy(data[i:], m.XXXUnrecognized)
- }
- return i, nil
-}
-
-// Marshal LogContent
-func (m *LogContent) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-// MarshalTo logcontent to data
-func (m *LogContent) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.Key == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key")
- }
- data[i] = 0xa
- i++
- i = encodeVarintLog(data, i, uint64(len(*m.Key)))
- i += copy(data[i:], *m.Key)
-
- if m.Value == nil {
- return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value")
- }
- data[i] = 0x12
- i++
- i = encodeVarintLog(data, i, uint64(len(*m.Value)))
- i += copy(data[i:], *m.Value)
- if m.XXXUnrecognized != nil {
- i += copy(data[i:], m.XXXUnrecognized)
- }
- return i, nil
-}
-
-// Marshal LogGroup
-func (m *LogGroup) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-// MarshalTo LogGroup to data
-func (m *LogGroup) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.Logs) > 0 {
- for _, msg := range m.Logs {
- data[i] = 0xa
- i++
- i = encodeVarintLog(data, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.Reserved != nil {
- data[i] = 0x12
- i++
- i = encodeVarintLog(data, i, uint64(len(*m.Reserved)))
- i += copy(data[i:], *m.Reserved)
- }
- if m.Topic != nil {
- data[i] = 0x1a
- i++
- i = encodeVarintLog(data, i, uint64(len(*m.Topic)))
- i += copy(data[i:], *m.Topic)
- }
- if m.Source != nil {
- data[i] = 0x22
- i++
- i = encodeVarintLog(data, i, uint64(len(*m.Source)))
- i += copy(data[i:], *m.Source)
- }
- if m.XXXUnrecognized != nil {
- i += copy(data[i:], m.XXXUnrecognized)
- }
- return i, nil
-}
-
-// Marshal LogGroupList
-func (m *LogGroupList) Marshal() (data []byte, err error) {
- size := m.Size()
- data = make([]byte, size)
- n, err := m.MarshalTo(data)
- if err != nil {
- return nil, err
- }
- return data[:n], nil
-}
-
-// MarshalTo LogGroupList to data
-func (m *LogGroupList) MarshalTo(data []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if len(m.LogGroups) > 0 {
- for _, msg := range m.LogGroups {
- data[i] = 0xa
- i++
- i = encodeVarintLog(data, i, uint64(msg.Size()))
- n, err := msg.MarshalTo(data[i:])
- if err != nil {
- return 0, err
- }
- i += n
- }
- }
- if m.XXXUnrecognized != nil {
- i += copy(data[i:], m.XXXUnrecognized)
- }
- return i, nil
-}
-
-func encodeFixed64Log(data []byte, offset int, v uint64) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- data[offset+4] = uint8(v >> 32)
- data[offset+5] = uint8(v >> 40)
- data[offset+6] = uint8(v >> 48)
- data[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Log(data []byte, offset int, v uint32) int {
- data[offset] = uint8(v)
- data[offset+1] = uint8(v >> 8)
- data[offset+2] = uint8(v >> 16)
- data[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintLog(data []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- data[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- data[offset] = uint8(v)
- return offset + 1
-}
-
-// Size return the log's size
-func (m *Log) Size() (n int) {
- var l int
- _ = l
- if m.Time != nil {
- n += 1 + sovLog(uint64(*m.Time))
- }
- if len(m.Contents) > 0 {
- for _, e := range m.Contents {
- l = e.Size()
- n += 1 + l + sovLog(uint64(l))
- }
- }
- if m.XXXUnrecognized != nil {
- n += len(m.XXXUnrecognized)
- }
- return n
-}
-
-// Size return LogContent size based on Key and Value
-func (m *LogContent) Size() (n int) {
- var l int
- _ = l
- if m.Key != nil {
- l = len(*m.Key)
- n += 1 + l + sovLog(uint64(l))
- }
- if m.Value != nil {
- l = len(*m.Value)
- n += 1 + l + sovLog(uint64(l))
- }
- if m.XXXUnrecognized != nil {
- n += len(m.XXXUnrecognized)
- }
- return n
-}
-
-// Size return LogGroup size based on Logs
-func (m *LogGroup) Size() (n int) {
- var l int
- _ = l
- if len(m.Logs) > 0 {
- for _, e := range m.Logs {
- l = e.Size()
- n += 1 + l + sovLog(uint64(l))
- }
- }
- if m.Reserved != nil {
- l = len(*m.Reserved)
- n += 1 + l + sovLog(uint64(l))
- }
- if m.Topic != nil {
- l = len(*m.Topic)
- n += 1 + l + sovLog(uint64(l))
- }
- if m.Source != nil {
- l = len(*m.Source)
- n += 1 + l + sovLog(uint64(l))
- }
- if m.XXXUnrecognized != nil {
- n += len(m.XXXUnrecognized)
- }
- return n
-}
-
-// Size return LogGroupList size
-func (m *LogGroupList) Size() (n int) {
- var l int
- _ = l
- if len(m.LogGroups) > 0 {
- for _, e := range m.LogGroups {
- l = e.Size()
- n += 1 + l + sovLog(uint64(l))
- }
- }
- if m.XXXUnrecognized != nil {
- n += len(m.XXXUnrecognized)
- }
- return n
-}
-
-func sovLog(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozLog(x uint64) (n int) {
- return sovLog((x << 1) ^ (x >> 63))
-}
-
-// Unmarshal data to log
-func (m *Log) Unmarshal(data []byte) error {
- var hasFields [1]uint64
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Log: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Log: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
- }
- var v uint32
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- v |= (uint32(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Time = &v
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Contents", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Contents = append(m.Contents, &LogContent{})
- if err := m.Contents[len(m.Contents)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLog(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLog
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time")
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-// Unmarshal data to LogContent
-func (m *LogContent) Unmarshal(data []byte) error {
- var hasFields [1]uint64
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Content: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Content: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(data[iNdEx:postIndex])
- m.Key = &s
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000001)
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(data[iNdEx:postIndex])
- m.Value = &s
- iNdEx = postIndex
- hasFields[0] |= uint64(0x00000002)
- default:
- iNdEx = preIndex
- skippy, err := skipLog(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLog
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
- if hasFields[0]&uint64(0x00000001) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key")
- }
- if hasFields[0]&uint64(0x00000002) == 0 {
- return github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value")
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-// Unmarshal data to LogGroup
-func (m *LogGroup) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogGroup: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogGroup: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Logs = append(m.Logs, &Log{})
- if err := m.Logs[len(m.Logs)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(data[iNdEx:postIndex])
- m.Reserved = &s
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(data[iNdEx:postIndex])
- m.Topic = &s
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- stringLen |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + intStringLen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- s := string(data[iNdEx:postIndex])
- m.Source = &s
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLog(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLog
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-// Unmarshal data to LogGroupList
-func (m *LogGroupList) Unmarshal(data []byte) error {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LogGroupList: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LogGroupList: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogGroups", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLog
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLog
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.LogGroups = append(m.LogGroups, &LogGroup{})
- if err := m.LogGroups[len(m.LogGroups)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLog(data[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLog
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-func skipLog(data []byte) (n int, err error) {
- l := len(data)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLog
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLog
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if data[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLog
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthLog
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLog
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := data[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipLog(data[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
diff --git a/logs/alils/log_config.go b/logs/alils/log_config.go
deleted file mode 100755
index e8564efb..00000000
--- a/logs/alils/log_config.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package alils
-
-// InputDetail defines the log input detail.
-type InputDetail struct {
- LogType string `json:"logType"`
- LogPath string `json:"logPath"`
- FilePattern string `json:"filePattern"`
- LocalStorage bool `json:"localStorage"`
- TimeFormat string `json:"timeFormat"`
- LogBeginRegex string `json:"logBeginRegex"`
- Regex string `json:"regex"`
- Keys []string `json:"key"`
- FilterKeys []string `json:"filterKey"`
- FilterRegex []string `json:"filterRegex"`
- TopicFormat string `json:"topicFormat"`
-}
-
-// OutputDetail defines the output detail.
-type OutputDetail struct {
- Endpoint string `json:"endpoint"`
- LogStoreName string `json:"logstoreName"`
-}
-
-// LogConfig defines a log config.
-type LogConfig struct {
- Name string `json:"configName"`
- InputType string `json:"inputType"`
- InputDetail InputDetail `json:"inputDetail"`
- OutputType string `json:"outputType"`
- OutputDetail OutputDetail `json:"outputDetail"`
-
- CreateTime uint32
- LastModifyTime uint32
-
- project *LogProject
-}
-
-// GetAppliedMachineGroup returns the machine groups that this config is applied to.
-func (c *LogConfig) GetAppliedMachineGroup(confName string) (groupNames []string, err error) {
- groupNames, err = c.project.GetAppliedMachineGroups(c.Name)
- return
-}
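
The types above only describe a collection config; creating one and checking where it is applied goes through the LogProject methods removed further down in this patch. A minimal sketch under that assumption (all names, paths and endpoint values here are hypothetical placeholders, not values taken from this repository):

    package example

    import (
        "fmt"

        "github.com/astaxie/beego/logs/alils"
    )

    // setupConfig registers a hypothetical file-collection config and then
    // asks which machine groups it is applied to. p must already be an
    // initialized *alils.LogProject.
    func setupConfig(p *alils.LogProject) error {
        cfg := &alils.LogConfig{
            Name:      "nginx-access", // placeholder config name
            InputType: "file",
            InputDetail: alils.InputDetail{
                LogType:     "common_reg_log", // placeholder input settings
                LogPath:     "/var/log/nginx",
                FilePattern: "access.log",
            },
            OutputType: "LogService",
            OutputDetail: alils.OutputDetail{
                Endpoint:     "cn-hangzhou.log.aliyuncs.com",
                LogStoreName: "nginx-store",
            },
        }
        if err := p.CreateConfig(cfg); err != nil {
            return err
        }
        // Re-fetch the config so it carries its project reference before
        // querying the applied machine groups.
        created, err := p.GetConfig(cfg.Name)
        if err != nil {
            return err
        }
        groups, err := created.GetAppliedMachineGroup(created.Name)
        if err != nil {
            return err
        }
        fmt.Println("applied machine groups:", groups)
        return nil
    }
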
diff --git a/logs/alils/log_project.go b/logs/alils/log_project.go
deleted file mode 100755
index 59db8cbf..00000000
--- a/logs/alils/log_project.go
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
-Package alils implements the SDK (v0.5.0) of the Simple Log Service (abbr. SLS).
-
-For more description about SLS, please read this article:
-http://gitlab.alibaba-inc.com/sls/doc.
-*/
-package alils
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httputil"
-)
-
-// Error message in SLS HTTP response.
-type errorMessage struct {
- Code string `json:"errorCode"`
- Message string `json:"errorMessage"`
-}
-
-// LogProject defines the Aliyun SLS project detail.
-type LogProject struct {
- Name string // Project name
- Endpoint string // IP or hostname of SLS endpoint
- AccessKeyID string
- AccessKeySecret string
-}
-
-// NewLogProject creates a new SLS project.
-func NewLogProject(name, endpoint, AccessKeyID, accessKeySecret string) (p *LogProject, err error) {
- p = &LogProject{
- Name: name,
- Endpoint: endpoint,
- AccessKeyID: AccessKeyID,
- AccessKeySecret: accessKeySecret,
- }
- return p, nil
-}
-
-// ListLogStore returns all logstore names of project p.
-func (p *LogProject) ListLogStore() (storeNames []string, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := "/logstores"
- r, err := request(p, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to list logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Body struct {
- Count int
- LogStores []string
- }
- body := &Body{}
-
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- storeNames = body.LogStores
-
- return
-}
-
-// GetLogStore returns the logstore with the given name.
-func (p *LogProject) GetLogStore(name string) (s *LogStore, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "GET", "/logstores/"+name, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to get logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- s = &LogStore{}
- err = json.Unmarshal(buf, s)
- if err != nil {
- return
- }
- s.project = p
- return
-}
-
-// CreateLogStore creates a new logstore in SLS,
-// where name is the logstore name,
-// ttl is the time-to-live (in days) of the logs,
-// and shardCnt is the number of shards.
-func (p *LogProject) CreateLogStore(name string, ttl, shardCnt int) (err error) {
-
- type Body struct {
- Name string `json:"logstoreName"`
- TTL int `json:"ttl"`
- ShardCount int `json:"shardCount"`
- }
-
- store := &Body{
- Name: name,
- TTL: ttl,
- ShardCount: shardCnt,
- }
-
- body, err := json.Marshal(store)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "POST", "/logstores", h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to create logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// DeleteLogStore deletes the logstore with the given name.
-func (p *LogProject) DeleteLogStore(name string) (err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "DELETE", "/logstores/"+name, h, nil)
- if err != nil {
- return
- }
-
- body, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to delete logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
-
-// UpdateLogStore updates the logstore with the given name;
-// the logstore name itself cannot be modified.
-func (p *LogProject) UpdateLogStore(name string, ttl, shardCnt int) (err error) {
-
- type Body struct {
- Name string `json:"logstoreName"`
- TTL int `json:"ttl"`
- ShardCount int `json:"shardCount"`
- }
-
- store := &Body{
- Name: name,
- TTL: ttl,
- ShardCount: shardCnt,
- }
-
- body, err := json.Marshal(store)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "PUT", "/logstores", h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to update logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// ListMachineGroup returns the machine group name list and the total number of machine groups.
-// The offset starts from 0 and size is the max number of machine groups that can be returned.
-func (p *LogProject) ListMachineGroup(offset, size int) (m []string, total int, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- if size <= 0 {
- size = 500
- }
-
- uri := fmt.Sprintf("/machinegroups?offset=%v&size=%v", offset, size)
- r, err := request(p, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to list machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Body struct {
- MachineGroups []string
- Count int
- Total int
- }
- body := &Body{}
-
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- m = body.MachineGroups
- total = body.Total
-
- return
-}
-
-// GetMachineGroup returns the machine group with the given name.
-func (p *LogProject) GetMachineGroup(name string) (m *MachineGroup, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "GET", "/machinegroups/"+name, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to get machine group:%v", name)
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- m = &MachineGroup{}
- err = json.Unmarshal(buf, m)
- if err != nil {
- return
- }
- m.project = p
- return
-}
-
-// CreateMachineGroup creates a new machine group in SLS.
-func (p *LogProject) CreateMachineGroup(m *MachineGroup) (err error) {
-
- body, err := json.Marshal(m)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "POST", "/machinegroups", h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to create machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// UpdateMachineGroup updates a machine group.
-func (p *LogProject) UpdateMachineGroup(m *MachineGroup) (err error) {
-
- body, err := json.Marshal(m)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "PUT", "/machinegroups/"+m.Name, h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to update machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// DeleteMachineGroup deletes the machine group with the given name.
-func (p *LogProject) DeleteMachineGroup(name string) (err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "DELETE", "/machinegroups/"+name, h, nil)
- if err != nil {
- return
- }
-
- body, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to delete machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
-
-// ListConfig returns the config name list and the total number of configs.
-// The offset starts from 0 and size is the max number of configs that can be returned.
-func (p *LogProject) ListConfig(offset, size int) (cfgNames []string, total int, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- if size <= 0 {
- size = 100
- }
-
- uri := fmt.Sprintf("/configs?offset=%v&size=%v", offset, size)
- r, err := request(p, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to delete machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Body struct {
- Total int
- Configs []string
- }
- body := &Body{}
-
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- cfgNames = body.Configs
- total = body.Total
- return
-}
-
-// GetConfig returns the config with the given name.
-func (p *LogProject) GetConfig(name string) (c *LogConfig, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "GET", "/configs/"+name, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to delete config")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- c = &LogConfig{}
- err = json.Unmarshal(buf, c)
- if err != nil {
- return
- }
- c.project = p
- return
-}
-
-// UpdateConfig updates a config.
-func (p *LogProject) UpdateConfig(c *LogConfig) (err error) {
-
- body, err := json.Marshal(c)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "PUT", "/configs/"+c.Name, h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to update config")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// CreateConfig creates a new config in SLS.
-func (p *LogProject) CreateConfig(c *LogConfig) (err error) {
-
- body, err := json.Marshal(c)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/json",
- "Accept-Encoding": "deflate", // TODO: support lz4
- }
-
- r, err := request(p, "POST", "/configs", h, body)
- if err != nil {
- return
- }
-
- body, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to update config")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- return
-}
-
-// DeleteConfig deletes the config with the given name.
-func (p *LogProject) DeleteConfig(name string) (err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- r, err := request(p, "DELETE", "/configs/"+name, h, nil)
- if err != nil {
- return
- }
-
- body, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(body, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to delete config")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
-
-// GetAppliedMachineGroups returns the names of the machine groups that the given config is applied to.
-func (p *LogProject) GetAppliedMachineGroups(confName string) (groupNames []string, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/configs/%v/machinegroups", confName)
- r, err := request(p, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to get applied machine groups")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Body struct {
- Count int
- Machinegroups []string
- }
-
- body := &Body{}
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- groupNames = body.Machinegroups
- return
-}
-
-// GetAppliedConfigs returns the names of the configs applied to the machine group groupName.
-func (p *LogProject) GetAppliedConfigs(groupName string) (confNames []string, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/machinegroups/%v/configs", groupName)
- r, err := request(p, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to applied configs")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Cfg struct {
- Count int `json:"count"`
- Configs []string `json:"configs"`
- }
-
- body := &Cfg{}
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- confNames = body.Configs
- return
-}
-
-// ApplyConfigToMachineGroup applies config to machine group.
-func (p *LogProject) ApplyConfigToMachineGroup(confName, groupName string) (err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName)
- r, err := request(p, "PUT", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to apply config to machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
-
-// RemoveConfigFromMachineGroup removes config from machine group.
-func (p *LogProject) RemoveConfigFromMachineGroup(confName, groupName string) (err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/machinegroups/%v/configs/%v", groupName, confName)
- r, err := request(p, "DELETE", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to remove config from machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
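
Taken together, the project-level methods above form a small management client. A minimal sketch of how they were typically driven, assuming placeholder project name, endpoint and credentials:

    package main

    import (
        "fmt"

        "github.com/astaxie/beego/logs/alils"
    )

    func main() {
        // All values are placeholders.
        p, err := alils.NewLogProject("my-project", "cn-hangzhou.log.aliyuncs.com", "accessKeyID", "accessKeySecret")
        if err != nil {
            panic(err)
        }

        // Create a logstore that keeps logs for 30 days across 2 shards.
        if err := p.CreateLogStore("app-log", 30, 2); err != nil {
            fmt.Println("create logstore:", err)
        }

        // List every logstore in the project.
        names, err := p.ListLogStore()
        if err != nil {
            fmt.Println("list logstores:", err)
            return
        }
        fmt.Println("logstores:", names)
    }
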
diff --git a/logs/alils/log_store.go b/logs/alils/log_store.go
deleted file mode 100755
index fa502736..00000000
--- a/logs/alils/log_store.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package alils
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httputil"
- "strconv"
-
- lz4 "github.com/cloudflare/golz4"
- "github.com/gogo/protobuf/proto"
-)
-
-// LogStore stores the logs.
-type LogStore struct {
- Name string `json:"logstoreName"`
- TTL int
- ShardCount int
-
- CreateTime uint32
- LastModifyTime uint32
-
- project *LogProject
-}
-
-// Shard defines a log shard.
-type Shard struct {
- ShardID int `json:"shardID"`
-}
-
-// ListShards returns shard id list of this logstore.
-func (s *LogStore) ListShards() (shardIDs []int, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/logstores/%v/shards", s.Name)
- r, err := request(s.project, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to list logstore")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Println(dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- var shards []*Shard
- err = json.Unmarshal(buf, &shards)
- if err != nil {
- return
- }
-
- for _, v := range shards {
- shardIDs = append(shardIDs, v.ShardID)
- }
- return
-}
-
-// PutLogs puts logs into the logstore.
-// Callers should transform user logs into a LogGroup.
-func (s *LogStore) PutLogs(lg *LogGroup) (err error) {
- body, err := proto.Marshal(lg)
- if err != nil {
- return
- }
-
- // Compress the body with lz4
- out := make([]byte, lz4.CompressBound(body))
- n, err := lz4.Compress(body, out)
- if err != nil {
- return
- }
-
- h := map[string]string{
- "x-sls-compresstype": "lz4",
- "x-sls-bodyrawsize": fmt.Sprintf("%v", len(body)),
- "Content-Type": "application/x-protobuf",
- }
-
- uri := fmt.Sprintf("/logstores/%v", s.Name)
- r, err := request(s.project, "POST", uri, h, out[:n])
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to put logs")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
- return
-}
-
-// GetCursor gets the log cursor of the shard specified by shardID.
-// The from parameter can take three forms: a) a unix timestamp in seconds, b) "begin", c) "end".
-// For more detail please read: http://gitlab.alibaba-inc.com/sls/doc/blob/master/api/shard.md#logstore
-func (s *LogStore) GetCursor(shardID int, from string) (cursor string, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/logstores/%v/shards/%v?type=cursor&from=%v",
- s.Name, shardID, from)
-
- r, err := request(s.project, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to get cursor")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Printf("%s\n", dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- type Body struct {
- Cursor string
- }
- body := &Body{}
-
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
- cursor = body.Cursor
- return
-}
-
-// GetLogsBytes gets the raw log bytes from the shard specified by shardID, starting at cursor.
-// logGroupMaxCount is the max number of LogGroups that can be returned.
-// nextCursor is the cursor to use for the next read.
-func (s *LogStore) GetLogsBytes(shardID int, cursor string,
- logGroupMaxCount int) (out []byte, nextCursor string, err error) {
-
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- "Accept": "application/x-protobuf",
- "Accept-Encoding": "lz4",
- }
-
- uri := fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&count=%v",
- s.Name, shardID, cursor, logGroupMaxCount)
-
- r, err := request(s.project, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to get cursor")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Println(dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- v, ok := r.Header["X-Sls-Compresstype"]
- if !ok || len(v) == 0 {
- err = fmt.Errorf("can't find 'x-sls-compresstype' header")
- return
- }
- if v[0] != "lz4" {
- err = fmt.Errorf("unexpected compress type:%v", v[0])
- return
- }
-
- v, ok = r.Header["X-Sls-Cursor"]
- if !ok || len(v) == 0 {
- err = fmt.Errorf("can't find 'x-sls-cursor' header")
- return
- }
- nextCursor = v[0]
-
- v, ok = r.Header["X-Sls-Bodyrawsize"]
- if !ok || len(v) == 0 {
- err = fmt.Errorf("can't find 'x-sls-bodyrawsize' header")
- return
- }
- bodyRawSize, err := strconv.Atoi(v[0])
- if err != nil {
- return
- }
-
- out = make([]byte, bodyRawSize)
- err = lz4.Uncompress(buf, out)
- if err != nil {
- return
- }
-
- return
-}
-
-// LogsBytesDecode decodes the raw log bytes returned by the GetLogsBytes API.
-func LogsBytesDecode(data []byte) (gl *LogGroupList, err error) {
-
- gl = &LogGroupList{}
- err = proto.Unmarshal(data, gl)
- if err != nil {
- return
- }
-
- return
-}
-
-// GetLogs gets logs from the shard specified by shardID, starting at cursor.
-// logGroupMaxCount is the max number of LogGroups that can be returned.
-// nextCursor is the cursor to use for the next read.
-func (s *LogStore) GetLogs(shardID int, cursor string,
- logGroupMaxCount int) (gl *LogGroupList, nextCursor string, err error) {
-
- out, nextCursor, err := s.GetLogsBytes(shardID, cursor, logGroupMaxCount)
- if err != nil {
- return
- }
-
- gl, err = LogsBytesDecode(out)
- if err != nil {
- return
- }
-
- return
-}
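
The read path of the store above is cursor-based: pick a shard, resolve a cursor, then page through log groups. A minimal sketch under the same placeholder project and logstore names as before:

    package main

    import (
        "fmt"

        "github.com/astaxie/beego/logs/alils"
    )

    func main() {
        // All names and credentials are placeholders.
        p, _ := alils.NewLogProject("my-project", "cn-hangzhou.log.aliyuncs.com", "accessKeyID", "accessKeySecret")
        store, err := p.GetLogStore("app-log")
        if err != nil {
            panic(err)
        }

        shards, err := store.ListShards()
        if err != nil || len(shards) == 0 {
            panic("no shards available")
        }

        // Read from the beginning of the first shard, at most 10 LogGroups per call.
        cursor, err := store.GetCursor(shards[0], "begin")
        if err != nil {
            panic(err)
        }
        groups, nextCursor, err := store.GetLogs(shards[0], cursor, 10)
        if err != nil {
            panic(err)
        }
        _ = groups // a *alils.LogGroupList decoded from the protobuf payload
        fmt.Println("next cursor:", nextCursor)
    }
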
diff --git a/logs/alils/machine_group.go b/logs/alils/machine_group.go
deleted file mode 100755
index b6c69a14..00000000
--- a/logs/alils/machine_group.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package alils
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/http/httputil"
-)
-
-// MachineGroupAttribute defines the machine group attribute.
-type MachineGroupAttribute struct {
- ExternalName string `json:"externalName"`
- TopicName string `json:"groupTopic"`
-}
-
-// MachineGroup defines the machine group.
-type MachineGroup struct {
- Name string `json:"groupName"`
- Type string `json:"groupType"`
- MachineIDType string `json:"machineIdentifyType"`
- MachineIDList []string `json:"machineList"`
-
- Attribute MachineGroupAttribute `json:"groupAttribute"`
-
- CreateTime uint32
- LastModifyTime uint32
-
- project *LogProject
-}
-
-// Machine defines a machine.
-type Machine struct {
- IP string
- UniqueID string `json:"machine-uniqueid"`
- UserdefinedID string `json:"userdefined-id"`
-}
-
-// MachineList defines the machine list.
-type MachineList struct {
- Total int
- Machines []*Machine
-}
-
-// ListMachines returns machine list of this machine group.
-func (m *MachineGroup) ListMachines() (ms []*Machine, total int, err error) {
- h := map[string]string{
- "x-sls-bodyrawsize": "0",
- }
-
- uri := fmt.Sprintf("/machinegroups/%v/machines", m.Name)
- r, err := request(m.project, "GET", uri, h, nil)
- if err != nil {
- return
- }
-
- buf, err := ioutil.ReadAll(r.Body)
- if err != nil {
- return
- }
-
- if r.StatusCode != http.StatusOK {
- errMsg := &errorMessage{}
- err = json.Unmarshal(buf, errMsg)
- if err != nil {
- err = fmt.Errorf("failed to remove config from machine group")
- dump, _ := httputil.DumpResponse(r, true)
- fmt.Println(dump)
- return
- }
- err = fmt.Errorf("%v:%v", errMsg.Code, errMsg.Message)
- return
- }
-
- body := &MachineList{}
- err = json.Unmarshal(buf, body)
- if err != nil {
- return
- }
-
- ms = body.Machines
- total = body.Total
-
- return
-}
-
-// GetAppliedConfigs returns applied configs of this machine group.
-func (m *MachineGroup) GetAppliedConfigs() (confNames []string, err error) {
- confNames, err = m.project.GetAppliedConfigs(m.Name)
- return
-}
diff --git a/logs/alils/request.go b/logs/alils/request.go
deleted file mode 100755
index 50d9c43c..00000000
--- a/logs/alils/request.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package alils
-
-import (
- "bytes"
- "crypto/md5"
- "fmt"
- "net/http"
-)
-
-// request sends a request to SLS.
-func request(project *LogProject, method, uri string, headers map[string]string,
- body []byte) (resp *http.Response, err error) {
-
- // The caller should provide 'x-sls-bodyrawsize' header
- if _, ok := headers["x-sls-bodyrawsize"]; !ok {
- err = fmt.Errorf("Can't find 'x-sls-bodyrawsize' header")
- return
- }
-
- // SLS public request headers
- headers["Host"] = project.Name + "." + project.Endpoint
- headers["Date"] = nowRFC1123()
- headers["x-sls-apiversion"] = version
- headers["x-sls-signaturemethod"] = signatureMethod
- if body != nil {
- bodyMD5 := fmt.Sprintf("%X", md5.Sum(body))
- headers["Content-MD5"] = bodyMD5
-
- if _, ok := headers["Content-Type"]; !ok {
- err = fmt.Errorf("Can't find 'Content-Type' header")
- return
- }
- }
-
- // Calc Authorization
- // Authorization = "SLS <AccessKeyID>:<Signature>"
- digest, err := signature(project, method, uri, headers)
- if err != nil {
- return
- }
- auth := fmt.Sprintf("SLS %v:%v", project.AccessKeyID, digest)
- headers["Authorization"] = auth
-
- // Initialize http request
- reader := bytes.NewReader(body)
- urlStr := fmt.Sprintf("http://%v.%v%v", project.Name, project.Endpoint, uri)
- req, err := http.NewRequest(method, urlStr, reader)
- if err != nil {
- return
- }
- for k, v := range headers {
- req.Header.Add(k, v)
- }
-
- // Get ready to do request
- resp, err = http.DefaultClient.Do(req)
- if err != nil {
- return
- }
-
- return
-}
diff --git a/logs/alils/signature.go b/logs/alils/signature.go
deleted file mode 100755
index 2d611307..00000000
--- a/logs/alils/signature.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package alils
-
-import (
- "crypto/hmac"
- "crypto/sha1"
- "encoding/base64"
- "fmt"
- "net/url"
- "sort"
- "strings"
- "time"
-)
-
-// GMT location
-var gmtLoc = time.FixedZone("GMT", 0)
-
-// nowRFC1123 returns the current time in RFC1123 format with the GMT timezone,
-// e.g. "Mon, 02 Jan 2006 15:04:05 GMT".
-func nowRFC1123() string {
- return time.Now().In(gmtLoc).Format(time.RFC1123)
-}
-
-// signature calculates a request's signature digest.
-func signature(project *LogProject, method, uri string,
- headers map[string]string) (digest string, err error) {
- var contentMD5, contentType, date, canoHeaders, canoResource string
- var slsHeaderKeys sort.StringSlice
-
- // SignString = VERB + "\n"
- // + CONTENT-MD5 + "\n"
- // + CONTENT-TYPE + "\n"
- // + DATE + "\n"
- // + CanonicalizedSLSHeaders + "\n"
- // + CanonicalizedResource
-
- if val, ok := headers["Content-MD5"]; ok {
- contentMD5 = val
- }
-
- if val, ok := headers["Content-Type"]; ok {
- contentType = val
- }
-
- date, ok := headers["Date"]
- if !ok {
- err = fmt.Errorf("Can't find 'Date' header")
- return
- }
-
- // Calc CanonicalizedSLSHeaders
- slsHeaders := make(map[string]string, len(headers))
- for k, v := range headers {
- l := strings.TrimSpace(strings.ToLower(k))
- if strings.HasPrefix(l, "x-sls-") {
- slsHeaders[l] = strings.TrimSpace(v)
- slsHeaderKeys = append(slsHeaderKeys, l)
- }
- }
-
- sort.Sort(slsHeaderKeys)
- for i, k := range slsHeaderKeys {
- canoHeaders += k + ":" + slsHeaders[k]
- if i+1 < len(slsHeaderKeys) {
- canoHeaders += "\n"
- }
- }
-
- // Calc CanonicalizedResource
- u, err := url.Parse(uri)
- if err != nil {
- return
- }
-
- canoResource += url.QueryEscape(u.Path)
- if u.RawQuery != "" {
- var keys sort.StringSlice
-
- vals := u.Query()
- for k := range vals {
- keys = append(keys, k)
- }
-
- sort.Sort(keys)
- canoResource += "?"
- for i, k := range keys {
- if i > 0 {
- canoResource += "&"
- }
-
- for _, v := range vals[k] {
- canoResource += k + "=" + v
- }
- }
- }
-
- signStr := method + "\n" +
- contentMD5 + "\n" +
- contentType + "\n" +
- date + "\n" +
- canoHeaders + "\n" +
- canoResource
-
- // Signature = base64(hmac-sha1(UTF8-Encoding-Of(SignString),AccessKeySecret))
- mac := hmac.New(sha1.New, []byte(project.AccessKeySecret))
- _, err = mac.Write([]byte(signStr))
- if err != nil {
- return
- }
- digest = base64.StdEncoding.EncodeToString(mac.Sum(nil))
- return
-}
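
To make the canonicalization above concrete, here is a self-contained sketch that reproduces the digest computation for one hypothetical GET request; the Date, the secret, and the x-sls-apiversion / x-sls-signaturemethod values are placeholders standing in for the package constants defined elsewhere in the package:

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // SignString = VERB \n Content-MD5 \n Content-Type \n Date \n
        //              CanonicalizedSLSHeaders \n CanonicalizedResource
        signStr := "GET\n" +
            "\n" + // no Content-MD5 for a body-less GET
            "\n" + // no Content-Type
            "Mon, 02 Jan 2006 15:04:05 GMT\n" + // Date header (placeholder)
            "x-sls-apiversion:0.5.0\n" + // SLS headers, lower-cased and sorted by key
            "x-sls-bodyrawsize:0\n" +
            "x-sls-signaturemethod:hmac-sha1\n" +
            "/logstores" // canonicalized resource, no query string here

        mac := hmac.New(sha1.New, []byte("my-access-key-secret")) // placeholder secret
        mac.Write([]byte(signStr))
        fmt.Println(base64.StdEncoding.EncodeToString(mac.Sum(nil)))
    }
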
diff --git a/logs/conn.go b/logs/conn.go
deleted file mode 100644
index 74c458ab..00000000
--- a/logs/conn.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "encoding/json"
- "io"
- "net"
- "time"
-)
-
-// connWriter implements LoggerInterface.
-// It writes messages over a keep-alive TCP connection.
-type connWriter struct {
- lg *logWriter
- innerWriter io.WriteCloser
- ReconnectOnMsg bool `json:"reconnectOnMsg"`
- Reconnect bool `json:"reconnect"`
- Net string `json:"net"`
- Addr string `json:"addr"`
- Level int `json:"level"`
-}
-
-// NewConn creates a new connWriter and returns it as a Logger.
-func NewConn() Logger {
- conn := new(connWriter)
- conn.Level = LevelTrace
- return conn
-}
-
-// Init initializes the connection writer with a JSON config.
-// Besides "level", the config may set "net", "addr", "reconnect" and "reconnectOnMsg".
-func (c *connWriter) Init(jsonConfig string) error {
- return json.Unmarshal([]byte(jsonConfig), c)
-}
-
-// WriteMsg writes a message over the connection.
-// If the connection is down, it tries to reconnect first.
-func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > c.Level {
- return nil
- }
- if c.needToConnectOnMsg() {
- err := c.connect()
- if err != nil {
- return err
- }
- }
-
- if c.ReconnectOnMsg {
- defer c.innerWriter.Close()
- }
-
- _, err := c.lg.writeln(when, msg)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Flush implements the Logger interface; it is a no-op.
-func (c *connWriter) Flush() {
-
-}
-
-// Destroy destroys the connection writer and closes the TCP connection.
-func (c *connWriter) Destroy() {
- if c.innerWriter != nil {
- c.innerWriter.Close()
- }
-}
-
-func (c *connWriter) connect() error {
- if c.innerWriter != nil {
- c.innerWriter.Close()
- c.innerWriter = nil
- }
-
- conn, err := net.Dial(c.Net, c.Addr)
- if err != nil {
- return err
- }
-
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- tcpConn.SetKeepAlive(true)
- }
-
- c.innerWriter = conn
- c.lg = newLogWriter(conn)
- return nil
-}
-
-func (c *connWriter) needToConnectOnMsg() bool {
- if c.Reconnect {
- return true
- }
-
- if c.innerWriter == nil {
- return true
- }
-
- return c.ReconnectOnMsg
-}
-
-func init() {
- Register(AdapterConn, NewConn)
-}
diff --git a/logs/conn_test.go b/logs/conn_test.go
deleted file mode 100644
index 7cfb4d2b..00000000
--- a/logs/conn_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "net"
- "os"
- "testing"
-)
-
-// connTCPListener takes a TCP listener and accepts n TCP connections,
-// returning the accepted connections on connChan.
-func connTCPListener(t *testing.T, n int, ln net.Listener, connChan chan<- net.Conn) {
-
- // Listen and accept n incoming connections
- for i := 0; i < n; i++ {
- conn, err := ln.Accept()
- if err != nil {
- t.Log("Error accepting connection: ", err.Error())
- os.Exit(1)
- }
-
- // Send accepted connection to channel
- connChan <- conn
- }
- ln.Close()
- close(connChan)
-}
-
-func TestConn(t *testing.T) {
- log := NewLogger(1000)
- log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
- log.Informational("informational")
-}
-
-func TestReconnect(t *testing.T) {
- // Setup connection listener
- newConns := make(chan net.Conn)
- connNum := 2
- ln, err := net.Listen("tcp", ":6002")
- if err != nil {
- t.Log("Error listening:", err.Error())
- os.Exit(1)
- }
- go connTCPListener(t, connNum, ln, newConns)
-
- // Setup logger
- log := NewLogger(1000)
- log.SetPrefix("test")
- log.SetLogger(AdapterConn, `{"net":"tcp","reconnect":true,"level":6,"addr":":6002"}`)
- log.Informational("informational 1")
-
- // Refuse first connection
- first := <-newConns
- first.Close()
-
- // Send another log after conn closed
- log.Informational("informational 2")
-
- // Check if there was a second connection attempt
- // commented out because this code was moved to pkg/logs
- // select {
- // case second := <-newConns:
- // second.Close()
- // default:
- // t.Error("Did not reconnect")
- // }
-}
diff --git a/logs/console.go b/logs/console.go
deleted file mode 100644
index 3dcaee1d..00000000
--- a/logs/console.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "encoding/json"
- "os"
- "strings"
- "time"
-
- "github.com/shiena/ansicolor"
-)
-
-// brush is a function that wraps text in a terminal color.
-type brush func(string) string
-
-// newBrush returns a fixed-color brush.
-func newBrush(color string) brush {
- pre := "\033["
- reset := "\033[0m"
- return func(text string) string {
- return pre + color + "m" + text + reset
- }
-}
-
-var colors = []brush{
- newBrush("1;37"), // Emergency white
- newBrush("1;36"), // Alert cyan
- newBrush("1;35"), // Critical magenta
- newBrush("1;31"), // Error red
- newBrush("1;33"), // Warning yellow
- newBrush("1;32"), // Notice green
- newBrush("1;34"), // Informational blue
- newBrush("1;44"), // Debug Background blue
-}
-
-// consoleWriter implements LoggerInterface and writes messages to terminal.
-type consoleWriter struct {
- lg *logWriter
- Level int `json:"level"`
- Colorful bool `json:"color"` // this field is useful only when the system's terminal supports color
-}
-
-// NewConsole creates a consoleWriter and returns it as a Logger.
-func NewConsole() Logger {
- cw := &consoleWriter{
- lg: newLogWriter(ansicolor.NewAnsiColorWriter(os.Stdout)),
- Level: LevelDebug,
- Colorful: true,
- }
- return cw
-}
-
-// Init initializes the console logger.
-// jsonConfig is like '{"level":LevelTrace}'.
-func (c *consoleWriter) Init(jsonConfig string) error {
- if len(jsonConfig) == 0 {
- return nil
- }
- return json.Unmarshal([]byte(jsonConfig), c)
-}
-
-// WriteMsg writes a message to the console.
-func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > c.Level {
- return nil
- }
- if c.Colorful {
- msg = strings.Replace(msg, levelPrefix[level], colors[level](levelPrefix[level]), 1)
- }
- c.lg.writeln(when, msg)
- return nil
-}
-
-// Destroy implements the Logger interface; it is a no-op.
-func (c *consoleWriter) Destroy() {
-
-}
-
-// Flush implements the Logger interface; it is a no-op.
-func (c *consoleWriter) Flush() {
-
-}
-
-func init() {
- Register(AdapterConsole, NewConsole)
-}
diff --git a/logs/console_test.go b/logs/console_test.go
deleted file mode 100644
index 4bc45f57..00000000
--- a/logs/console_test.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "testing"
- "time"
-)
-
-// Try each log level in decreasing order of priority.
-func testConsoleCalls(bl *BeeLogger) {
- bl.Emergency("emergency")
- bl.Alert("alert")
- bl.Critical("critical")
- bl.Error("error")
- bl.Warning("warning")
- bl.Notice("notice")
- bl.Informational("informational")
- bl.Debug("debug")
-}
-
-// Test console logging by visually comparing the lines being output with and
-// without a log level specification.
-func TestConsole(t *testing.T) {
- log1 := NewLogger(10000)
- log1.EnableFuncCallDepth(true)
- log1.SetLogger("console", "")
- testConsoleCalls(log1)
-
- log2 := NewLogger(100)
- log2.SetLogger("console", `{"level":3}`)
- testConsoleCalls(log2)
-}
-
-// Test console without color
-func TestConsoleNoColor(t *testing.T) {
- log := NewLogger(100)
- log.SetLogger("console", `{"color":false}`)
- testConsoleCalls(log)
-}
-
-// Test console async
-func TestConsoleAsync(t *testing.T) {
- log := NewLogger(100)
- log.SetLogger("console")
- log.Async()
- //log.Close()
- testConsoleCalls(log)
- for len(log.msgChan) != 0 {
- time.Sleep(1 * time.Millisecond)
- }
-}
diff --git a/logs/es/es.go b/logs/es/es.go
deleted file mode 100644
index 2b7b1710..00000000
--- a/logs/es/es.go
+++ /dev/null
@@ -1,102 +0,0 @@
-package es
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/url"
- "strings"
- "time"
-
- "github.com/elastic/go-elasticsearch/v6"
- "github.com/elastic/go-elasticsearch/v6/esapi"
-
- "github.com/astaxie/beego/logs"
-)
-
-// NewES return a LoggerInterface
-func NewES() logs.Logger {
- cw := &esLogger{
- Level: logs.LevelDebug,
- }
- return cw
-}
-
-// esLogger will log messages into Elasticsearch.
-// Before using this implementation, please
-// import this package in your main package,
-// usually as an anonymous import,
-// for example:
-// import _ "github.com/astaxie/beego/logs/es"
-type esLogger struct {
- *elasticsearch.Client
- DSN string `json:"dsn"`
- Level int `json:"level"`
-}
-
-// {"dsn":"http://localhost:9200/","level":1}
-func (el *esLogger) Init(jsonconfig string) error {
- err := json.Unmarshal([]byte(jsonconfig), el)
- if err != nil {
- return err
- }
- if el.DSN == "" {
- return errors.New("empty dsn")
- } else if u, err := url.Parse(el.DSN); err != nil {
- return err
- } else if u.Path == "" {
- return errors.New("missing prefix")
- } else {
- conn, err := elasticsearch.NewClient(elasticsearch.Config{
- Addresses: []string{el.DSN},
- })
- if err != nil {
- return err
- }
- el.Client = conn
- }
- return nil
-}
-
-// WriteMsg writes the msg and its level into Elasticsearch.
-func (el *esLogger) WriteMsg(when time.Time, msg string, level int) error {
- if level > el.Level {
- return nil
- }
-
- idx := LogDocument{
- Timestamp: when.Format(time.RFC3339),
- Msg: msg,
- }
-
- body, err := json.Marshal(idx)
- if err != nil {
- return err
- }
- req := esapi.IndexRequest{
- Index: fmt.Sprintf("%04d.%02d.%02d", when.Year(), when.Month(), when.Day()),
- DocumentType: "logs",
- Body: strings.NewReader(string(body)),
- }
- _, err = req.Do(context.Background(), el.Client)
- return err
-}
-
-// Destroy is an empty method.
-func (el *esLogger) Destroy() {
-}
-
-// Flush is an empty method.
-func (el *esLogger) Flush() {
-
-}
-
-type LogDocument struct {
- Timestamp string `json:"timestamp"`
- Msg string `json:"msg"`
-}
-
-func init() {
- logs.Register(logs.AdapterEs, NewES)
-}
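
From application code the adapter above was selected through the logs facade; a minimal sketch, assuming a local Elasticsearch endpoint (the URL and level value are placeholders):

    package main

    import (
        "github.com/astaxie/beego/logs"

        // Anonymous import so this package's init() registers the "es" adapter.
        _ "github.com/astaxie/beego/logs/es"
    )

    func main() {
        l := logs.NewLogger(10000)
        // The DSN must carry a path prefix, as required by Init above.
        l.SetLogger(logs.AdapterEs, `{"dsn":"http://localhost:9200/","level":7}`)
        l.Informational("hello from the es adapter")
        l.Flush()
    }
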
diff --git a/logs/file.go b/logs/file.go
deleted file mode 100644
index 40a3572a..00000000
--- a/logs/file.go
+++ /dev/null
@@ -1,409 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// fileLogWriter implements LoggerInterface.
-// It writes messages by lines limit, file size limit, or time frequency.
-type fileLogWriter struct {
- sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize
- // The opened file
- Filename string `json:"filename"`
- fileWriter *os.File
-
- // Rotate at line
- MaxLines int `json:"maxlines"`
- maxLinesCurLines int
-
- MaxFiles int `json:"maxfiles"`
- MaxFilesCurFiles int
-
- // Rotate at size
- MaxSize int `json:"maxsize"`
- maxSizeCurSize int
-
- // Rotate daily
- Daily bool `json:"daily"`
- MaxDays int64 `json:"maxdays"`
- dailyOpenDate int
- dailyOpenTime time.Time
-
- // Rotate hourly
- Hourly bool `json:"hourly"`
- MaxHours int64 `json:"maxhours"`
- hourlyOpenDate int
- hourlyOpenTime time.Time
-
- Rotate bool `json:"rotate"`
-
- Level int `json:"level"`
-
- Perm string `json:"perm"`
-
- RotatePerm string `json:"rotateperm"`
-
- fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
-}
-
-// newFileWriter creates a fileLogWriter and returns it as a Logger.
-func newFileWriter() Logger {
- w := &fileLogWriter{
- Daily: true,
- MaxDays: 7,
- Hourly: false,
- MaxHours: 168,
- Rotate: true,
- RotatePerm: "0440",
- Level: LevelTrace,
- Perm: "0660",
- MaxLines: 10000000,
- MaxFiles: 999,
- MaxSize: 1 << 28,
- }
- return w
-}
-
-// Init initializes the file logger with a JSON config.
-// jsonConfig like:
-// {
-// "filename":"logs/beego.log",
-// "maxLines":10000,
-// "maxsize":1024,
-// "daily":true,
-// "maxDays":15,
-// "rotate":true,
-// "perm":"0600"
-// }
-func (w *fileLogWriter) Init(jsonConfig string) error {
- err := json.Unmarshal([]byte(jsonConfig), w)
- if err != nil {
- return err
- }
- if len(w.Filename) == 0 {
- return errors.New("jsonconfig must have filename")
- }
- w.suffix = filepath.Ext(w.Filename)
- w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix)
- if w.suffix == "" {
- w.suffix = ".log"
- }
- err = w.startLogger()
- return err
-}
-
-// startLogger starts the file logger: it creates the log file and sets it as the lock-protected file writer.
-func (w *fileLogWriter) startLogger() error {
- file, err := w.createLogFile()
- if err != nil {
- return err
- }
- if w.fileWriter != nil {
- w.fileWriter.Close()
- }
- w.fileWriter = file
- return w.initFd()
-}
-
-func (w *fileLogWriter) needRotateDaily(size int, day int) bool {
- return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
- (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
- (w.Daily && day != w.dailyOpenDate)
-}
-
-func (w *fileLogWriter) needRotateHourly(size int, hour int) bool {
- return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
- (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
- (w.Hourly && hour != w.hourlyOpenDate)
-
-}
-
-// WriteMsg write logger message into file.
-func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > w.Level {
- return nil
- }
- hd, d, h := formatTimeHeader(when)
- msg = string(hd) + msg + "\n"
- if w.Rotate {
- w.RLock()
- if w.needRotateHourly(len(msg), h) {
- w.RUnlock()
- w.Lock()
- if w.needRotateHourly(len(msg), h) {
- if err := w.doRotate(when); err != nil {
- fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
- }
- }
- w.Unlock()
- } else if w.needRotateDaily(len(msg), d) {
- w.RUnlock()
- w.Lock()
- if w.needRotateDaily(len(msg), d) {
- if err := w.doRotate(when); err != nil {
- fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
- }
- }
- w.Unlock()
- } else {
- w.RUnlock()
- }
- }
-
- w.Lock()
- _, err := w.fileWriter.Write([]byte(msg))
- if err == nil {
- w.maxLinesCurLines++
- w.maxSizeCurSize += len(msg)
- }
- w.Unlock()
- return err
-}
-
-func (w *fileLogWriter) createLogFile() (*os.File, error) {
- // Open the log file
- perm, err := strconv.ParseInt(w.Perm, 8, 64)
- if err != nil {
- return nil, err
- }
-
- dirPath := path.Dir(w.Filename) // avoid shadowing the path/filepath package
- os.MkdirAll(dirPath, os.FileMode(perm))
-
- fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm))
- if err == nil {
- // Make sure the file perm is the user-specified perm, because os.OpenFile obeys the umask
- os.Chmod(w.Filename, os.FileMode(perm))
- }
- return fd, err
-}
-
-func (w *fileLogWriter) initFd() error {
- fd := w.fileWriter
- fInfo, err := fd.Stat()
- if err != nil {
- return fmt.Errorf("get stat err: %s", err)
- }
- w.maxSizeCurSize = int(fInfo.Size())
- w.dailyOpenTime = time.Now()
- w.dailyOpenDate = w.dailyOpenTime.Day()
- w.hourlyOpenTime = time.Now()
- w.hourlyOpenDate = w.hourlyOpenTime.Hour()
- w.maxLinesCurLines = 0
- if w.Hourly {
- go w.hourlyRotate(w.hourlyOpenTime)
- } else if w.Daily {
- go w.dailyRotate(w.dailyOpenTime)
- }
- if fInfo.Size() > 0 && w.MaxLines > 0 {
- count, err := w.lines()
- if err != nil {
- return err
- }
- w.maxLinesCurLines = count
- }
- return nil
-}
-
-func (w *fileLogWriter) dailyRotate(openTime time.Time) {
- y, m, d := openTime.Add(24 * time.Hour).Date()
- nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location())
- tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
- <-tm.C
- w.Lock()
- if w.needRotateDaily(0, time.Now().Day()) {
- if err := w.doRotate(time.Now()); err != nil {
- fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
- }
- }
- w.Unlock()
-}
-
-func (w *fileLogWriter) hourlyRotate(openTime time.Time) {
- y, m, d := openTime.Add(1 * time.Hour).Date()
- h, _, _ := openTime.Add(1 * time.Hour).Clock()
- nextHour := time.Date(y, m, d, h, 0, 0, 0, openTime.Location())
- tm := time.NewTimer(time.Duration(nextHour.UnixNano() - openTime.UnixNano() + 100))
- <-tm.C
- w.Lock()
- if w.needRotateHourly(0, time.Now().Hour()) {
- if err := w.doRotate(time.Now()); err != nil {
- fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
- }
- }
- w.Unlock()
-}
-
-func (w *fileLogWriter) lines() (int, error) {
- fd, err := os.Open(w.Filename)
- if err != nil {
- return 0, err
- }
- defer fd.Close()
-
- buf := make([]byte, 32768) // 32k
- count := 0
- lineSep := []byte{'\n'}
-
- for {
- c, err := fd.Read(buf)
- if err != nil && err != io.EOF {
- return count, err
- }
-
- count += bytes.Count(buf[:c], lineSep)
-
- if err == io.EOF {
- break
- }
- }
-
- return count, nil
-}
-
-// doRotate rotates the log by renaming the current file and starting a new one.
-// The new file name is like xx.2013-01-01.log (daily) or xx.001.log (by line or size).
-func (w *fileLogWriter) doRotate(logTime time.Time) error {
- // file exists
- // Find the next available number
- num := w.MaxFilesCurFiles + 1
- fName := ""
- format := ""
- var openTime time.Time
- rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64)
- if err != nil {
- return err
- }
-
- _, err = os.Lstat(w.Filename)
- if err != nil {
- // even if the file does not exist, or on any other error, we should RESTART the logger
- goto RESTART_LOGGER
- }
-
- if w.Hourly {
- format = "2006010215"
- openTime = w.hourlyOpenTime
- } else if w.Daily {
- format = "2006-01-02"
- openTime = w.dailyOpenTime
- }
-
- // the file is split only when one of them is set
- if w.MaxLines > 0 || w.MaxSize > 0 {
- for ; err == nil && num <= w.MaxFiles; num++ {
- fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format(format), num, w.suffix)
- _, err = os.Lstat(fName)
- }
- } else {
- fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", openTime.Format(format), num, w.suffix)
- _, err = os.Lstat(fName)
- w.MaxFilesCurFiles = num
- }
-
- // return an error if the last file checked still exists
- if err == nil {
- return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename)
- }
-
- // close fileWriter before rename
- w.fileWriter.Close()
-
- // Rename the file to its newly found name;
- // even if an error occurs, we MUST guarantee that a new logger is started
- err = os.Rename(w.Filename, fName)
- if err != nil {
- goto RESTART_LOGGER
- }
-
- err = os.Chmod(fName, os.FileMode(rotatePerm))
-
-RESTART_LOGGER:
-
- startLoggerErr := w.startLogger()
- go w.deleteOldLog()
-
- if startLoggerErr != nil {
- return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr)
- }
- if err != nil {
- return fmt.Errorf("Rotate: %s", err)
- }
- return nil
-}
-
-func (w *fileLogWriter) deleteOldLog() {
- dir := filepath.Dir(w.Filename)
- absolutePath, err := filepath.EvalSymlinks(w.Filename)
- if err == nil {
- dir = filepath.Dir(absolutePath)
- }
- filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
- defer func() {
- if r := recover(); r != nil {
- fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r)
- }
- }()
-
- if info == nil {
- return
- }
- if w.Hourly {
- if !info.IsDir() && info.ModTime().Add(1*time.Hour*time.Duration(w.MaxHours)).Before(time.Now()) {
- if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
- strings.HasSuffix(filepath.Base(path), w.suffix) {
- os.Remove(path)
- }
- }
- } else if w.Daily {
- if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) {
- if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
- strings.HasSuffix(filepath.Base(path), w.suffix) {
- os.Remove(path)
- }
- }
- }
- return
- })
-}
-
-// Destroy closes the file descriptor and the file writer.
-func (w *fileLogWriter) Destroy() {
- w.fileWriter.Close()
-}
-
-// Flush flushes the file logger.
-// The file logger buffers no messages in memory,
-// so flushing means syncing the file to disk.
-func (w *fileLogWriter) Flush() {
- w.fileWriter.Sync()
-}
-
-func init() {
- Register(AdapterFile, newFileWriter)
-}
diff --git a/logs/file_test.go b/logs/file_test.go
deleted file mode 100644
index 385eac43..00000000
--- a/logs/file_test.go
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "bufio"
- "fmt"
- "io/ioutil"
- "os"
- "strconv"
- "testing"
- "time"
-)
-
-func TestFilePerm(t *testing.T) {
- log := NewLogger(10000)
- // use 0666 as the test perm because the default umask is 022
- log.SetLogger("file", `{"filename":"test.log", "perm": "0666"}`)
- log.Debug("debug")
- log.Informational("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- file, err := os.Stat("test.log")
- if err != nil {
- t.Fatal(err)
- }
- if file.Mode() != 0666 {
- t.Fatal("unexpected log file permission")
- }
- os.Remove("test.log")
-}
-
-func TestFile1(t *testing.T) {
- log := NewLogger(10000)
- log.SetLogger("file", `{"filename":"test.log"}`)
- log.Debug("debug")
- log.Informational("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- f, err := os.Open("test.log")
- if err != nil {
- t.Fatal(err)
- }
- b := bufio.NewReader(f)
- lineNum := 0
- for {
- line, _, err := b.ReadLine()
- if err != nil {
- break
- }
- if len(line) > 0 {
- lineNum++
- }
- }
- var expected = LevelDebug + 1
- if lineNum != expected {
- t.Fatal(lineNum, "not "+strconv.Itoa(expected)+" lines")
- }
- os.Remove("test.log")
-}
-
-func TestFile2(t *testing.T) {
- log := NewLogger(10000)
- log.SetLogger("file", fmt.Sprintf(`{"filename":"test2.log","level":%d}`, LevelError))
- log.Debug("debug")
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- f, err := os.Open("test2.log")
- if err != nil {
- t.Fatal(err)
- }
- b := bufio.NewReader(f)
- lineNum := 0
- for {
- line, _, err := b.ReadLine()
- if err != nil {
- break
- }
- if len(line) > 0 {
- lineNum++
- }
- }
- var expected = LevelError + 1
- if lineNum != expected {
- t.Fatal(lineNum, "not "+strconv.Itoa(expected)+" lines")
- }
- os.Remove("test2.log")
-}
-
-func TestFileDailyRotate_01(t *testing.T) {
- log := NewLogger(10000)
- log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`)
- log.Debug("debug")
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1) + ".log"
- b, err := exists(rotateName)
- if !b || err != nil {
- os.Remove("test3.log")
- t.Fatal("rotate not generated")
- }
- os.Remove(rotateName)
- os.Remove("test3.log")
-}
-
-func TestFileDailyRotate_02(t *testing.T) {
- fn1 := "rotate_day.log"
- fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
- testFileRotate(t, fn1, fn2, true, false)
-}
-
-func TestFileDailyRotate_03(t *testing.T) {
- fn1 := "rotate_day.log"
- fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
- os.Create(fn)
- fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
- testFileRotate(t, fn1, fn2, true, false)
- os.Remove(fn)
-}
-
-func TestFileDailyRotate_04(t *testing.T) {
- fn1 := "rotate_day.log"
- fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
- testFileDailyRotate(t, fn1, fn2)
-}
-
-func TestFileDailyRotate_05(t *testing.T) {
- fn1 := "rotate_day.log"
- fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
- os.Create(fn)
- fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
- testFileDailyRotate(t, fn1, fn2)
- os.Remove(fn)
-}
-func TestFileDailyRotate_06(t *testing.T) { //test file mode
- log := NewLogger(10000)
- log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`)
- log.Debug("debug")
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1) + ".log"
- s, _ := os.Lstat(rotateName)
- if s.Mode() != 0440 {
- os.Remove(rotateName)
- os.Remove("test3.log")
- t.Fatal("rotate file mode error")
- }
- os.Remove(rotateName)
- os.Remove("test3.log")
-}
-
-func TestFileHourlyRotate_01(t *testing.T) {
- log := NewLogger(10000)
- log.SetLogger("file", `{"filename":"test3.log","hourly":true,"maxlines":4}`)
- log.Debug("debug")
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log"
- b, err := exists(rotateName)
- if !b || err != nil {
- os.Remove("test3.log")
- t.Fatal("rotate not generated")
- }
- os.Remove(rotateName)
- os.Remove("test3.log")
-}
-
-func TestFileHourlyRotate_02(t *testing.T) {
- fn1 := "rotate_hour.log"
- fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
- testFileRotate(t, fn1, fn2, false, true)
-}
-
-func TestFileHourlyRotate_03(t *testing.T) {
- fn1 := "rotate_hour.log"
- fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log"
- os.Create(fn)
- fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
- testFileRotate(t, fn1, fn2, false, true)
- os.Remove(fn)
-}
-
-func TestFileHourlyRotate_04(t *testing.T) {
- fn1 := "rotate_hour.log"
- fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
- testFileHourlyRotate(t, fn1, fn2)
-}
-
-func TestFileHourlyRotate_05(t *testing.T) {
- fn1 := "rotate_hour.log"
- fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log"
- os.Create(fn)
- fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
- testFileHourlyRotate(t, fn1, fn2)
- os.Remove(fn)
-}
-
-func TestFileHourlyRotate_06(t *testing.T) { //test file mode
- log := NewLogger(10000)
- log.SetLogger("file", `{"filename":"test3.log", "hourly":true, "maxlines":4}`)
- log.Debug("debug")
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log"
- s, _ := os.Lstat(rotateName)
- if s.Mode() != 0440 {
- os.Remove(rotateName)
- os.Remove("test3.log")
- t.Fatal("rotate file mode error")
- }
- os.Remove(rotateName)
- os.Remove("test3.log")
-}
-
-func testFileRotate(t *testing.T, fn1, fn2 string, daily, hourly bool) {
- fw := &fileLogWriter{
- Daily: daily,
- MaxDays: 7,
- Hourly: hourly,
- MaxHours: 168,
- Rotate: true,
- Level: LevelTrace,
- Perm: "0660",
- RotatePerm: "0440",
- }
-
- if daily {
- fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
- fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)
- fw.dailyOpenDate = fw.dailyOpenTime.Day()
- }
-
- if hourly {
- fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1))
- fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour)
- fw.hourlyOpenDate = fw.hourlyOpenTime.Day()
- }
-
- fw.WriteMsg(time.Now(), "this is a msg for test", LevelDebug)
-
- for _, file := range []string{fn1, fn2} {
- _, err := os.Stat(file)
- if err != nil {
- t.Log(err)
- t.FailNow()
- }
- os.Remove(file)
- }
- fw.Destroy()
-}
-
-func testFileDailyRotate(t *testing.T, fn1, fn2 string) {
- fw := &fileLogWriter{
- Daily: true,
- MaxDays: 7,
- Rotate: true,
- Level: LevelTrace,
- Perm: "0660",
- RotatePerm: "0440",
- }
- fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
- fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)
- fw.dailyOpenDate = fw.dailyOpenTime.Day()
- today, _ := time.ParseInLocation("2006-01-02", time.Now().Format("2006-01-02"), fw.dailyOpenTime.Location())
- today = today.Add(-1 * time.Second)
- fw.dailyRotate(today)
- for _, file := range []string{fn1, fn2} {
- _, err := os.Stat(file)
- if err != nil {
- t.FailNow()
- }
- content, err := ioutil.ReadFile(file)
- if err != nil {
- t.FailNow()
- }
- if len(content) > 0 {
- t.FailNow()
- }
- os.Remove(file)
- }
- fw.Destroy()
-}
-
-func testFileHourlyRotate(t *testing.T, fn1, fn2 string) {
- fw := &fileLogWriter{
- Hourly: true,
- MaxHours: 168,
- Rotate: true,
- Level: LevelTrace,
- Perm: "0660",
- RotatePerm: "0440",
- }
- fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1))
- fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour)
- fw.hourlyOpenDate = fw.hourlyOpenTime.Hour()
- hour, _ := time.ParseInLocation("2006010215", time.Now().Format("2006010215"), fw.hourlyOpenTime.Location())
- hour = hour.Add(-1 * time.Second)
- fw.hourlyRotate(hour)
- for _, file := range []string{fn1, fn2} {
- _, err := os.Stat(file)
- if err != nil {
- t.FailNow()
- }
- content, err := ioutil.ReadFile(file)
- if err != nil {
- t.FailNow()
- }
- if len(content) > 0 {
- t.FailNow()
- }
- os.Remove(file)
- }
- fw.Destroy()
-}
-func exists(path string) (bool, error) {
- _, err := os.Stat(path)
- if err == nil {
- return true, nil
- }
- if os.IsNotExist(err) {
- return false, nil
- }
- return false, err
-}
-
-func BenchmarkFile(b *testing.B) {
- log := NewLogger(100000)
- log.SetLogger("file", `{"filename":"test4.log"}`)
- for i := 0; i < b.N; i++ {
- log.Debug("debug")
- }
- os.Remove("test4.log")
-}
-
-func BenchmarkFileAsynchronous(b *testing.B) {
- log := NewLogger(100000)
- log.SetLogger("file", `{"filename":"test4.log"}`)
- log.Async()
- for i := 0; i < b.N; i++ {
- log.Debug("debug")
- }
- os.Remove("test4.log")
-}
-
-func BenchmarkFileCallDepth(b *testing.B) {
- log := NewLogger(100000)
- log.SetLogger("file", `{"filename":"test4.log"}`)
- log.EnableFuncCallDepth(true)
- log.SetLogFuncCallDepth(2)
- for i := 0; i < b.N; i++ {
- log.Debug("debug")
- }
- os.Remove("test4.log")
-}
-
-func BenchmarkFileAsynchronousCallDepth(b *testing.B) {
- log := NewLogger(100000)
- log.SetLogger("file", `{"filename":"test4.log"}`)
- log.EnableFuncCallDepth(true)
- log.SetLogFuncCallDepth(2)
- log.Async()
- for i := 0; i < b.N; i++ {
- log.Debug("debug")
- }
- os.Remove("test4.log")
-}
-
-func BenchmarkFileOnGoroutine(b *testing.B) {
- log := NewLogger(100000)
- log.SetLogger("file", `{"filename":"test4.log"}`)
- for i := 0; i < b.N; i++ {
- go log.Debug("debug")
- }
- os.Remove("test4.log")
-}
diff --git a/logs/jianliao.go b/logs/jianliao.go
deleted file mode 100644
index 88ba0f9a..00000000
--- a/logs/jianliao.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package logs
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "time"
-)
-
-// JLWriter implements beego LoggerInterface and is used to send messages to a jianliao webhook
-type JLWriter struct {
- AuthorName string `json:"authorname"`
- Title string `json:"title"`
- WebhookURL string `json:"webhookurl"`
- RedirectURL string `json:"redirecturl,omitempty"`
- ImageURL string `json:"imageurl,omitempty"`
- Level int `json:"level"`
-}
-
-// newJLWriter creates a jianliao writer.
-func newJLWriter() Logger {
- return &JLWriter{Level: LevelTrace}
-}
-
-// Init JLWriter with json config string
-func (s *JLWriter) Init(jsonconfig string) error {
- return json.Unmarshal([]byte(jsonconfig), s)
-}
-
-// WriteMsg posts the message to the jianliao webhook.
-// Messages less severe than the configured Level are dropped.
-func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > s.Level {
- return nil
- }
-
- text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg)
-
- form := url.Values{}
- form.Add("authorName", s.AuthorName)
- form.Add("title", s.Title)
- form.Add("text", text)
- if s.RedirectURL != "" {
- form.Add("redirectUrl", s.RedirectURL)
- }
- if s.ImageURL != "" {
- form.Add("imageUrl", s.ImageURL)
- }
-
- resp, err := http.PostForm(s.WebhookURL, form)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
- }
- return nil
-}
-
-// Flush implementing method. empty.
-func (s *JLWriter) Flush() {
-}
-
-// Destroy implementing method. empty.
-func (s *JLWriter) Destroy() {
-}
-
-func init() {
- Register(AdapterJianLiao, newJLWriter)
-}
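For context, a hedged sketch of configuring the jianliao adapter defined above; the webhook URL and field values are placeholders, and level 3 corresponds to LevelError:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        l := logs.NewLogger(10000)
        // The webhook URL is a placeholder; with level 3 (LevelError) only
        // messages at error severity or more severe are posted.
        l.SetLogger(logs.AdapterJianLiao,
            `{"webhookurl":"https://example.com/hook","authorname":"app","title":"prod","level":3}`)
        l.Error("something went wrong: %v", "timeout")
        l.Close()
    }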
diff --git a/logs/log.go b/logs/log.go
deleted file mode 100644
index 39c006d2..00000000
--- a/logs/log.go
+++ /dev/null
@@ -1,669 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package logs provides a general log interface
-// Usage:
-//
-// import "github.com/astaxie/beego/logs"
-//
-// log := NewLogger(10000)
-// log.SetLogger("console", "")
-//
-// > the first parameter sets how many messages the channel buffers
-//
-// Use it like this:
-//
-// log.Trace("trace")
-// log.Info("info")
-// log.Warn("warning")
-// log.Debug("debug")
-// log.Critical("critical")
-//
-// more docs http://beego.me/docs/module/logs.md
-package logs
-
-import (
- "fmt"
- "log"
- "os"
- "path"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-// RFC5424 log message levels.
-const (
- LevelEmergency = iota
- LevelAlert
- LevelCritical
- LevelError
- LevelWarning
- LevelNotice
- LevelInformational
- LevelDebug
-)
-
-// levelLoggerImpl is defined to implement log.Logger;
-// the real log level will be LevelEmergency
-const levelLoggerImpl = -1
-
-// Names for adapters with official beego support
-const (
- AdapterConsole = "console"
- AdapterFile = "file"
- AdapterMultiFile = "multifile"
- AdapterMail = "smtp"
- AdapterConn = "conn"
- AdapterEs = "es"
- AdapterJianLiao = "jianliao"
- AdapterSlack = "slack"
- AdapterAliLS = "alils"
-)
-
-// Legacy log level constants to ensure backwards compatibility.
-const (
- LevelInfo = LevelInformational
- LevelTrace = LevelDebug
- LevelWarn = LevelWarning
-)
-
-type newLoggerFunc func() Logger
-
-// Logger defines the behavior of a log provider.
-type Logger interface {
- Init(config string) error
- WriteMsg(when time.Time, msg string, level int) error
- Destroy()
- Flush()
-}
-
-var adapters = make(map[string]newLoggerFunc)
-var levelPrefix = [LevelDebug + 1]string{"[M]", "[A]", "[C]", "[E]", "[W]", "[N]", "[I]", "[D]"}
-
-// Register makes a log provider available by the provided name.
-// If Register is called twice with the same name or if the provider is nil,
-// it panics.
-func Register(name string, log newLoggerFunc) {
- if log == nil {
- panic("logs: Register provide is nil")
- }
- if _, dup := adapters[name]; dup {
- panic("logs: Register called twice for provider " + name)
- }
- adapters[name] = log
-}
-
-// BeeLogger is default logger in beego application.
-// it can contain several providers and log message into all providers.
-type BeeLogger struct {
- lock sync.Mutex
- level int
- init bool
- enableFuncCallDepth bool
- loggerFuncCallDepth int
- asynchronous bool
- prefix string
- msgChanLen int64
- msgChan chan *logMsg
- signalChan chan string
- wg sync.WaitGroup
- outputs []*nameLogger
-}
-
-const defaultAsyncMsgLen = 1e3
-
-type nameLogger struct {
- Logger
- name string
-}
-
-type logMsg struct {
- level int
- msg string
- when time.Time
-}
-
-var logMsgPool *sync.Pool
-
-// NewLogger returns a new BeeLogger.
-// channelLen is the buffer size of the message channel (used when asynchronous mode is on).
-// If the buffering channel is full, callers block until the adapters drain it.
-func NewLogger(channelLens ...int64) *BeeLogger {
- bl := new(BeeLogger)
- bl.level = LevelDebug
- bl.loggerFuncCallDepth = 2
- bl.msgChanLen = append(channelLens, 0)[0]
- if bl.msgChanLen <= 0 {
- bl.msgChanLen = defaultAsyncMsgLen
- }
- bl.signalChan = make(chan string, 1)
- bl.setLogger(AdapterConsole)
- return bl
-}
-
-// Async sets the logger to asynchronous mode and starts the writer goroutine
-func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger {
- bl.lock.Lock()
- defer bl.lock.Unlock()
- if bl.asynchronous {
- return bl
- }
- bl.asynchronous = true
- if len(msgLen) > 0 && msgLen[0] > 0 {
- bl.msgChanLen = msgLen[0]
- }
- bl.msgChan = make(chan *logMsg, bl.msgChanLen)
- logMsgPool = &sync.Pool{
- New: func() interface{} {
- return &logMsg{}
- },
- }
- bl.wg.Add(1)
- go bl.startLogger()
- return bl
-}
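A small usage sketch of the asynchronous mode set up by Async above; the buffer size and message count are arbitrary:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        l := logs.NewLogger()
        l.SetLogger(logs.AdapterConsole)
        // Buffer up to 10000 messages; they are drained by the goroutine
        // started in Async (startLogger).
        l.Async(10000)
        for i := 0; i < 100; i++ {
            l.Debug("message %d", i)
        }
        // Close drains the channel and destroys the adapters.
        l.Close()
    }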
-
-// setLogger adds a logger adapter to BeeLogger using a config string.
-// The config must be valid JSON, e.g. {"interval":360}.
-func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error {
- config := append(configs, "{}")[0]
- for _, l := range bl.outputs {
- if l.name == adapterName {
- return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName)
- }
- }
-
- logAdapter, ok := adapters[adapterName]
- if !ok {
- return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
- }
-
- lg := logAdapter()
- err := lg.Init(config)
- if err != nil {
- fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
- return err
- }
- bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
- return nil
-}
-
-// SetLogger adds a logger adapter to BeeLogger using a config string.
-// The config must be valid JSON, e.g. {"interval":360}.
-func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
- bl.lock.Lock()
- defer bl.lock.Unlock()
- if !bl.init {
- bl.outputs = []*nameLogger{}
- bl.init = true
- }
- return bl.setLogger(adapterName, configs...)
-}
-
-// DelLogger remove a logger adapter in BeeLogger.
-func (bl *BeeLogger) DelLogger(adapterName string) error {
- bl.lock.Lock()
- defer bl.lock.Unlock()
- outputs := []*nameLogger{}
- for _, lg := range bl.outputs {
- if lg.name == adapterName {
- lg.Destroy()
- } else {
- outputs = append(outputs, lg)
- }
- }
- if len(outputs) == len(bl.outputs) {
- return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
- }
- bl.outputs = outputs
- return nil
-}
-
-func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) {
- for _, l := range bl.outputs {
- err := l.WriteMsg(when, msg, level)
- if err != nil {
- fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
- }
- }
-}
-
-func (bl *BeeLogger) Write(p []byte) (n int, err error) {
- if len(p) == 0 {
- return 0, nil
- }
- // writeMsg will always add a '\n' character
- if p[len(p)-1] == '\n' {
- p = p[0 : len(p)-1]
- }
- // set levelLoggerImpl to ensure all log message will be write out
- err = bl.writeMsg(levelLoggerImpl, string(p))
- if err == nil {
- return len(p), err
- }
- return 0, err
-}
-
-func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error {
- if !bl.init {
- bl.lock.Lock()
- bl.setLogger(AdapterConsole)
- bl.lock.Unlock()
- }
-
- if len(v) > 0 {
- msg = fmt.Sprintf(msg, v...)
- }
-
- msg = bl.prefix + " " + msg
-
- when := time.Now()
- if bl.enableFuncCallDepth {
- _, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
- if !ok {
- file = "???"
- line = 0
- }
- _, filename := path.Split(file)
- msg = "[" + filename + ":" + strconv.Itoa(line) + "] " + msg
- }
-
- //set level info in front of filename info
- if logLevel == levelLoggerImpl {
- // set to emergency to ensure all log will be print out correctly
- logLevel = LevelEmergency
- } else {
- msg = levelPrefix[logLevel] + " " + msg
- }
-
- if bl.asynchronous {
- lm := logMsgPool.Get().(*logMsg)
- lm.level = logLevel
- lm.msg = msg
- lm.when = when
- if bl.outputs != nil {
- bl.msgChan <- lm
- } else {
- logMsgPool.Put(lm)
- }
- } else {
- bl.writeToLoggers(when, msg, logLevel)
- }
- return nil
-}
-
-// SetLevel Set log message level.
-// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
-// log providers will not even be sent the message.
-func (bl *BeeLogger) SetLevel(l int) {
- bl.level = l
-}
-
-// GetLevel Get Current log message level.
-func (bl *BeeLogger) GetLevel() int {
- return bl.level
-}
-
-// SetLogFuncCallDepth set log funcCallDepth
-func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
- bl.loggerFuncCallDepth = d
-}
-
-// GetLogFuncCallDepth return log funcCallDepth for wrapper
-func (bl *BeeLogger) GetLogFuncCallDepth() int {
- return bl.loggerFuncCallDepth
-}
-
-// EnableFuncCallDepth enable log funcCallDepth
-func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
- bl.enableFuncCallDepth = b
-}
-
-// SetPrefix sets a prefix that is prepended to every log message
-func (bl *BeeLogger) SetPrefix(s string) {
- bl.prefix = s
-}
-
-// start logger chan reading.
-// when chan is not empty, write logs.
-func (bl *BeeLogger) startLogger() {
- gameOver := false
- for {
- select {
- case bm := <-bl.msgChan:
- bl.writeToLoggers(bm.when, bm.msg, bm.level)
- logMsgPool.Put(bm)
- case sg := <-bl.signalChan:
- // Now should only send "flush" or "close" to bl.signalChan
- bl.flush()
- if sg == "close" {
- for _, l := range bl.outputs {
- l.Destroy()
- }
- bl.outputs = nil
- gameOver = true
- }
- bl.wg.Done()
- }
- if gameOver {
- break
- }
- }
-}
-
-// Emergency Log EMERGENCY level message.
-func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
- if LevelEmergency > bl.level {
- return
- }
- bl.writeMsg(LevelEmergency, format, v...)
-}
-
-// Alert Log ALERT level message.
-func (bl *BeeLogger) Alert(format string, v ...interface{}) {
- if LevelAlert > bl.level {
- return
- }
- bl.writeMsg(LevelAlert, format, v...)
-}
-
-// Critical Log CRITICAL level message.
-func (bl *BeeLogger) Critical(format string, v ...interface{}) {
- if LevelCritical > bl.level {
- return
- }
- bl.writeMsg(LevelCritical, format, v...)
-}
-
-// Error Log ERROR level message.
-func (bl *BeeLogger) Error(format string, v ...interface{}) {
- if LevelError > bl.level {
- return
- }
- bl.writeMsg(LevelError, format, v...)
-}
-
-// Warning Log WARNING level message.
-func (bl *BeeLogger) Warning(format string, v ...interface{}) {
- if LevelWarn > bl.level {
- return
- }
- bl.writeMsg(LevelWarn, format, v...)
-}
-
-// Notice Log NOTICE level message.
-func (bl *BeeLogger) Notice(format string, v ...interface{}) {
- if LevelNotice > bl.level {
- return
- }
- bl.writeMsg(LevelNotice, format, v...)
-}
-
-// Informational Log INFORMATIONAL level message.
-func (bl *BeeLogger) Informational(format string, v ...interface{}) {
- if LevelInfo > bl.level {
- return
- }
- bl.writeMsg(LevelInfo, format, v...)
-}
-
-// Debug Log DEBUG level message.
-func (bl *BeeLogger) Debug(format string, v ...interface{}) {
- if LevelDebug > bl.level {
- return
- }
- bl.writeMsg(LevelDebug, format, v...)
-}
-
-// Warn Log WARN level message.
-// compatibility alias for Warning()
-func (bl *BeeLogger) Warn(format string, v ...interface{}) {
- if LevelWarn > bl.level {
- return
- }
- bl.writeMsg(LevelWarn, format, v...)
-}
-
-// Info Log INFO level message.
-// compatibility alias for Informational()
-func (bl *BeeLogger) Info(format string, v ...interface{}) {
- if LevelInfo > bl.level {
- return
- }
- bl.writeMsg(LevelInfo, format, v...)
-}
-
-// Trace Log TRACE level message.
-// compatibility alias for Debug()
-func (bl *BeeLogger) Trace(format string, v ...interface{}) {
- if LevelDebug > bl.level {
- return
- }
- bl.writeMsg(LevelDebug, format, v...)
-}
-
-// Flush flush all chan data.
-func (bl *BeeLogger) Flush() {
- if bl.asynchronous {
- bl.signalChan <- "flush"
- bl.wg.Wait()
- bl.wg.Add(1)
- return
- }
- bl.flush()
-}
-
-// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
-func (bl *BeeLogger) Close() {
- if bl.asynchronous {
- bl.signalChan <- "close"
- bl.wg.Wait()
- close(bl.msgChan)
- } else {
- bl.flush()
- for _, l := range bl.outputs {
- l.Destroy()
- }
- bl.outputs = nil
- }
- close(bl.signalChan)
-}
-
-// Reset close all outputs, and set bl.outputs to nil
-func (bl *BeeLogger) Reset() {
- bl.Flush()
- for _, l := range bl.outputs {
- l.Destroy()
- }
- bl.outputs = nil
-}
-
-func (bl *BeeLogger) flush() {
- if bl.asynchronous {
- for {
- if len(bl.msgChan) > 0 {
- bm := <-bl.msgChan
- bl.writeToLoggers(bm.when, bm.msg, bm.level)
- logMsgPool.Put(bm)
- continue
- }
- break
- }
- }
- for _, l := range bl.outputs {
- l.Flush()
- }
-}
-
-// beeLogger references the used application logger.
-var beeLogger = NewLogger()
-
-// GetBeeLogger returns the default BeeLogger
-func GetBeeLogger() *BeeLogger {
- return beeLogger
-}
-
-var beeLoggerMap = struct {
- sync.RWMutex
- logs map[string]*log.Logger
-}{
- logs: map[string]*log.Logger{},
-}
-
-// GetLogger returns a standard *log.Logger that writes through the default BeeLogger, keyed by prefix
-func GetLogger(prefixes ...string) *log.Logger {
- prefix := append(prefixes, "")[0]
- if prefix != "" {
- prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
- }
- beeLoggerMap.RLock()
- l, ok := beeLoggerMap.logs[prefix]
- if ok {
- beeLoggerMap.RUnlock()
- return l
- }
- beeLoggerMap.RUnlock()
- beeLoggerMap.Lock()
- defer beeLoggerMap.Unlock()
- l, ok = beeLoggerMap.logs[prefix]
- if !ok {
- l = log.New(beeLogger, prefix, 0)
- beeLoggerMap.logs[prefix] = l
- }
- return l
-}
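A short sketch of how the prefixed standard-library logger returned by GetLogger can be used; the prefix name is an arbitrary example:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        // GetLogger returns a *log.Logger whose output is routed through the
        // default BeeLogger; the prefix is upper-cased and wrapped in
        // brackets, e.g. "[ORM] ".
        ormLog := logs.GetLogger("orm")
        ormLog.Println("connected to database")

        // With no arguments the shared, unprefixed logger is returned.
        logs.GetLogger().Println("plain message")
    }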
-
-// Reset will remove all the adapters
-func Reset() {
- beeLogger.Reset()
-}
-
-// Async puts the default beeLogger into asynchronous mode, buffering up to msgLen messages
-func Async(msgLen ...int64) *BeeLogger {
- return beeLogger.Async(msgLen...)
-}
-
-// SetLevel sets the global log level used by the simple logger.
-func SetLevel(l int) {
- beeLogger.SetLevel(l)
-}
-
-// SetPrefix sets the prefix
-func SetPrefix(s string) {
- beeLogger.SetPrefix(s)
-}
-
-// EnableFuncCallDepth enable log funcCallDepth
-func EnableFuncCallDepth(b bool) {
- beeLogger.enableFuncCallDepth = b
-}
-
-// SetLogFuncCall toggles logging of the caller's file and line and resets the call depth to the default of 4
-func SetLogFuncCall(b bool) {
- beeLogger.EnableFuncCallDepth(b)
- beeLogger.SetLogFuncCallDepth(4)
-}
-
-// SetLogFuncCallDepth set log funcCallDepth
-func SetLogFuncCallDepth(d int) {
- beeLogger.loggerFuncCallDepth = d
-}
-
-// SetLogger sets a new logger.
-func SetLogger(adapter string, config ...string) error {
- return beeLogger.SetLogger(adapter, config...)
-}
-
-// Emergency logs a message at emergency level.
-func Emergency(f interface{}, v ...interface{}) {
- beeLogger.Emergency(formatLog(f, v...))
-}
-
-// Alert logs a message at alert level.
-func Alert(f interface{}, v ...interface{}) {
- beeLogger.Alert(formatLog(f, v...))
-}
-
-// Critical logs a message at critical level.
-func Critical(f interface{}, v ...interface{}) {
- beeLogger.Critical(formatLog(f, v...))
-}
-
-// Error logs a message at error level.
-func Error(f interface{}, v ...interface{}) {
- beeLogger.Error(formatLog(f, v...))
-}
-
-// Warning logs a message at warning level.
-func Warning(f interface{}, v ...interface{}) {
- beeLogger.Warn(formatLog(f, v...))
-}
-
-// Warn compatibility alias for Warning()
-func Warn(f interface{}, v ...interface{}) {
- beeLogger.Warn(formatLog(f, v...))
-}
-
-// Notice logs a message at notice level.
-func Notice(f interface{}, v ...interface{}) {
- beeLogger.Notice(formatLog(f, v...))
-}
-
-// Informational logs a message at info level.
-func Informational(f interface{}, v ...interface{}) {
- beeLogger.Info(formatLog(f, v...))
-}
-
-// Info compatibility alias for Informational()
-func Info(f interface{}, v ...interface{}) {
- beeLogger.Info(formatLog(f, v...))
-}
-
-// Debug logs a message at debug level.
-func Debug(f interface{}, v ...interface{}) {
- beeLogger.Debug(formatLog(f, v...))
-}
-
-// Trace logs a message at trace level.
-// compatibility alias for Debug()
-func Trace(f interface{}, v ...interface{}) {
- beeLogger.Trace(formatLog(f, v...))
-}
-
-func formatLog(f interface{}, v ...interface{}) string {
- var msg string
- switch f.(type) {
- case string:
- msg = f.(string)
- if len(v) == 0 {
- return msg
- }
- if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") {
- //format string
- } else {
- //do not contain format char
- msg += strings.Repeat(" %v", len(v))
- }
- default:
- msg = fmt.Sprint(f)
- if len(v) == 0 {
- return msg
- }
- msg += strings.Repeat(" %v", len(v))
- }
- return fmt.Sprintf(msg, v...)
-}
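To illustrate the two branches of formatLog above, a small sketch using the package-level Info helper (which routes through formatLog); the values are arbitrary:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        // The first argument contains a format verb, so the remaining
        // arguments are substituted into it.
        logs.Info("user %s logged in from %s", "alice", "10.0.0.1")

        // No verb here, so formatLog appends one " %v" per extra argument
        // before formatting.
        logs.Info("user logged in", "alice", "10.0.0.1")
    }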
diff --git a/logs/logger.go b/logs/logger.go
deleted file mode 100644
index a28bff6f..00000000
--- a/logs/logger.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "io"
- "runtime"
- "sync"
- "time"
-)
-
-type logWriter struct {
- sync.Mutex
- writer io.Writer
-}
-
-func newLogWriter(wr io.Writer) *logWriter {
- return &logWriter{writer: wr}
-}
-
-func (lg *logWriter) writeln(when time.Time, msg string) (int, error) {
- lg.Lock()
- h, _, _ := formatTimeHeader(when)
- n, err := lg.writer.Write(append(append(h, msg...), '\n'))
- lg.Unlock()
- return n, err
-}
-
-const (
- y1 = `0123456789`
- y2 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
- y3 = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
- y4 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
- mo1 = `000000000111`
- mo2 = `123456789012`
- d1 = `0000000001111111111222222222233`
- d2 = `1234567890123456789012345678901`
- h1 = `000000000011111111112222`
- h2 = `012345678901234567890123`
- mi1 = `000000000011111111112222222222333333333344444444445555555555`
- mi2 = `012345678901234567890123456789012345678901234567890123456789`
- s1 = `000000000011111111112222222222333333333344444444445555555555`
- s2 = `012345678901234567890123456789012345678901234567890123456789`
- ns1 = `0123456789`
-)
-
-func formatTimeHeader(when time.Time) ([]byte, int, int) {
- y, mo, d := when.Date()
- h, mi, s := when.Clock()
- ns := when.Nanosecond() / 1000000
- //len("2006/01/02 15:04:05.123 ")==24
- var buf [24]byte
-
- buf[0] = y1[y/1000%10]
- buf[1] = y2[y/100]
- buf[2] = y3[y-y/100*100]
- buf[3] = y4[y-y/100*100]
- buf[4] = '/'
- buf[5] = mo1[mo-1]
- buf[6] = mo2[mo-1]
- buf[7] = '/'
- buf[8] = d1[d-1]
- buf[9] = d2[d-1]
- buf[10] = ' '
- buf[11] = h1[h]
- buf[12] = h2[h]
- buf[13] = ':'
- buf[14] = mi1[mi]
- buf[15] = mi2[mi]
- buf[16] = ':'
- buf[17] = s1[s]
- buf[18] = s2[s]
- buf[19] = '.'
- buf[20] = ns1[ns/100]
- buf[21] = ns1[ns%100/10]
- buf[22] = ns1[ns%10]
-
- buf[23] = ' '
-
- return buf[0:], d, h
-}
-
-var (
- green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
- white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
- yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
- red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
- blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
- magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
- cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
-
- w32Green = string([]byte{27, 91, 52, 50, 109})
- w32White = string([]byte{27, 91, 52, 55, 109})
- w32Yellow = string([]byte{27, 91, 52, 51, 109})
- w32Red = string([]byte{27, 91, 52, 49, 109})
- w32Blue = string([]byte{27, 91, 52, 52, 109})
- w32Magenta = string([]byte{27, 91, 52, 53, 109})
- w32Cyan = string([]byte{27, 91, 52, 54, 109})
-
- reset = string([]byte{27, 91, 48, 109})
-)
-
-var once sync.Once
-var colorMap map[string]string
-
-func initColor() {
- if runtime.GOOS == "windows" {
- green = w32Green
- white = w32White
- yellow = w32Yellow
- red = w32Red
- blue = w32Blue
- magenta = w32Magenta
- cyan = w32Cyan
- }
- colorMap = map[string]string{
- //by color
- "green": green,
- "white": white,
- "yellow": yellow,
- "red": red,
- //by method
- "GET": blue,
- "POST": cyan,
- "PUT": yellow,
- "DELETE": red,
- "PATCH": green,
- "HEAD": magenta,
- "OPTIONS": white,
- }
-}
-
-// ColorByStatus returns a color by HTTP status code
-// 2xx return Green
-// 3xx return White
-// 4xx return Yellow
-// 5xx return Red
-func ColorByStatus(code int) string {
- once.Do(initColor)
- switch {
- case code >= 200 && code < 300:
- return colorMap["green"]
- case code >= 300 && code < 400:
- return colorMap["white"]
- case code >= 400 && code < 500:
- return colorMap["yellow"]
- default:
- return colorMap["red"]
- }
-}
-
-// ColorByMethod returns a color by HTTP method
-func ColorByMethod(method string) string {
- once.Do(initColor)
- if c := colorMap[method]; c != "" {
- return c
- }
- return reset
-}
-
-// ResetColor return reset color
-func ResetColor() string {
- return reset
-}
diff --git a/logs/logger_test.go b/logs/logger_test.go
deleted file mode 100644
index 15be500d..00000000
--- a/logs/logger_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "testing"
- "time"
-)
-
-func TestFormatHeader_0(t *testing.T) {
- tm := time.Now()
- if tm.Year() >= 2100 {
- t.FailNow()
- }
- dur := time.Second
- for {
- if tm.Year() >= 2100 {
- break
- }
- h, _, _ := formatTimeHeader(tm)
- if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
- t.Log(tm)
- t.FailNow()
- }
- tm = tm.Add(dur)
- dur *= 2
- }
-}
-
-func TestFormatHeader_1(t *testing.T) {
- tm := time.Now()
- year := tm.Year()
- dur := time.Second
- for {
- if tm.Year() >= year+1 {
- break
- }
- h, _, _ := formatTimeHeader(tm)
- if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
- t.Log(tm)
- t.FailNow()
- }
- tm = tm.Add(dur)
- }
-}
diff --git a/logs/multifile.go b/logs/multifile.go
deleted file mode 100644
index 90168274..00000000
--- a/logs/multifile.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "encoding/json"
- "time"
-)
-
-// A multiFileLogWriter manages several fileLogWriter instances.
-// It writes every log to the file named in the JSON configuration and, for each level
-// listed in "separate", also writes that level's logs to a per-level file:
-// if the configured file name is project.log, it creates project.error.log / project.debug.log
-// and writes error-level logs to project.error.log and debug-level logs to project.debug.log.
-// The rotate attributes behave the same as in fileLogWriter.
-type multiFileLogWriter struct {
- writers [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
- fullLogWriter *fileLogWriter
- Separate []string `json:"separate"`
-}
-
-var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
-
-// Init the multi-file logger with a JSON config.
-// jsonConfig like:
-// {
-// "filename":"logs/beego.log",
-// "maxLines":0,
-// "maxsize":0,
-// "daily":true,
-// "maxDays":15,
-// "rotate":true,
-// "perm":0600,
-// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
-// }
-
-func (f *multiFileLogWriter) Init(config string) error {
- writer := newFileWriter().(*fileLogWriter)
- err := writer.Init(config)
- if err != nil {
- return err
- }
- f.fullLogWriter = writer
- f.writers[LevelDebug+1] = writer
-
- //unmarshal "separate" field to f.Separate
- json.Unmarshal([]byte(config), f)
-
- jsonMap := map[string]interface{}{}
- json.Unmarshal([]byte(config), &jsonMap)
-
- for i := LevelEmergency; i < LevelDebug+1; i++ {
- for _, v := range f.Separate {
- if v == levelNames[i] {
- jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
- jsonMap["level"] = i
- bs, _ := json.Marshal(jsonMap)
- writer = newFileWriter().(*fileLogWriter)
- err := writer.Init(string(bs))
- if err != nil {
- return err
- }
- f.writers[i] = writer
- }
- }
- }
-
- return nil
-}
-
-func (f *multiFileLogWriter) Destroy() {
- for i := 0; i < len(f.writers); i++ {
- if f.writers[i] != nil {
- f.writers[i].Destroy()
- }
- }
-}
-
-func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
- if f.fullLogWriter != nil {
- f.fullLogWriter.WriteMsg(when, msg, level)
- }
- for i := 0; i < len(f.writers)-1; i++ {
- if f.writers[i] != nil {
- if level == f.writers[i].Level {
- f.writers[i].WriteMsg(when, msg, level)
- }
- }
- }
- return nil
-}
-
-func (f *multiFileLogWriter) Flush() {
- for i := 0; i < len(f.writers); i++ {
- if f.writers[i] != nil {
- f.writers[i].Flush()
- }
- }
-}
-
-// newFilesWriter creates a multiFileLogWriter, returned as a Logger.
-func newFilesWriter() Logger {
- return &multiFileLogWriter{}
-}
-
-func init() {
- Register(AdapterMultiFile, newFilesWriter)
-}
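A hedged sketch of configuring the multifile adapter defined above; the file name and the levels listed in "separate" are arbitrary examples:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        l := logs.NewLogger(10000)
        // With "separate", project.log receives every message while
        // project.error.log and project.debug.log only receive their own level.
        l.SetLogger(logs.AdapterMultiFile,
            `{"filename":"project.log","separate":["error","debug"]}`)
        l.Error("written to project.log and project.error.log")
        l.Debug("written to project.log and project.debug.log")
        l.Close()
    }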
diff --git a/logs/multifile_test.go b/logs/multifile_test.go
deleted file mode 100644
index 57b96094..00000000
--- a/logs/multifile_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "bufio"
- "os"
- "strconv"
- "strings"
- "testing"
-)
-
-func TestFiles_1(t *testing.T) {
- log := NewLogger(10000)
- log.SetLogger("multifile", `{"filename":"test.log","separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"]}`)
- log.Debug("debug")
- log.Informational("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("error")
- log.Alert("alert")
- log.Critical("critical")
- log.Emergency("emergency")
- fns := []string{""}
- fns = append(fns, levelNames[0:]...)
- name := "test"
- suffix := ".log"
- for _, fn := range fns {
-
- file := name + suffix
- if fn != "" {
- file = name + "." + fn + suffix
- }
- f, err := os.Open(file)
- if err != nil {
- t.Fatal(err)
- }
- b := bufio.NewReader(f)
- lineNum := 0
- lastLine := ""
- for {
- line, _, err := b.ReadLine()
- if err != nil {
- break
- }
- if len(line) > 0 {
- lastLine = string(line)
- lineNum++
- }
- }
- var expected = 1
- if fn == "" {
- expected = LevelDebug + 1
- }
- if lineNum != expected {
- t.Fatal(file, "has", lineNum, "lines not "+strconv.Itoa(expected)+" lines")
- }
- if lineNum == 1 {
- if !strings.Contains(lastLine, fn) {
- t.Fatal(file + " " + lastLine + " not contains the log msg " + fn)
- }
- }
- os.Remove(file)
- }
-
-}
diff --git a/logs/slack.go b/logs/slack.go
deleted file mode 100644
index 1cd2e5ae..00000000
--- a/logs/slack.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package logs
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "time"
-)
-
-// SLACKWriter implements beego LoggerInterface and is used to send messages to a Slack webhook
-type SLACKWriter struct {
- WebhookURL string `json:"webhookurl"`
- Level int `json:"level"`
-}
-
-// newSLACKWriter creates a Slack writer.
-func newSLACKWriter() Logger {
- return &SLACKWriter{Level: LevelTrace}
-}
-
-// Init SLACKWriter with json config string
-func (s *SLACKWriter) Init(jsonconfig string) error {
- return json.Unmarshal([]byte(jsonconfig), s)
-}
-
-// WriteMsg posts the message to the Slack webhook.
-// Messages less severe than the configured Level are dropped.
-func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > s.Level {
- return nil
- }
-
- text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg)
-
- form := url.Values{}
- form.Add("payload", text)
-
- resp, err := http.PostForm(s.WebhookURL, form)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
- }
- return nil
-}
-
-// Flush implementing method. empty.
-func (s *SLACKWriter) Flush() {
-}
-
-// Destroy implementing method. empty.
-func (s *SLACKWriter) Destroy() {
-}
-
-func init() {
- Register(AdapterSlack, newSLACKWriter)
-}
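A minimal sketch of wiring up the Slack adapter above; the webhook URL is a placeholder and level 3 corresponds to LevelError:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        l := logs.NewLogger(10000)
        // Placeholder webhook URL; with level 3 (LevelError) only messages
        // at error severity or more severe are posted to Slack.
        l.SetLogger(logs.AdapterSlack,
            `{"webhookurl":"https://hooks.slack.com/services/T000/B000/XXXX","level":3}`)
        l.Error("deployment failed")
        l.Close()
    }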
diff --git a/logs/smtp.go b/logs/smtp.go
deleted file mode 100644
index 6208d7b8..00000000
--- a/logs/smtp.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-import (
- "crypto/tls"
- "encoding/json"
- "fmt"
- "net"
- "net/smtp"
- "strings"
- "time"
-)
-
-// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
-type SMTPWriter struct {
- Username string `json:"username"`
- Password string `json:"password"`
- Host string `json:"host"`
- Subject string `json:"subject"`
- FromAddress string `json:"fromAddress"`
- RecipientAddresses []string `json:"sendTos"`
- Level int `json:"level"`
-}
-
-// newSMTPWriter creates an SMTP writer.
-func newSMTPWriter() Logger {
- return &SMTPWriter{Level: LevelTrace}
-}
-
-// Init smtp writer with json config.
-// config like:
-// {
-// "username":"example@gmail.com",
-//		"password":"password",
-// "host":"smtp.gmail.com:465",
-// "subject":"email title",
-// "fromAddress":"from@example.com",
-// "sendTos":["email1","email2"],
-// "level":LevelError
-// }
-func (s *SMTPWriter) Init(jsonconfig string) error {
- return json.Unmarshal([]byte(jsonconfig), s)
-}
-
-func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth {
- if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 {
- return nil
- }
- return smtp.PlainAuth(
- "",
- s.Username,
- s.Password,
- host,
- )
-}
-
-func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error {
- client, err := smtp.Dial(hostAddressWithPort)
- if err != nil {
- return err
- }
-
- host, _, _ := net.SplitHostPort(hostAddressWithPort)
- tlsConn := &tls.Config{
- InsecureSkipVerify: true,
- ServerName: host,
- }
- if err = client.StartTLS(tlsConn); err != nil {
- return err
- }
-
- if auth != nil {
- if err = client.Auth(auth); err != nil {
- return err
- }
- }
-
- if err = client.Mail(fromAddress); err != nil {
- return err
- }
-
- for _, rec := range recipients {
- if err = client.Rcpt(rec); err != nil {
- return err
- }
- }
-
- w, err := client.Data()
- if err != nil {
- return err
- }
- _, err = w.Write(msgContent)
- if err != nil {
- return err
- }
-
- err = w.Close()
- if err != nil {
- return err
- }
-
- return client.Quit()
-}
-
-// WriteMsg write message in smtp writer.
-// it will send an email with subject and only this message.
-func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error {
- if level > s.Level {
- return nil
- }
-
- hp := strings.Split(s.Host, ":")
-
- // Set up authentication information.
- auth := s.getSMTPAuth(hp[0])
-
- // Connect to the server, authenticate, set the sender and recipient,
- // and send the email all in one step.
- contentType := "Content-Type: text/plain" + "; charset=UTF-8"
- mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress +
- ">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg)
-
- return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg)
-}
-
-// Flush implementing method. empty.
-func (s *SMTPWriter) Flush() {
-}
-
-// Destroy implementing method. empty.
-func (s *SMTPWriter) Destroy() {
-}
-
-func init() {
- Register(AdapterMail, newSMTPWriter)
-}
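A minimal sketch of configuring the SMTP adapter above; every address, credential and host here is a placeholder, and level 2 corresponds to LevelCritical:

    package main

    import "github.com/astaxie/beego/logs"

    func main() {
        l := logs.NewLogger(10000)
        // All values are placeholders; the JSON keys mirror the SMTPWriter
        // struct tags above. With level 2 only critical-or-worse messages
        // trigger an email.
        l.SetLogger(logs.AdapterMail,
            `{"username":"user@example.com","password":"secret","host":"smtp.example.com:587","subject":"production alert","fromAddress":"noreply@example.com","sendTos":["ops@example.com"],"level":2}`)
        l.Critical("database unreachable")
        l.Close()
    }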
diff --git a/logs/smtp_test.go b/logs/smtp_test.go
deleted file mode 100644
index ebc8a952..00000000
--- a/logs/smtp_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logs
-
-// This test often failed, and the implementation has moved to pkg/logs,
-// so it is skipped here.
-// func TestSmtp(t *testing.T) {
-// log := NewLogger(10000)
-// log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
-// log.Critical("sendmail critical")
-// time.Sleep(time.Second * 30)
-// }
diff --git a/metric/prometheus.go b/metric/prometheus.go
deleted file mode 100644
index 215896bd..00000000
--- a/metric/prometheus.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2020 astaxie
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "net/http"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/logs"
-)
-
-// Deprecated: we will remove this function in 2.1.0
-// please use pkg/web/filter/prometheus#FilterChain
-func PrometheusMiddleWare(next http.Handler) http.Handler {
- summaryVec := prometheus.NewSummaryVec(prometheus.SummaryOpts{
- Name: "beego",
- Subsystem: "http_request",
- ConstLabels: map[string]string{
- "server": beego.BConfig.ServerName,
- "env": beego.BConfig.RunMode,
- "appname": beego.BConfig.AppName,
- },
- Help: "The statics info for http request",
- }, []string{"pattern", "method", "status", "duration"})
-
- prometheus.MustRegister(summaryVec)
-
- registerBuildInfo()
-
- return http.HandlerFunc(func(writer http.ResponseWriter, q *http.Request) {
- start := time.Now()
- next.ServeHTTP(writer, q)
- end := time.Now()
- go report(end.Sub(start), writer, q, summaryVec)
- })
-}
-
-func registerBuildInfo() {
- buildInfo := prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Name: "beego",
- Subsystem: "build_info",
- Help: "The building information",
- ConstLabels: map[string]string{
- "appname": beego.BConfig.AppName,
- "build_version": beego.BuildVersion,
- "build_revision": beego.BuildGitRevision,
- "build_status": beego.BuildStatus,
- "build_tag": beego.BuildTag,
- "build_time": strings.Replace(beego.BuildTime, "--", " ", 1),
- "go_version": beego.GoVersion,
- "git_branch": beego.GitBranch,
- "start_time": time.Now().Format("2006-01-02 15:04:05"),
- },
- }, []string{})
-
- prometheus.MustRegister(buildInfo)
- buildInfo.WithLabelValues().Set(1)
-}
-
-func report(dur time.Duration, writer http.ResponseWriter, q *http.Request, vec *prometheus.SummaryVec) {
- ctrl := beego.BeeApp.Handlers
- ctx := ctrl.GetContext()
- ctx.Reset(writer, q)
- defer ctrl.GiveBackContext(ctx)
-
- // We cannot read the status code from q.Response.StatusCode
- // since the http server does not set q.Response. So q.Response is nil
- // Thus, we use reflection to read the status from writer whose concrete type is http.response
- responseVal := reflect.ValueOf(writer).Elem()
- field := responseVal.FieldByName("status")
- status := -1
- if field.IsValid() && field.Kind() == reflect.Int {
- status = int(field.Int())
- }
- ptn := "UNKNOWN"
- if rt, found := ctrl.FindRouter(ctx); found {
- ptn = rt.GetPattern()
- } else {
- logs.Warn("we can not find the router info for this request, so request will be recorded as UNKNOWN: " + q.URL.String())
- }
- ms := dur / time.Millisecond
- vec.WithLabelValues(ptn, q.Method, strconv.Itoa(status), strconv.Itoa(int(ms))).Observe(float64(ms))
-}
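A hedged sketch of mounting the deprecated middleware above; exposing /metrics via promhttp and the chosen listen address are assumptions, not taken from this diff:

    package main

    import (
        "net/http"

        "github.com/astaxie/beego"
        "github.com/astaxie/beego/metric"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // beego.BeeApp.Handlers implements http.Handler, so it can be wrapped
        // directly; the /metrics path and listen address are arbitrary choices.
        http.Handle("/metrics", promhttp.Handler())
        http.Handle("/", metric.PrometheusMiddleWare(beego.BeeApp.Handlers))
        if err := http.ListenAndServe(":8080", nil); err != nil {
            panic(err)
        }
    }

As the deprecation notice above says, new code should prefer pkg/web/filter/prometheus#FilterChain instead.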
diff --git a/metric/prometheus_test.go b/metric/prometheus_test.go
deleted file mode 100644
index d82a6dec..00000000
--- a/metric/prometheus_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2020 astaxie
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "net/http"
- "net/url"
- "testing"
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
-
- "github.com/astaxie/beego/context"
-)
-
-func TestPrometheusMiddleWare(t *testing.T) {
- middleware := PrometheusMiddleWare(http.HandlerFunc(func(http.ResponseWriter, *http.Request) {}))
- writer := &context.Response{}
- request := &http.Request{
- URL: &url.URL{
- Host: "localhost",
- RawPath: "/a/b/c",
- },
- Method: "POST",
- }
- vec := prometheus.NewSummaryVec(prometheus.SummaryOpts{}, []string{"pattern", "method", "status", "duration"})
-
- report(time.Second, writer, request, vec)
- middleware.ServeHTTP(writer, request)
-}
diff --git a/migration/ddl.go b/migration/ddl.go
deleted file mode 100644
index cd2c1c49..00000000
--- a/migration/ddl.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package migration
-
-import (
- "fmt"
-
- "github.com/astaxie/beego/logs"
-)
-
-// Index struct defines the structure of Index Columns
-type Index struct {
- Name string
-}
-
-// Unique struct defines a single unique key combination
-type Unique struct {
- Definition string
- Columns []*Column
-}
-
-//Column struct defines a single column of a table
-type Column struct {
- Name string
- Inc string
- Null string
- Default string
- Unsign string
- DataType string
- remove bool
- Modify bool
-}
-
-// Foreign struct defines a single foreign relationship
-type Foreign struct {
- ForeignTable string
- ForeignColumn string
- OnDelete string
- OnUpdate string
- Column
-}
-
-// RenameColumn struct allows renaming of columns
-type RenameColumn struct {
- OldName string
- OldNull string
- OldDefault string
- OldUnsign string
- OldDataType string
- NewName string
- Column
-}
-
-// CreateTable creates the table on system
-func (m *Migration) CreateTable(tablename, engine, charset string, p ...func()) {
- m.TableName = tablename
- m.Engine = engine
- m.Charset = charset
- m.ModifyType = "create"
-}
-
-// AlterTable set the ModifyType to alter
-func (m *Migration) AlterTable(tablename string) {
- m.TableName = tablename
- m.ModifyType = "alter"
-}
-
-// NewCol creates a new standard column and attaches it to m struct
-func (m *Migration) NewCol(name string) *Column {
- col := &Column{Name: name}
- m.AddColumns(col)
- return col
-}
-
-//PriCol creates a new primary column and attaches it to m struct
-func (m *Migration) PriCol(name string) *Column {
- col := &Column{Name: name}
- m.AddColumns(col)
- m.AddPrimary(col)
- return col
-}
-
-//UniCol creates / appends columns to specified unique key and attaches it to m struct
-func (m *Migration) UniCol(uni, name string) *Column {
- col := &Column{Name: name}
- m.AddColumns(col)
-
- uniqueOriginal := &Unique{}
-
- for _, unique := range m.Uniques {
- if unique.Definition == uni {
- unique.AddColumnsToUnique(col)
- uniqueOriginal = unique
- }
- }
- if uniqueOriginal.Definition == "" {
- unique := &Unique{Definition: uni}
- unique.AddColumnsToUnique(col)
- m.AddUnique(unique)
- }
-
- return col
-}
-
-//ForeignCol creates a new foreign column and returns the instance of column
-func (m *Migration) ForeignCol(colname, foreigncol, foreigntable string) (foreign *Foreign) {
-
- foreign = &Foreign{ForeignColumn: foreigncol, ForeignTable: foreigntable}
- foreign.Name = colname
- m.AddForeign(foreign)
- return foreign
-}
-
-//SetOnDelete sets the on delete of foreign
-func (foreign *Foreign) SetOnDelete(del string) *Foreign {
- foreign.OnDelete = "ON DELETE" + del
- return foreign
-}
-
-//SetOnUpdate sets the on update of foreign
-func (foreign *Foreign) SetOnUpdate(update string) *Foreign {
- foreign.OnUpdate = "ON UPDATE" + update
- return foreign
-}
-
-//Remove marks the column to be removed.
-//It allows the reverse migration to recreate the column.
-func (c *Column) Remove() {
- c.remove = true
-}
-
-//SetAuto enables auto_increment of column (can be used once)
-func (c *Column) SetAuto(inc bool) *Column {
- if inc {
- c.Inc = "auto_increment"
- }
- return c
-}
-
-//SetNullable sets the column to be null
-func (c *Column) SetNullable(null bool) *Column {
- if null {
- c.Null = ""
-
- } else {
- c.Null = "NOT NULL"
- }
- return c
-}
-
-//SetDefault sets the default value, prepend with "DEFAULT "
-func (c *Column) SetDefault(def string) *Column {
- c.Default = "DEFAULT " + def
- return c
-}
-
-//SetUnsigned sets the column to be unsigned int
-func (c *Column) SetUnsigned(unsign bool) *Column {
- if unsign {
- c.Unsign = "UNSIGNED"
- }
- return c
-}
-
-//SetDataType sets the dataType of the column
-func (c *Column) SetDataType(dataType string) *Column {
- c.DataType = dataType
- return c
-}
-
-//SetOldNullable allows reverting to previous nullable on reverse ms
-func (c *RenameColumn) SetOldNullable(null bool) *RenameColumn {
- if null {
- c.OldNull = ""
-
- } else {
- c.OldNull = "NOT NULL"
- }
- return c
-}
-
-//SetOldDefault allows reverting to previous default on reverse ms
-func (c *RenameColumn) SetOldDefault(def string) *RenameColumn {
- c.OldDefault = def
- return c
-}
-
-//SetOldUnsigned allows reverting to previous unsigned on reverse ms
-func (c *RenameColumn) SetOldUnsigned(unsign bool) *RenameColumn {
- if unsign {
- c.OldUnsign = "UNSIGNED"
- }
- return c
-}
-
-//SetOldDataType allows reverting to previous datatype on reverse ms
-func (c *RenameColumn) SetOldDataType(dataType string) *RenameColumn {
- c.OldDataType = dataType
- return c
-}
-
-//SetPrimary adds the column to the primary key (may be called any number of times, but only within one migration)
-func (c *Column) SetPrimary(m *Migration) *Column {
- m.Primary = append(m.Primary, c)
- return c
-}
-
-//AddColumnsToUnique adds the columns to Unique Struct
-func (unique *Unique) AddColumnsToUnique(columns ...*Column) *Unique {
-
- unique.Columns = append(unique.Columns, columns...)
-
- return unique
-}
-
-//AddColumns adds columns to m struct
-func (m *Migration) AddColumns(columns ...*Column) *Migration {
-
- m.Columns = append(m.Columns, columns...)
-
- return m
-}
-
-//AddPrimary adds the column to primary in m struct
-func (m *Migration) AddPrimary(primary *Column) *Migration {
- m.Primary = append(m.Primary, primary)
- return m
-}
-
-//AddUnique adds the column to unique in m struct
-func (m *Migration) AddUnique(unique *Unique) *Migration {
- m.Uniques = append(m.Uniques, unique)
- return m
-}
-
-//AddForeign adds the column to foreign in m struct
-func (m *Migration) AddForeign(foreign *Foreign) *Migration {
- m.Foreigns = append(m.Foreigns, foreign)
- return m
-}
-
-//AddIndex adds the column to index in m struct
-func (m *Migration) AddIndex(index *Index) *Migration {
- m.Indexes = append(m.Indexes, index)
- return m
-}
-
-//RenameColumn allows renaming of columns
-func (m *Migration) RenameColumn(from, to string) *RenameColumn {
- rename := &RenameColumn{OldName: from, NewName: to}
- m.Renames = append(m.Renames, rename)
- return rename
-}
-
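A short sketch of the DDL helpers above being used to build a CREATE TABLE statement; the Migration struct itself is declared elsewhere in the package, and the table and column definitions are arbitrary examples:

    package main

    import (
        "fmt"

        "github.com/astaxie/beego/migration"
    )

    func main() {
        // Only the DDL helpers shown in this file are used here; the data
        // types and names are illustrative, not taken from this diff.
        m := &migration.Migration{}
        m.CreateTable("users", "InnoDB", "utf8")
        m.PriCol("id").SetAuto(true).SetNullable(false).SetUnsigned(true).SetDataType("INT(11)")
        m.NewCol("email").SetDataType("VARCHAR(255)").SetNullable(false)
        m.UniCol("uniq_email", "email")
        // GetSQL (defined below) renders the accumulated definition.
        fmt.Println(m.GetSQL())
    }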
-//GetSQL returns the generated sql depending on ModifyType
-func (m *Migration) GetSQL() (sql string) {
- sql = ""
- switch m.ModifyType {
- case "create":
- {
- sql += fmt.Sprintf("CREATE TABLE `%s` (", m.TableName)
- for index, column := range m.Columns {
- sql += fmt.Sprintf("\n `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
- if len(m.Columns) > index+1 {
- sql += ","
- }
- }
-
- if len(m.Primary) > 0 {
- sql += fmt.Sprintf(",\n PRIMARY KEY( ")
- }
- for index, column := range m.Primary {
- sql += fmt.Sprintf(" `%s`", column.Name)
- if len(m.Primary) > index+1 {
- sql += ","
- }
-
- }
- if len(m.Primary) > 0 {
- sql += fmt.Sprintf(")")
- }
-
- for _, unique := range m.Uniques {
- sql += fmt.Sprintf(",\n UNIQUE KEY `%s`( ", unique.Definition)
- for index, column := range unique.Columns {
- sql += fmt.Sprintf(" `%s`", column.Name)
- if len(unique.Columns) > index+1 {
- sql += ","
- }
- }
- sql += fmt.Sprintf(")")
- }
- for _, foreign := range m.Foreigns {
- sql += fmt.Sprintf(",\n `%s` %s %s %s %s %s", foreign.Name, foreign.DataType, foreign.Unsign, foreign.Null, foreign.Inc, foreign.Default)
- sql += fmt.Sprintf(",\n KEY `%s_%s_foreign`(`%s`),", m.TableName, foreign.Column.Name, foreign.Column.Name)
- sql += fmt.Sprintf("\n CONSTRAINT `%s_%s_foreign` FOREIGN KEY (`%s`) REFERENCES `%s` (`%s`) %s %s", m.TableName, foreign.Column.Name, foreign.Column.Name, foreign.ForeignTable, foreign.ForeignColumn, foreign.OnDelete, foreign.OnUpdate)
-
- }
- sql += fmt.Sprintf(")ENGINE=%s DEFAULT CHARSET=%s;", m.Engine, m.Charset)
- break
- }
- case "alter":
- {
- sql += fmt.Sprintf("ALTER TABLE `%s` ", m.TableName)
- for index, column := range m.Columns {
- if !column.remove {
- logs.Info("col")
- sql += fmt.Sprintf("\n ADD `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
- } else {
- sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
- }
-
- if len(m.Columns) > index+1 {
- sql += ","
- }
- }
- for index, column := range m.Renames {
- sql += fmt.Sprintf("CHANGE COLUMN `%s` `%s` %s %s %s %s %s", column.OldName, column.NewName, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
- if len(m.Renames) > index+1 {
- sql += ","
- }
- }
-
- for index, foreign := range m.Foreigns {
- sql += fmt.Sprintf("ADD `%s` %s %s %s %s %s", foreign.Name, foreign.DataType, foreign.Unsign, foreign.Null, foreign.Inc, foreign.Default)
- sql += fmt.Sprintf(",\n ADD KEY `%s_%s_foreign`(`%s`)", m.TableName, foreign.Column.Name, foreign.Column.Name)
- sql += fmt.Sprintf(",\n ADD CONSTRAINT `%s_%s_foreign` FOREIGN KEY (`%s`) REFERENCES `%s` (`%s`) %s %s", m.TableName, foreign.Column.Name, foreign.Column.Name, foreign.ForeignTable, foreign.ForeignColumn, foreign.OnDelete, foreign.OnUpdate)
- if len(m.Foreigns) > index+1 {
- sql += ","
- }
- }
- sql += ";"
-
- break
- }
- case "reverse":
- {
-
- sql += fmt.Sprintf("ALTER TABLE `%s`", m.TableName)
- for index, column := range m.Columns {
- if column.remove {
- sql += fmt.Sprintf("\n ADD `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
- } else {
- sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
- }
- if len(m.Columns) > index+1 {
- sql += ","
- }
- }
-
- if len(m.Primary) > 0 {
- sql += fmt.Sprintf("\n DROP PRIMARY KEY,")
- }
-
- for index, unique := range m.Uniques {
- sql += fmt.Sprintf("\n DROP KEY `%s`", unique.Definition)
- if len(m.Uniques) > index+1 {
- sql += ","
- }
-
- }
- for index, column := range m.Renames {
- sql += fmt.Sprintf("\n CHANGE COLUMN `%s` `%s` %s %s %s %s", column.NewName, column.OldName, column.OldDataType, column.OldUnsign, column.OldNull, column.OldDefault)
- if len(m.Renames) > index+1 {
- sql += ","
- }
- }
-
- for _, foreign := range m.Foreigns {
- sql += fmt.Sprintf("\n DROP KEY `%s_%s_foreign`", m.TableName, foreign.Column.Name)
- sql += fmt.Sprintf(",\n DROP FOREIGN KEY `%s_%s_foreign`", m.TableName, foreign.Column.Name)
- sql += fmt.Sprintf(",\n DROP COLUMN `%s`", foreign.Name)
- }
- sql += ";"
- }
- case "delete":
- {
- sql += fmt.Sprintf("DROP TABLE IF EXISTS `%s`;", m.TableName)
- }
- }
-
- return
-}
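The chain of setters and `GetSQL` above is easiest to follow end to end with a concrete call sequence. The sketch below builds a create-table migration directly from the exported `Migration` and `Column` types shown in this hunk and prints the generated DDL; the table name, column definitions, and import path are illustrative assumptions (the package's higher-level helpers such as `CreateTable`, `PriCol`, and `NewCol` wrap the same fields).

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/migration"
)

func main() {
	// "create" selects the CREATE TABLE branch of GetSQL.
	m := &migration.Migration{
		TableName:  "user",
		Engine:     "InnoDB",
		Charset:    "utf8",
		ModifyType: "create",
	}

	// Hypothetical columns; SetPrimary(m) also appends the column to m.Primary.
	id := &migration.Column{Name: "id"}
	id.SetDataType("INT(10)").SetUnsigned(true).SetNullable(false).SetAuto(true).SetPrimary(m)

	name := &migration.Column{Name: "name"}
	name.SetDataType("VARCHAR(255)").SetNullable(false)

	m.AddColumns(id, name)

	// Prints the assembled CREATE TABLE statement, including the PRIMARY KEY clause.
	fmt.Println(m.GetSQL())
}
```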
diff --git a/migration/doc.go b/migration/doc.go
deleted file mode 100644
index 0c6564d4..00000000
--- a/migration/doc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Package migration enables you to generate migrations back and forth. It generates both the forward and the reverse migration.
-//
-// //Creates a table
-// m.CreateTable("tablename","InnoDB","utf8");
-//
-// //Alter a table
-// m.AlterTable("tablename")
-//
-// Standard Column Methods
-// * SetDataType
-// * SetNullable
-// * SetDefault
-// * SetUnsigned (use only on integer types, otherwise it produces an error)
-//
-// //Sets a primary column, multiple calls allowed, standard column methods available
-// m.PriCol("id").SetAuto(true).SetNullable(false).SetDataType("INT(10)").SetUnsigned(true)
-//
-// //UniCol can be used multiple times and allows standard Column methods. Use the same "index" string to add to the same index
-// m.UniCol("index","column")
-//
-// //Standard Column Initialisation, can call .Remove() after NewCol("") on alter to remove
-// m.NewCol("name").SetDataType("VARCHAR(255) COLLATE utf8_unicode_ci").SetNullable(false)
-// m.NewCol("value").SetDataType("DOUBLE(8,2)").SetNullable(false)
-//
-// //Rename Columns: only use with Alter table, it doesn't work with Create. Prefix the standard column methods with "Old" to
-// //create a truly reversible migration, e.g. SetOldDataType("DOUBLE(12,3)")
-// m.RenameColumn("from","to")...
-//
-// //Foreign Columns: only single columns are supported. SetOnDelete & SetOnUpdate are available, call as appropriate.
-// //Supports standard column methods, automatic reverse.
-// m.ForeignCol("local_col","foreign_col","foreign_table")
-package migration
diff --git a/migration/migration.go b/migration/migration.go
deleted file mode 100644
index 5ddfd972..00000000
--- a/migration/migration.go
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package migration is used for database schema migrations
-//
-// The table structure is as follows:
-//
-// CREATE TABLE `migrations` (
-// `id_migration` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT 'surrogate key',
-// `name` varchar(255) DEFAULT NULL COMMENT 'migration name, unique',
-// `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'date migrated or rolled back',
-// `statements` longtext COMMENT 'SQL statements for this migration',
-// `rollback_statements` longtext,
-// `status` enum('update','rollback') DEFAULT NULL COMMENT 'update indicates it is a normal migration while rollback means this migration is rolled back',
-// PRIMARY KEY (`id_migration`)
-// ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-package migration
-
-import (
- "errors"
- "sort"
- "strings"
- "time"
-
- "github.com/astaxie/beego/logs"
- "github.com/astaxie/beego/orm"
-)
-
-// Date formats used for bee-generated migration timestamps and for the migrations table records
-const (
- DateFormat = "20060102_150405"
- DBDateFormat = "2006-01-02 15:04:05"
-)
-
-// Migrationer is the interface implemented by all Migration structs
-type Migrationer interface {
- Up()
- Down()
- Reset()
- Exec(name, status string) error
- GetCreated() int64
-}
-
-//Migration defines the migrations by either SQL or DDL
-type Migration struct {
- sqls []string
- Created string
- TableName string
- Engine string
- Charset string
- ModifyType string
- Columns []*Column
- Indexes []*Index
- Primary []*Column
- Uniques []*Unique
- Foreigns []*Foreign
- Renames []*RenameColumn
- RemoveColumns []*Column
- RemoveIndexes []*Index
- RemoveUniques []*Unique
- RemoveForeigns []*Foreign
-}
-
-var (
- migrationMap map[string]Migrationer
-)
-
-func init() {
- migrationMap = make(map[string]Migrationer)
-}
-
-// Up is implemented by the embedding struct for the upgrade
-func (m *Migration) Up() {
-
- switch m.ModifyType {
- case "reverse":
- m.ModifyType = "alter"
- case "delete":
- m.ModifyType = "create"
- }
- m.sqls = append(m.sqls, m.GetSQL())
-}
-
-// Down is implemented by the embedding struct for the rollback
-func (m *Migration) Down() {
-
- switch m.ModifyType {
- case "alter":
- m.ModifyType = "reverse"
- case "create":
- m.ModifyType = "delete"
- }
- m.sqls = append(m.sqls, m.GetSQL())
-}
-
-//Migrate adds the SQL to the execution list
-func (m *Migration) Migrate(migrationType string) {
- m.ModifyType = migrationType
- m.sqls = append(m.sqls, m.GetSQL())
-}
-
-// SQL adds an SQL statement to be executed
-func (m *Migration) SQL(sql string) {
- m.sqls = append(m.sqls, sql)
-}
-
-// Reset the sqls
-func (m *Migration) Reset() {
- m.sqls = make([]string, 0)
-}
-
-// Exec executes the SQL statements that have been added
-func (m *Migration) Exec(name, status string) error {
- o := orm.NewOrm()
- for _, s := range m.sqls {
- logs.Info("exec sql:", s)
- r := o.Raw(s)
- _, err := r.Exec()
- if err != nil {
- return err
- }
- }
- return m.addOrUpdateRecord(name, status)
-}
-
-func (m *Migration) addOrUpdateRecord(name, status string) error {
- o := orm.NewOrm()
- if status == "down" {
- status = "rollback"
- p, err := o.Raw("update migrations set status = ?, rollback_statements = ?, created_at = ? where name = ?").Prepare()
- if err != nil {
- return err
- }
- _, err = p.Exec(status, strings.Join(m.sqls, "; "), time.Now().Format(DBDateFormat), name)
- return err
- }
- status = "update"
- p, err := o.Raw("insert into migrations(name, created_at, statements, status) values(?,?,?,?)").Prepare()
- if err != nil {
- return err
- }
- _, err = p.Exec(name, time.Now().Format(DBDateFormat), strings.Join(m.sqls, "; "), status)
- return err
-}
-
-// GetCreated returns the Unix timestamp parsed from the Created field
-func (m *Migration) GetCreated() int64 {
- t, err := time.Parse(DateFormat, m.Created)
- if err != nil {
- return 0
- }
- return t.Unix()
-}
-
-// Register registers the Migration in the map
-func Register(name string, m Migrationer) error {
- if _, ok := migrationMap[name]; ok {
- return errors.New("already exist name:" + name)
- }
- migrationMap[name] = m
- return nil
-}
-
-// Upgrade runs the registered migrations that have not been applied yet
-func Upgrade(lasttime int64) error {
- sm := sortMap(migrationMap)
- i := 0
- migs, _ := getAllMigrations()
- for _, v := range sm {
- if _, ok := migs[v.name]; !ok {
- logs.Info("start upgrade", v.name)
- v.m.Reset()
- v.m.Up()
- err := v.m.Exec(v.name, "up")
- if err != nil {
- logs.Error("execute error:", err)
- time.Sleep(2 * time.Second)
- return err
- }
- logs.Info("end upgrade:", v.name)
- i++
- }
- }
- logs.Info("total success upgrade:", i, " migration")
- time.Sleep(2 * time.Second)
- return nil
-}
-
-// Rollback rolls back the migration with the given name
-func Rollback(name string) error {
- if v, ok := migrationMap[name]; ok {
- logs.Info("start rollback")
- v.Reset()
- v.Down()
- err := v.Exec(name, "down")
- if err != nil {
- logs.Error("execute error:", err)
- time.Sleep(2 * time.Second)
- return err
- }
- logs.Info("end rollback")
- time.Sleep(2 * time.Second)
- return nil
- }
- logs.Error("not exist the migrationMap name:" + name)
- time.Sleep(2 * time.Second)
- return errors.New("not exist the migrationMap name:" + name)
-}
-
-// Reset resets all migrations by
-// running every migration's down function
-func Reset() error {
- sm := sortMap(migrationMap)
- i := 0
- for j := len(sm) - 1; j >= 0; j-- {
- v := sm[j]
- if isRollBack(v.name) {
- logs.Info("skip the", v.name)
- time.Sleep(1 * time.Second)
- continue
- }
- logs.Info("start reset:", v.name)
- v.m.Reset()
- v.m.Down()
- err := v.m.Exec(v.name, "down")
- if err != nil {
- logs.Error("execute error:", err)
- time.Sleep(2 * time.Second)
- return err
- }
- i++
- logs.Info("end reset:", v.name)
- }
- logs.Info("total success reset:", i, " migration")
- time.Sleep(2 * time.Second)
- return nil
-}
-
-// Refresh first Reset, then Upgrade
-func Refresh() error {
- err := Reset()
- if err != nil {
- logs.Error("execute error:", err)
- time.Sleep(2 * time.Second)
- return err
- }
- err = Upgrade(0)
- return err
-}
-
-type dataSlice []data
-
-type data struct {
- created int64
- name string
- m Migrationer
-}
-
-// Len is part of sort.Interface.
-func (d dataSlice) Len() int {
- return len(d)
-}
-
-// Swap is part of sort.Interface.
-func (d dataSlice) Swap(i, j int) {
- d[i], d[j] = d[j], d[i]
-}
-
-// Less is part of sort.Interface. We use the created timestamp as the value to sort by
-func (d dataSlice) Less(i, j int) bool {
- return d[i].created < d[j].created
-}
-
-func sortMap(m map[string]Migrationer) dataSlice {
- s := make(dataSlice, 0, len(m))
- for k, v := range m {
- d := data{}
- d.created = v.GetCreated()
- d.name = k
- d.m = v
- s = append(s, d)
- }
- sort.Sort(s)
- return s
-}
-
-func isRollBack(name string) bool {
- o := orm.NewOrm()
- var maps []orm.Params
- num, err := o.Raw("select * from migrations where `name` = ? order by id_migration desc", name).Values(&maps)
- if err != nil {
- logs.Info("get name has error", err)
- return false
- }
- if num <= 0 {
- return false
- }
- if maps[0]["status"] == "rollback" {
- return true
- }
- return false
-}
-func getAllMigrations() (map[string]string, error) {
- o := orm.NewOrm()
- var maps []orm.Params
- migs := make(map[string]string)
- num, err := o.Raw("select * from migrations order by id_migration desc").Values(&maps)
- if err != nil {
- logs.Info("get name has error", err)
- return migs, err
- }
- if num > 0 {
- for _, v := range maps {
- name := v["name"].(string)
- migs[name] = v["status"].(string)
- }
- }
- return migs, nil
-}
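For orientation, here is a minimal sketch of how a migration file typically plugs into `Register`, `Upgrade`, and `Rollback` above. The struct name, SQL, and `Created` timestamp are invented for illustration; actually running it requires a default database registered with the orm package, since `Exec` calls `orm.NewOrm()` and reads the `migrations` table.

```go
package main

import "github.com/astaxie/beego/migration"

// AddUsers embeds migration.Migration, so it satisfies the Migrationer
// interface; only Up and Down are overridden.
type AddUsers struct {
	migration.Migration
}

func init() {
	m := &AddUsers{}
	m.Created = "20190101_120000" // must match migration.DateFormat
	migration.Register("AddUsers_20190101_120000", m)
}

// Up queues the forward SQL.
func (m *AddUsers) Up() {
	m.SQL("CREATE TABLE users (id INT(10) UNSIGNED NOT NULL AUTO_INCREMENT, name VARCHAR(255), PRIMARY KEY (id))")
}

// Down queues the rollback SQL.
func (m *AddUsers) Down() {
	m.SQL("DROP TABLE IF EXISTS users")
}

func main() {
	// A default database must already be registered via orm.RegisterDataBase.
	// Apply every registered migration not yet recorded in the migrations table.
	if err := migration.Upgrade(0); err != nil {
		panic(err)
	}
}
```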
diff --git a/mime.go b/mime.go
deleted file mode 100644
index ca2878ab..00000000
--- a/mime.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-var mimemaps = map[string]string{
- ".3dm": "x-world/x-3dmf",
- ".3dmf": "x-world/x-3dmf",
- ".7z": "application/x-7z-compressed",
- ".a": "application/octet-stream",
- ".aab": "application/x-authorware-bin",
- ".aam": "application/x-authorware-map",
- ".aas": "application/x-authorware-seg",
- ".abc": "text/vndabc",
- ".ace": "application/x-ace-compressed",
- ".acgi": "text/html",
- ".afl": "video/animaflex",
- ".ai": "application/postscript",
- ".aif": "audio/aiff",
- ".aifc": "audio/aiff",
- ".aiff": "audio/aiff",
- ".aim": "application/x-aim",
- ".aip": "text/x-audiosoft-intra",
- ".alz": "application/x-alz-compressed",
- ".ani": "application/x-navi-animation",
- ".aos": "application/x-nokia-9000-communicator-add-on-software",
- ".aps": "application/mime",
- ".apk": "application/vnd.android.package-archive",
- ".arc": "application/x-arc-compressed",
- ".arj": "application/arj",
- ".art": "image/x-jg",
- ".asf": "video/x-ms-asf",
- ".asm": "text/x-asm",
- ".asp": "text/asp",
- ".asx": "application/x-mplayer2",
- ".au": "audio/basic",
- ".avi": "video/x-msvideo",
- ".avs": "video/avs-video",
- ".bcpio": "application/x-bcpio",
- ".bin": "application/mac-binary",
- ".bmp": "image/bmp",
- ".boo": "application/book",
- ".book": "application/book",
- ".boz": "application/x-bzip2",
- ".bsh": "application/x-bsh",
- ".bz2": "application/x-bzip2",
- ".bz": "application/x-bzip",
- ".c++": "text/plain",
- ".c": "text/x-c",
- ".cab": "application/vnd.ms-cab-compressed",
- ".cat": "application/vndms-pkiseccat",
- ".cc": "text/x-c",
- ".ccad": "application/clariscad",
- ".cco": "application/x-cocoa",
- ".cdf": "application/cdf",
- ".cer": "application/pkix-cert",
- ".cha": "application/x-chat",
- ".chat": "application/x-chat",
- ".chrt": "application/vnd.kde.kchart",
- ".class": "application/java",
- ".com": "text/plain",
- ".conf": "text/plain",
- ".cpio": "application/x-cpio",
- ".cpp": "text/x-c",
- ".cpt": "application/mac-compactpro",
- ".crl": "application/pkcs-crl",
- ".crt": "application/pkix-cert",
- ".crx": "application/x-chrome-extension",
- ".csh": "text/x-scriptcsh",
- ".css": "text/css",
- ".csv": "text/csv",
- ".cxx": "text/plain",
- ".dar": "application/x-dar",
- ".dcr": "application/x-director",
- ".deb": "application/x-debian-package",
- ".deepv": "application/x-deepv",
- ".def": "text/plain",
- ".der": "application/x-x509-ca-cert",
- ".dif": "video/x-dv",
- ".dir": "application/x-director",
- ".divx": "video/divx",
- ".dl": "video/dl",
- ".dmg": "application/x-apple-diskimage",
- ".doc": "application/msword",
- ".dot": "application/msword",
- ".dp": "application/commonground",
- ".drw": "application/drafting",
- ".dump": "application/octet-stream",
- ".dv": "video/x-dv",
- ".dvi": "application/x-dvi",
- ".dwf": "drawing/x-dwf=(old)",
- ".dwg": "application/acad",
- ".dxf": "application/dxf",
- ".dxr": "application/x-director",
- ".el": "text/x-scriptelisp",
- ".elc": "application/x-bytecodeelisp=(compiled=elisp)",
- ".eml": "message/rfc822",
- ".env": "application/x-envoy",
- ".eps": "application/postscript",
- ".es": "application/x-esrehber",
- ".etx": "text/x-setext",
- ".evy": "application/envoy",
- ".exe": "application/octet-stream",
- ".f77": "text/x-fortran",
- ".f90": "text/x-fortran",
- ".f": "text/x-fortran",
- ".fdf": "application/vndfdf",
- ".fif": "application/fractals",
- ".fli": "video/fli",
- ".flo": "image/florian",
- ".flv": "video/x-flv",
- ".flx": "text/vndfmiflexstor",
- ".fmf": "video/x-atomic3d-feature",
- ".for": "text/x-fortran",
- ".fpx": "image/vndfpx",
- ".frl": "application/freeloader",
- ".funk": "audio/make",
- ".g3": "image/g3fax",
- ".g": "text/plain",
- ".gif": "image/gif",
- ".gl": "video/gl",
- ".gsd": "audio/x-gsm",
- ".gsm": "audio/x-gsm",
- ".gsp": "application/x-gsp",
- ".gss": "application/x-gss",
- ".gtar": "application/x-gtar",
- ".gz": "application/x-compressed",
- ".gzip": "application/x-gzip",
- ".h": "text/x-h",
- ".hdf": "application/x-hdf",
- ".help": "application/x-helpfile",
- ".hgl": "application/vndhp-hpgl",
- ".hh": "text/x-h",
- ".hlb": "text/x-script",
- ".hlp": "application/hlp",
- ".hpg": "application/vndhp-hpgl",
- ".hpgl": "application/vndhp-hpgl",
- ".hqx": "application/binhex",
- ".hta": "application/hta",
- ".htc": "text/x-component",
- ".htm": "text/html",
- ".html": "text/html",
- ".htmls": "text/html",
- ".htt": "text/webviewhtml",
- ".htx": "text/html",
- ".ice": "x-conference/x-cooltalk",
- ".ico": "image/x-icon",
- ".ics": "text/calendar",
- ".icz": "text/calendar",
- ".idc": "text/plain",
- ".ief": "image/ief",
- ".iefs": "image/ief",
- ".iges": "application/iges",
- ".igs": "application/iges",
- ".ima": "application/x-ima",
- ".imap": "application/x-httpd-imap",
- ".inf": "application/inf",
- ".ins": "application/x-internett-signup",
- ".ip": "application/x-ip2",
- ".isu": "video/x-isvideo",
- ".it": "audio/it",
- ".iv": "application/x-inventor",
- ".ivr": "i-world/i-vrml",
- ".ivy": "application/x-livescreen",
- ".jam": "audio/x-jam",
- ".jav": "text/x-java-source",
- ".java": "text/x-java-source",
- ".jcm": "application/x-java-commerce",
- ".jfif-tbnl": "image/jpeg",
- ".jfif": "image/jpeg",
- ".jnlp": "application/x-java-jnlp-file",
- ".jpe": "image/jpeg",
- ".jpeg": "image/jpeg",
- ".jpg": "image/jpeg",
- ".jps": "image/x-jps",
- ".js": "application/javascript",
- ".json": "application/json",
- ".jut": "image/jutvision",
- ".kar": "audio/midi",
- ".karbon": "application/vnd.kde.karbon",
- ".kfo": "application/vnd.kde.kformula",
- ".flw": "application/vnd.kde.kivio",
- ".kml": "application/vnd.google-earth.kml+xml",
- ".kmz": "application/vnd.google-earth.kmz",
- ".kon": "application/vnd.kde.kontour",
- ".kpr": "application/vnd.kde.kpresenter",
- ".kpt": "application/vnd.kde.kpresenter",
- ".ksp": "application/vnd.kde.kspread",
- ".kwd": "application/vnd.kde.kword",
- ".kwt": "application/vnd.kde.kword",
- ".ksh": "text/x-scriptksh",
- ".la": "audio/nspaudio",
- ".lam": "audio/x-liveaudio",
- ".latex": "application/x-latex",
- ".lha": "application/lha",
- ".lhx": "application/octet-stream",
- ".list": "text/plain",
- ".lma": "audio/nspaudio",
- ".log": "text/plain",
- ".lsp": "text/x-scriptlisp",
- ".lst": "text/plain",
- ".lsx": "text/x-la-asf",
- ".ltx": "application/x-latex",
- ".lzh": "application/octet-stream",
- ".lzx": "application/lzx",
- ".m1v": "video/mpeg",
- ".m2a": "audio/mpeg",
- ".m2v": "video/mpeg",
- ".m3u": "audio/x-mpegurl",
- ".m": "text/x-m",
- ".man": "application/x-troff-man",
- ".manifest": "text/cache-manifest",
- ".map": "application/x-navimap",
- ".mar": "text/plain",
- ".mbd": "application/mbedlet",
- ".mc$": "application/x-magic-cap-package-10",
- ".mcd": "application/mcad",
- ".mcf": "text/mcf",
- ".mcp": "application/netmc",
- ".me": "application/x-troff-me",
- ".mht": "message/rfc822",
- ".mhtml": "message/rfc822",
- ".mid": "application/x-midi",
- ".midi": "application/x-midi",
- ".mif": "application/x-frame",
- ".mime": "message/rfc822",
- ".mjf": "audio/x-vndaudioexplosionmjuicemediafile",
- ".mjpg": "video/x-motion-jpeg",
- ".mm": "application/base64",
- ".mme": "application/base64",
- ".mod": "audio/mod",
- ".moov": "video/quicktime",
- ".mov": "video/quicktime",
- ".movie": "video/x-sgi-movie",
- ".mp2": "audio/mpeg",
- ".mp3": "audio/mpeg3",
- ".mp4": "video/mp4",
- ".mpa": "audio/mpeg",
- ".mpc": "application/x-project",
- ".mpe": "video/mpeg",
- ".mpeg": "video/mpeg",
- ".mpg": "video/mpeg",
- ".mpga": "audio/mpeg",
- ".mpp": "application/vndms-project",
- ".mpt": "application/x-project",
- ".mpv": "application/x-project",
- ".mpx": "application/x-project",
- ".mrc": "application/marc",
- ".ms": "application/x-troff-ms",
- ".mv": "video/x-sgi-movie",
- ".my": "audio/make",
- ".mzz": "application/x-vndaudioexplosionmzz",
- ".nap": "image/naplps",
- ".naplps": "image/naplps",
- ".nc": "application/x-netcdf",
- ".ncm": "application/vndnokiaconfiguration-message",
- ".nif": "image/x-niff",
- ".niff": "image/x-niff",
- ".nix": "application/x-mix-transfer",
- ".nsc": "application/x-conference",
- ".nvd": "application/x-navidoc",
- ".o": "application/octet-stream",
- ".oda": "application/oda",
- ".odb": "application/vnd.oasis.opendocument.database",
- ".odc": "application/vnd.oasis.opendocument.chart",
- ".odf": "application/vnd.oasis.opendocument.formula",
- ".odg": "application/vnd.oasis.opendocument.graphics",
- ".odi": "application/vnd.oasis.opendocument.image",
- ".odm": "application/vnd.oasis.opendocument.text-master",
- ".odp": "application/vnd.oasis.opendocument.presentation",
- ".ods": "application/vnd.oasis.opendocument.spreadsheet",
- ".odt": "application/vnd.oasis.opendocument.text",
- ".oga": "audio/ogg",
- ".ogg": "audio/ogg",
- ".ogv": "video/ogg",
- ".omc": "application/x-omc",
- ".omcd": "application/x-omcdatamaker",
- ".omcr": "application/x-omcregerator",
- ".otc": "application/vnd.oasis.opendocument.chart-template",
- ".otf": "application/vnd.oasis.opendocument.formula-template",
- ".otg": "application/vnd.oasis.opendocument.graphics-template",
- ".oth": "application/vnd.oasis.opendocument.text-web",
- ".oti": "application/vnd.oasis.opendocument.image-template",
- ".otm": "application/vnd.oasis.opendocument.text-master",
- ".otp": "application/vnd.oasis.opendocument.presentation-template",
- ".ots": "application/vnd.oasis.opendocument.spreadsheet-template",
- ".ott": "application/vnd.oasis.opendocument.text-template",
- ".p10": "application/pkcs10",
- ".p12": "application/pkcs-12",
- ".p7a": "application/x-pkcs7-signature",
- ".p7c": "application/pkcs7-mime",
- ".p7m": "application/pkcs7-mime",
- ".p7r": "application/x-pkcs7-certreqresp",
- ".p7s": "application/pkcs7-signature",
- ".p": "text/x-pascal",
- ".part": "application/pro_eng",
- ".pas": "text/pascal",
- ".pbm": "image/x-portable-bitmap",
- ".pcl": "application/vndhp-pcl",
- ".pct": "image/x-pict",
- ".pcx": "image/x-pcx",
- ".pdb": "chemical/x-pdb",
- ".pdf": "application/pdf",
- ".pfunk": "audio/make",
- ".pgm": "image/x-portable-graymap",
- ".pic": "image/pict",
- ".pict": "image/pict",
- ".pkg": "application/x-newton-compatible-pkg",
- ".pko": "application/vndms-pkipko",
- ".pl": "text/x-scriptperl",
- ".plx": "application/x-pixclscript",
- ".pm4": "application/x-pagemaker",
- ".pm5": "application/x-pagemaker",
- ".pm": "text/x-scriptperl-module",
- ".png": "image/png",
- ".pnm": "application/x-portable-anymap",
- ".pot": "application/mspowerpoint",
- ".pov": "model/x-pov",
- ".ppa": "application/vndms-powerpoint",
- ".ppm": "image/x-portable-pixmap",
- ".pps": "application/mspowerpoint",
- ".ppt": "application/mspowerpoint",
- ".ppz": "application/mspowerpoint",
- ".pre": "application/x-freelance",
- ".prt": "application/pro_eng",
- ".ps": "application/postscript",
- ".psd": "application/octet-stream",
- ".pvu": "paleovu/x-pv",
- ".pwz": "application/vndms-powerpoint",
- ".py": "text/x-scriptphyton",
- ".pyc": "application/x-bytecodepython",
- ".qcp": "audio/vndqcelp",
- ".qd3": "x-world/x-3dmf",
- ".qd3d": "x-world/x-3dmf",
- ".qif": "image/x-quicktime",
- ".qt": "video/quicktime",
- ".qtc": "video/x-qtc",
- ".qti": "image/x-quicktime",
- ".qtif": "image/x-quicktime",
- ".ra": "audio/x-pn-realaudio",
- ".ram": "audio/x-pn-realaudio",
- ".rar": "application/x-rar-compressed",
- ".ras": "application/x-cmu-raster",
- ".rast": "image/cmu-raster",
- ".rexx": "text/x-scriptrexx",
- ".rf": "image/vndrn-realflash",
- ".rgb": "image/x-rgb",
- ".rm": "application/vndrn-realmedia",
- ".rmi": "audio/mid",
- ".rmm": "audio/x-pn-realaudio",
- ".rmp": "audio/x-pn-realaudio",
- ".rng": "application/ringing-tones",
- ".rnx": "application/vndrn-realplayer",
- ".roff": "application/x-troff",
- ".rp": "image/vndrn-realpix",
- ".rpm": "audio/x-pn-realaudio-plugin",
- ".rt": "text/vndrn-realtext",
- ".rtf": "text/richtext",
- ".rtx": "text/richtext",
- ".rv": "video/vndrn-realvideo",
- ".s": "text/x-asm",
- ".s3m": "audio/s3m",
- ".s7z": "application/x-7z-compressed",
- ".saveme": "application/octet-stream",
- ".sbk": "application/x-tbook",
- ".scm": "text/x-scriptscheme",
- ".sdml": "text/plain",
- ".sdp": "application/sdp",
- ".sdr": "application/sounder",
- ".sea": "application/sea",
- ".set": "application/set",
- ".sgm": "text/x-sgml",
- ".sgml": "text/x-sgml",
- ".sh": "text/x-scriptsh",
- ".shar": "application/x-bsh",
- ".shtml": "text/x-server-parsed-html",
- ".sid": "audio/x-psid",
- ".skd": "application/x-koan",
- ".skm": "application/x-koan",
- ".skp": "application/x-koan",
- ".skt": "application/x-koan",
- ".sit": "application/x-stuffit",
- ".sitx": "application/x-stuffitx",
- ".sl": "application/x-seelogo",
- ".smi": "application/smil",
- ".smil": "application/smil",
- ".snd": "audio/basic",
- ".sol": "application/solids",
- ".spc": "text/x-speech",
- ".spl": "application/futuresplash",
- ".spr": "application/x-sprite",
- ".sprite": "application/x-sprite",
- ".spx": "audio/ogg",
- ".src": "application/x-wais-source",
- ".ssi": "text/x-server-parsed-html",
- ".ssm": "application/streamingmedia",
- ".sst": "application/vndms-pkicertstore",
- ".step": "application/step",
- ".stl": "application/sla",
- ".stp": "application/step",
- ".sv4cpio": "application/x-sv4cpio",
- ".sv4crc": "application/x-sv4crc",
- ".svf": "image/vnddwg",
- ".svg": "image/svg+xml",
- ".svr": "application/x-world",
- ".swf": "application/x-shockwave-flash",
- ".t": "application/x-troff",
- ".talk": "text/x-speech",
- ".tar": "application/x-tar",
- ".tbk": "application/toolbook",
- ".tcl": "text/x-scripttcl",
- ".tcsh": "text/x-scripttcsh",
- ".tex": "application/x-tex",
- ".texi": "application/x-texinfo",
- ".texinfo": "application/x-texinfo",
- ".text": "text/plain",
- ".tgz": "application/gnutar",
- ".tif": "image/tiff",
- ".tiff": "image/tiff",
- ".tr": "application/x-troff",
- ".tsi": "audio/tsp-audio",
- ".tsp": "application/dsptype",
- ".tsv": "text/tab-separated-values",
- ".turbot": "image/florian",
- ".txt": "text/plain",
- ".uil": "text/x-uil",
- ".uni": "text/uri-list",
- ".unis": "text/uri-list",
- ".unv": "application/i-deas",
- ".uri": "text/uri-list",
- ".uris": "text/uri-list",
- ".ustar": "application/x-ustar",
- ".uu": "text/x-uuencode",
- ".uue": "text/x-uuencode",
- ".vcd": "application/x-cdlink",
- ".vcf": "text/x-vcard",
- ".vcard": "text/x-vcard",
- ".vcs": "text/x-vcalendar",
- ".vda": "application/vda",
- ".vdo": "video/vdo",
- ".vew": "application/groupwise",
- ".viv": "video/vivo",
- ".vivo": "video/vivo",
- ".vmd": "application/vocaltec-media-desc",
- ".vmf": "application/vocaltec-media-file",
- ".voc": "audio/voc",
- ".vos": "video/vosaic",
- ".vox": "audio/voxware",
- ".vqe": "audio/x-twinvq-plugin",
- ".vqf": "audio/x-twinvq",
- ".vql": "audio/x-twinvq-plugin",
- ".vrml": "application/x-vrml",
- ".vrt": "x-world/x-vrt",
- ".vsd": "application/x-visio",
- ".vst": "application/x-visio",
- ".vsw": "application/x-visio",
- ".w60": "application/wordperfect60",
- ".w61": "application/wordperfect61",
- ".w6w": "application/msword",
- ".wav": "audio/wav",
- ".wb1": "application/x-qpro",
- ".wbmp": "image/vnd.wap.wbmp",
- ".web": "application/vndxara",
- ".wiz": "application/msword",
- ".wk1": "application/x-123",
- ".wmf": "windows/metafile",
- ".wml": "text/vnd.wap.wml",
- ".wmlc": "application/vnd.wap.wmlc",
- ".wmls": "text/vnd.wap.wmlscript",
- ".wmlsc": "application/vnd.wap.wmlscriptc",
- ".word": "application/msword",
- ".wp5": "application/wordperfect",
- ".wp6": "application/wordperfect",
- ".wp": "application/wordperfect",
- ".wpd": "application/wordperfect",
- ".wq1": "application/x-lotus",
- ".wri": "application/mswrite",
- ".wrl": "application/x-world",
- ".wrz": "model/vrml",
- ".wsc": "text/scriplet",
- ".wsrc": "application/x-wais-source",
- ".wtk": "application/x-wintalk",
- ".x-png": "image/png",
- ".xbm": "image/x-xbitmap",
- ".xdr": "video/x-amt-demorun",
- ".xgz": "xgl/drawing",
- ".xif": "image/vndxiff",
- ".xl": "application/excel",
- ".xla": "application/excel",
- ".xlb": "application/excel",
- ".xlc": "application/excel",
- ".xld": "application/excel",
- ".xlk": "application/excel",
- ".xll": "application/excel",
- ".xlm": "application/excel",
- ".xls": "application/excel",
- ".xlt": "application/excel",
- ".xlv": "application/excel",
- ".xlw": "application/excel",
- ".xm": "audio/xm",
- ".xml": "text/xml",
- ".xmz": "xgl/movie",
- ".xpix": "application/x-vndls-xpix",
- ".xpm": "image/x-xpixmap",
- ".xsr": "video/x-amt-showrun",
- ".xwd": "image/x-xwd",
- ".xyz": "chemical/x-pdb",
- ".z": "application/x-compress",
- ".zip": "application/zip",
- ".zoo": "application/octet-stream",
- ".zsh": "text/x-scriptzsh",
- ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
- ".docm": "application/vnd.ms-word.document.macroEnabled.12",
- ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
- ".dotm": "application/vnd.ms-word.template.macroEnabled.12",
- ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
- ".xlsm": "application/vnd.ms-excel.sheet.macroEnabled.12",
- ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
- ".xltm": "application/vnd.ms-excel.template.macroEnabled.12",
- ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
- ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
- ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
- ".pptm": "application/vnd.ms-powerpoint.presentation.macroEnabled.12",
- ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
- ".ppsm": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12",
- ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
- ".potm": "application/vnd.ms-powerpoint.template.macroEnabled.12",
- ".ppam": "application/vnd.ms-powerpoint.addin.macroEnabled.12",
- ".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
- ".sldm": "application/vnd.ms-powerpoint.slide.macroEnabled.12",
- ".thmx": "application/vnd.ms-officetheme",
- ".onetoc": "application/onenote",
- ".onetoc2": "application/onenote",
- ".onetmp": "application/onenote",
- ".onepkg": "application/onenote",
- ".key": "application/x-iwork-keynote-sffkey",
- ".kth": "application/x-iwork-keynote-sffkth",
- ".nmbtemplate": "application/x-iwork-numbers-sfftemplate",
- ".numbers": "application/x-iwork-numbers-sffnumbers",
- ".pages": "application/x-iwork-pages-sffpages",
- ".template": "application/x-iwork-pages-sfftemplate",
- ".xpi": "application/x-xpinstall",
- ".oex": "application/x-opera-extension",
- ".mustache": "text/html",
-}
diff --git a/namespace.go b/namespace.go
deleted file mode 100644
index a6962994..00000000
--- a/namespace.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "net/http"
- "strings"
-
- beecontext "github.com/astaxie/beego/context"
-)
-
-type namespaceCond func(*beecontext.Context) bool
-
-// LinkNamespace used as link action
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type LinkNamespace func(*Namespace)
-
-// Namespace stores all the namespace info
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type Namespace struct {
- prefix string
- handlers *ControllerRegister
-}
-
-// NewNamespace returns a new Namespace
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NewNamespace(prefix string, params ...LinkNamespace) *Namespace {
- ns := &Namespace{
- prefix: prefix,
- handlers: NewControllerRegister(),
- }
- for _, p := range params {
- p(ns)
- }
- return ns
-}
-
-// Cond sets the condition function
-// if cond returns true this namespace can run, otherwise it can't
-// usage:
-// ns.Cond(func (ctx *context.Context) bool{
-// if ctx.Input.Domain() == "api.beego.me" {
-// return true
-// }
-// return false
-// })
-// Cond as the first filter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Cond(cond namespaceCond) *Namespace {
- fn := func(ctx *beecontext.Context) {
- if !cond(ctx) {
- exception("405", ctx)
- }
- }
- if v := n.handlers.filters[BeforeRouter]; len(v) > 0 {
- mr := new(FilterRouter)
- mr.tree = NewTree()
- mr.pattern = "*"
- mr.filterFunc = fn
- mr.tree.AddRouter("*", true)
- n.handlers.filters[BeforeRouter] = append([]*FilterRouter{mr}, v...)
- } else {
- n.handlers.InsertFilter("*", BeforeRouter, fn)
- }
- return n
-}
-
-// Filter adds filters to the Namespace
-// action can be "before" or "after"
-// FilterFunc
-// usage:
-// Filter("before", func (ctx *context.Context){
-// _, ok := ctx.Input.Session("uid").(int)
-// if !ok && ctx.Request.RequestURI != "/login" {
-// ctx.Redirect(302, "/login")
-// }
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Filter(action string, filter ...FilterFunc) *Namespace {
- var a int
- if action == "before" {
- a = BeforeRouter
- } else if action == "after" {
- a = FinishRouter
- }
- for _, f := range filter {
- n.handlers.InsertFilter("*", a, f)
- }
- return n
-}
-
-// Router same as beego.Router
-// refer: https://godoc.org/github.com/astaxie/beego#Router
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Router(rootpath string, c ControllerInterface, mappingMethods ...string) *Namespace {
- n.handlers.Add(rootpath, c, mappingMethods...)
- return n
-}
-
-// AutoRouter same as beego.AutoRouter
-// refer: https://godoc.org/github.com/astaxie/beego#AutoRouter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) AutoRouter(c ControllerInterface) *Namespace {
- n.handlers.AddAuto(c)
- return n
-}
-
-// AutoPrefix same as beego.AutoPrefix
-// refer: https://godoc.org/github.com/astaxie/beego#AutoPrefix
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) AutoPrefix(prefix string, c ControllerInterface) *Namespace {
- n.handlers.AddAutoPrefix(prefix, c)
- return n
-}
-
-// Get same as beego.Get
-// refer: https://godoc.org/github.com/astaxie/beego#Get
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Get(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Get(rootpath, f)
- return n
-}
-
-// Post same as beego.Post
-// refer: https://godoc.org/github.com/astaxie/beego#Post
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Post(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Post(rootpath, f)
- return n
-}
-
-// Delete same as beego.Delete
-// refer: https://godoc.org/github.com/astaxie/beego#Delete
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Delete(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Delete(rootpath, f)
- return n
-}
-
-// Put same as beego.Put
-// refer: https://godoc.org/github.com/astaxie/beego#Put
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Put(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Put(rootpath, f)
- return n
-}
-
-// Head same as beego.Head
-// refer: https://godoc.org/github.com/astaxie/beego#Head
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Head(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Head(rootpath, f)
- return n
-}
-
-// Options same as beego.Options
-// refer: https://godoc.org/github.com/astaxie/beego#Options
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Options(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Options(rootpath, f)
- return n
-}
-
-// Patch same as beego.Patch
-// refer: https://godoc.org/github.com/astaxie/beego#Patch
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Patch(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Patch(rootpath, f)
- return n
-}
-
-// Any same as beego.Any
-// refer: https://godoc.org/github.com/astaxie/beego#Any
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Any(rootpath string, f FilterFunc) *Namespace {
- n.handlers.Any(rootpath, f)
- return n
-}
-
-// Handler same as beego.Handler
-// refer: https://godoc.org/github.com/astaxie/beego#Handler
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Handler(rootpath string, h http.Handler) *Namespace {
- n.handlers.Handler(rootpath, h)
- return n
-}
-
-// Include adds the given controllers, same as beego.Include
-// refer: https://godoc.org/github.com/astaxie/beego#Include
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Include(cList ...ControllerInterface) *Namespace {
- n.handlers.Include(cList...)
- return n
-}
-
-// Namespace adds nested Namespaces
-// usage:
-//ns := beego.NewNamespace("/v1").
-//Namespace(
-// beego.NewNamespace("/shop").
-// Get("/:id", func(ctx *context.Context) {
-// ctx.Output.Body([]byte("shopinfo"))
-// }),
-// beego.NewNamespace("/order").
-// Get("/:id", func(ctx *context.Context) {
-// ctx.Output.Body([]byte("orderinfo"))
-// }),
-// beego.NewNamespace("/crm").
-// Get("/:id", func(ctx *context.Context) {
-// ctx.Output.Body([]byte("crminfo"))
-// }),
-//)
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (n *Namespace) Namespace(ns ...*Namespace) *Namespace {
- for _, ni := range ns {
- for k, v := range ni.handlers.routers {
- if _, ok := n.handlers.routers[k]; ok {
- addPrefix(v, ni.prefix)
- n.handlers.routers[k].AddTree(ni.prefix, v)
- } else {
- t := NewTree()
- t.AddTree(ni.prefix, v)
- addPrefix(t, ni.prefix)
- n.handlers.routers[k] = t
- }
- }
- if ni.handlers.enableFilter {
- for pos, filterList := range ni.handlers.filters {
- for _, mr := range filterList {
- t := NewTree()
- t.AddTree(ni.prefix, mr.tree)
- mr.tree = t
- n.handlers.insertFilterRouter(pos, mr)
- }
- }
- }
- }
- return n
-}
-
-// AddNamespace registers Namespaces into the beego application handlers;
-// multiple Namespaces are supported
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func AddNamespace(nl ...*Namespace) {
- for _, n := range nl {
- for k, v := range n.handlers.routers {
- if _, ok := BeeApp.Handlers.routers[k]; ok {
- addPrefix(v, n.prefix)
- BeeApp.Handlers.routers[k].AddTree(n.prefix, v)
- } else {
- t := NewTree()
- t.AddTree(n.prefix, v)
- addPrefix(t, n.prefix)
- BeeApp.Handlers.routers[k] = t
- }
- }
- if n.handlers.enableFilter {
- for pos, filterList := range n.handlers.filters {
- for _, mr := range filterList {
- t := NewTree()
- t.AddTree(n.prefix, mr.tree)
- mr.tree = t
- BeeApp.Handlers.insertFilterRouter(pos, mr)
- }
- }
- }
- }
-}
-
-func addPrefix(t *Tree, prefix string) {
- for _, v := range t.fixrouters {
- addPrefix(v, prefix)
- }
- if t.wildcard != nil {
- addPrefix(t.wildcard, prefix)
- }
- for _, l := range t.leaves {
- if c, ok := l.runObject.(*ControllerInfo); ok {
- if !strings.HasPrefix(c.pattern, prefix) {
- c.pattern = prefix + c.pattern
- }
- }
- }
-}
-
-// NSCond is Namespace Condition
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSCond(cond namespaceCond) LinkNamespace {
- return func(ns *Namespace) {
- ns.Cond(cond)
- }
-}
-
-// NSBefore Namespace BeforeRouter filter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSBefore(filterList ...FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Filter("before", filterList...)
- }
-}
-
-// NSAfter add Namespace FinishRouter filter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSAfter(filterList ...FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Filter("after", filterList...)
- }
-}
-
-// NSInclude Namespace Include ControllerInterface
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSInclude(cList ...ControllerInterface) LinkNamespace {
- return func(ns *Namespace) {
- ns.Include(cList...)
- }
-}
-
-// NSRouter call Namespace Router
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSRouter(rootpath string, c ControllerInterface, mappingMethods ...string) LinkNamespace {
- return func(ns *Namespace) {
- ns.Router(rootpath, c, mappingMethods...)
- }
-}
-
-// NSGet call Namespace Get
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSGet(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Get(rootpath, f)
- }
-}
-
-// NSPost call Namespace Post
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSPost(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Post(rootpath, f)
- }
-}
-
-// NSHead call Namespace Head
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSHead(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Head(rootpath, f)
- }
-}
-
-// NSPut call Namespace Put
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSPut(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Put(rootpath, f)
- }
-}
-
-// NSDelete call Namespace Delete
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSDelete(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Delete(rootpath, f)
- }
-}
-
-// NSAny call Namespace Any
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSAny(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Any(rootpath, f)
- }
-}
-
-// NSOptions call Namespace Options
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSOptions(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Options(rootpath, f)
- }
-}
-
-// NSPatch call Namespace Patch
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSPatch(rootpath string, f FilterFunc) LinkNamespace {
- return func(ns *Namespace) {
- ns.Patch(rootpath, f)
- }
-}
-
-// NSAutoRouter call Namespace AutoRouter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSAutoRouter(c ControllerInterface) LinkNamespace {
- return func(ns *Namespace) {
- ns.AutoRouter(c)
- }
-}
-
-// NSAutoPrefix call Namespace AutoPrefix
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSAutoPrefix(prefix string, c ControllerInterface) LinkNamespace {
- return func(ns *Namespace) {
- ns.AutoPrefix(prefix, c)
- }
-}
-
-// NSNamespace add sub Namespace
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSNamespace(prefix string, params ...LinkNamespace) LinkNamespace {
- return func(ns *Namespace) {
- n := NewNamespace(prefix, params...)
- ns.Namespace(n)
- }
-}
-
-// NSHandler add handler
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NSHandler(rootpath string, h http.Handler) LinkNamespace {
- return func(ns *Namespace) {
- ns.Handler(rootpath, h)
- }
-}
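A short sketch tying the pieces above together: `NewNamespace` with `LinkNamespace` options, nesting via `NSNamespace`, and registration with `AddNamespace`. The host check, paths, and handler bodies are illustrative assumptions.

```go
package main

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/context"
)

func main() {
	ns := beego.NewNamespace("/v1",
		// Only serve this namespace for an assumed API host.
		beego.NSCond(func(ctx *context.Context) bool {
			return ctx.Input.Domain() == "api.example.com"
		}),
		beego.NSNamespace("/shop",
			beego.NSGet("/:id", func(ctx *context.Context) {
				ctx.Output.Body([]byte("shopinfo"))
			}),
		),
	)

	// Mount the namespace tree on the default application and start it.
	beego.AddNamespace(ns)
	beego.Run()
}
```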
diff --git a/namespace_test.go b/namespace_test.go
deleted file mode 100644
index b3f20dff..00000000
--- a/namespace_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "net/http"
- "net/http/httptest"
- "strconv"
- "testing"
-
- "github.com/astaxie/beego/context"
-)
-
-func TestNamespaceGet(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/user", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Get("/user", func(ctx *context.Context) {
- ctx.Output.Body([]byte("v1_user"))
- })
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "v1_user" {
- t.Errorf("TestNamespaceGet can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespacePost(t *testing.T) {
- r, _ := http.NewRequest("POST", "/v1/user/123", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Post("/user/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- })
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "123" {
- t.Errorf("TestNamespacePost can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespaceNest(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/admin/order", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Namespace(
- NewNamespace("/admin").
- Get("/order", func(ctx *context.Context) {
- ctx.Output.Body([]byte("order"))
- }),
- )
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "order" {
- t.Errorf("TestNamespaceNest can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespaceNestParam(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/admin/order/123", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Namespace(
- NewNamespace("/admin").
- Get("/order/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- }),
- )
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "123" {
- t.Errorf("TestNamespaceNestParam can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespaceRouter(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/api/list", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Router("/api/list", &TestController{}, "*:List")
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("TestNamespaceRouter can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespaceAutoFunc(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/test/list", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.AutoRouter(&TestController{})
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestNamespaceFilter(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v1/user/123", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v1")
- ns.Filter("before", func(ctx *context.Context) {
- ctx.Output.Body([]byte("this is Filter"))
- }).
- Get("/user/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- })
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "this is Filter" {
- t.Errorf("TestNamespaceFilter can't run, get the response is " + w.Body.String())
- }
-}
-
-func TestNamespaceCond(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v2/test/list", nil)
- w := httptest.NewRecorder()
-
- ns := NewNamespace("/v2")
- ns.Cond(func(ctx *context.Context) bool {
- return ctx.Input.Domain() == "beego.me"
- }).
- AutoRouter(&TestController{})
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Code != 405 {
- t.Errorf("TestNamespaceCond can't run get the result " + strconv.Itoa(w.Code))
- }
-}
-
-func TestNamespaceInside(t *testing.T) {
- r, _ := http.NewRequest("GET", "/v3/shop/order/123", nil)
- w := httptest.NewRecorder()
- ns := NewNamespace("/v3",
- NSAutoRouter(&TestController{}),
- NSNamespace("/shop",
- NSGet("/order/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- }),
- ),
- )
- AddNamespace(ns)
- BeeApp.Handlers.ServeHTTP(w, r)
- if w.Body.String() != "123" {
- t.Errorf("TestNamespaceInside can't run, get the response is " + w.Body.String())
- }
-}
diff --git a/orm/README.md b/orm/README.md
deleted file mode 100644
index 6e808d2a..00000000
--- a/orm/README.md
+++ /dev/null
@@ -1,159 +0,0 @@
-# beego orm
-
-[![Build Status](https://drone.io/github.com/astaxie/beego/status.png)](https://drone.io/github.com/astaxie/beego/latest)
-
-A powerful ORM framework for Go.
-
-It is heavily influenced by Django ORM and SQLAlchemy.
-
-**Supported Databases:**
-
-* MySQL: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql)
-* PostgreSQL: [github.com/lib/pq](https://github.com/lib/pq)
-* Sqlite3: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3)
-
-All tests pass, but more feedback is needed.
-
-**Features:**
-
-* full Go type support
-* easy to use, simple CRUD operations
-* auto join with relation tables
-* cross-database compatible queries
-* raw SQL query / mapper without an ORM model
-* full test coverage to keep it stable and strong
-
-For more features, please read the docs.
-
-**Install:**
-
- go get github.com/astaxie/beego/orm
-
-## Changelog
-
-* 2013-08-19: support table auto create
-* 2013-08-13: update test for database types
-* 2013-08-13: go type support, such as int8, uint8, byte, rune
-* 2013-08-13: date / datetime timezone support very well
-
-## Quick Start
-
-#### Simple Usage
-
-```go
-package main
-
-import (
- "fmt"
- "github.com/astaxie/beego/orm"
- _ "github.com/go-sql-driver/mysql" // import your used driver
-)
-
-// Model Struct
-type User struct {
- Id int `orm:"auto"`
- Name string `orm:"size(100)"`
-}
-
-func init() {
- // register model
- orm.RegisterModel(new(User))
-
- // set default database
- orm.RegisterDataBase("default", "mysql", "root:root@/my_db?charset=utf8", 30)
-
- // create table
- orm.RunSyncdb("default", false, true)
-}
-
-func main() {
- o := orm.NewOrm()
-
- user := User{Name: "slene"}
-
- // insert
- id, err := o.Insert(&user)
-
- // update
- user.Name = "astaxie"
- num, err := o.Update(&user)
-
- // read one
- u := User{Id: user.Id}
- err = o.Read(&u)
-
- // delete
- num, err = o.Delete(&u)
-}
-```
-
-#### Next with relation
-
-```go
-type Post struct {
- Id int `orm:"auto"`
- Title string `orm:"size(100)"`
- User *User `orm:"rel(fk)"`
-}
-
-var posts []*Post
-qs := o.QueryTable("post")
-num, err := qs.Filter("User__Name", "slene").All(&posts)
-```
-
-#### Use Raw sql
-
-If you don't like ORM, use raw SQL to query / map rows without any ORM setup:
-
-```go
-var maps []Params
-num, err := o.Raw("SELECT id FROM user WHERE name = ?", "slene").Values(&maps)
-if num > 0 {
- fmt.Println(maps[0]["id"])
-}
-```
-
-#### Transaction
-
-```go
-o.Begin()
-...
-user := User{Name: "slene"}
-id, err := o.Insert(&user)
-if err == nil {
- o.Commit()
-} else {
- o.Rollback()
-}
-
-```
-
-#### Debug Log Queries
-
-In a development environment, you can simply use
-
-```go
-func main() {
- orm.Debug = true
-...
-```
-
-to enable query logging.
-
-The output includes all queries, such as exec / prepare / transaction statements,
-
-like this:
-
-```go
-[ORM] - 2013-08-09 13:18:16 - [Queries/default] - [ db.Exec / 0.4ms] - [INSERT INTO `user` (`name`) VALUES (?)] - `slene`
-...
-```
-
-Note: this is not recommended in a production environment.
-
-## Docs
-
-More details and examples can be found in the docs and tests:
-
-[documents](http://beego.me/docs/mvc/model/overview.md)
-
diff --git a/orm/cmd.go b/orm/cmd.go
deleted file mode 100644
index 0ff4dc40..00000000
--- a/orm/cmd.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "flag"
- "fmt"
- "os"
- "strings"
-)
-
-type commander interface {
- Parse([]string)
- Run() error
-}
-
-var (
- commands = make(map[string]commander)
-)
-
-// print help.
-func printHelp(errs ...string) {
- content := `orm command usage:
-
- syncdb - auto create tables
- sqlall - print sql of create tables
- help - print this help
-`
-
- if len(errs) > 0 {
- fmt.Println(errs[0])
- }
- fmt.Println(content)
- os.Exit(2)
-}
-
-// RunCommand listens for the orm command and runs it if command arguments were passed.
-func RunCommand() {
- if len(os.Args) < 2 || os.Args[1] != "orm" {
- return
- }
-
- BootStrap()
-
- args := argString(os.Args[2:])
- name := args.Get(0)
-
- if name == "help" {
- printHelp()
- }
-
- if cmd, ok := commands[name]; ok {
- cmd.Parse(os.Args[3:])
- cmd.Run()
- os.Exit(0)
- } else {
- if name == "" {
- printHelp()
- } else {
- printHelp(fmt.Sprintf("unknown command %s", name))
- }
- }
-}
-
-// sync database struct command interface.
-type commandSyncDb struct {
- al *alias
- force bool
- verbose bool
- noInfo bool
- rtOnError bool
-}
-
-// parse orm command line arguments.
-func (d *commandSyncDb) Parse(args []string) {
- var name string
-
- flagSet := flag.NewFlagSet("orm command: syncdb", flag.ExitOnError)
- flagSet.StringVar(&name, "db", "default", "DataBase alias name")
- flagSet.BoolVar(&d.force, "force", false, "drop tables before create")
- flagSet.BoolVar(&d.verbose, "v", false, "verbose info")
- flagSet.Parse(args)
-
- d.al = getDbAlias(name)
-}
-
-// run orm line command.
-func (d *commandSyncDb) Run() error {
- var drops []string
- if d.force {
- drops = getDbDropSQL(d.al)
- }
-
- db := d.al.DB
-
- if d.force {
- for i, mi := range modelCache.allOrdered() {
- query := drops[i]
- if !d.noInfo {
- fmt.Printf("drop table `%s`\n", mi.table)
- }
- _, err := db.Exec(query)
- if d.verbose {
- fmt.Printf(" %s\n\n", query)
- }
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
- }
- }
-
- sqls, indexes := getDbCreateSQL(d.al)
-
- tables, err := d.al.DbBaser.GetTables(db)
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
-
- for i, mi := range modelCache.allOrdered() {
- if tables[mi.table] {
- if !d.noInfo {
- fmt.Printf("table `%s` already exists, skip\n", mi.table)
- }
-
- var fields []*fieldInfo
- columns, err := d.al.DbBaser.GetColumns(db, mi.table)
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
-
- for _, fi := range mi.fields.fieldsDB {
- if _, ok := columns[fi.column]; !ok {
- fields = append(fields, fi)
- }
- }
-
- for _, fi := range fields {
- query := getColumnAddQuery(d.al, fi)
-
- if !d.noInfo {
- fmt.Printf("add column `%s` for table `%s`\n", fi.fullName, mi.table)
- }
-
- _, err := db.Exec(query)
- if d.verbose {
- fmt.Printf(" %s\n", query)
- }
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
- }
-
- for _, idx := range indexes[mi.table] {
- if !d.al.DbBaser.IndexExists(db, idx.Table, idx.Name) {
- if !d.noInfo {
- fmt.Printf("create index `%s` for table `%s`\n", idx.Name, idx.Table)
- }
-
- query := idx.SQL
- _, err := db.Exec(query)
- if d.verbose {
- fmt.Printf(" %s\n", query)
- }
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
- }
- }
-
- continue
- }
-
- if !d.noInfo {
- fmt.Printf("create table `%s` \n", mi.table)
- }
-
- queries := []string{sqls[i]}
- for _, idx := range indexes[mi.table] {
- queries = append(queries, idx.SQL)
- }
-
- for _, query := range queries {
- _, err := db.Exec(query)
- if d.verbose {
- query = " " + strings.Join(strings.Split(query, "\n"), "\n ")
- fmt.Println(query)
- }
- if err != nil {
- if d.rtOnError {
- return err
- }
- fmt.Printf(" %s\n", err.Error())
- }
- }
- if d.verbose {
- fmt.Println("")
- }
- }
-
- return nil
-}
-
-// commandSQLAll prints the table creation SQL; it implements the commander interface.
-type commandSQLAll struct {
- al *alias
-}
-
-// parse orm command line arguments.
-func (d *commandSQLAll) Parse(args []string) {
- var name string
-
- flagSet := flag.NewFlagSet("orm command: sqlall", flag.ExitOnError)
- flagSet.StringVar(&name, "db", "default", "DataBase alias name")
- flagSet.Parse(args)
-
- d.al = getDbAlias(name)
-}
-
-// run the orm command.
-func (d *commandSQLAll) Run() error {
- sqls, indexes := getDbCreateSQL(d.al)
- var all []string
- for i, mi := range modelCache.allOrdered() {
- queries := []string{sqls[i]}
- for _, idx := range indexes[mi.table] {
- queries = append(queries, idx.SQL)
- }
- sql := strings.Join(queries, "\n")
- all = append(all, sql)
- }
- fmt.Println(strings.Join(all, "\n\n"))
-
- return nil
-}
-
-func init() {
- commands["syncdb"] = new(commandSyncDb)
- commands["sqlall"] = new(commandSQLAll)
-}
-
-// RunSyncdb runs the syncdb command line.
-// name is the database alias name; the default is "default".
-// force means drop the tables before creating them.
-// verbose means print all info while running the command.
-func RunSyncdb(name string, force bool, verbose bool) error {
- BootStrap()
-
- al := getDbAlias(name)
- cmd := new(commandSyncDb)
- cmd.al = al
- cmd.force = force
- cmd.noInfo = !verbose
- cmd.verbose = verbose
- cmd.rtOnError = true
- return cmd.Run()
-}
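For context on the command plumbing deleted above, a small sketch of the two ways it was typically driven; it assumes the "default" database alias and the models are already registered (as in the previous sketch).

```go
package main

import "github.com/astaxie/beego/orm"

func main() {
	// Registration of the "default" alias and the models is assumed to happen in init().

	// CLI path: RunCommand only reacts when the first argument is "orm",
	// e.g. `go run main.go orm syncdb -db=default -force -v`
	// or   `go run main.go orm sqlall -db=default`.
	orm.RunCommand()

	// Programmatic path: force drops the tables before creating them,
	// verbose prints every statement that is executed.
	if err := orm.RunSyncdb("default", false, true); err != nil {
		panic(err)
	}
}
```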
diff --git a/orm/cmd_utils.go b/orm/cmd_utils.go
deleted file mode 100644
index 692a079f..00000000
--- a/orm/cmd_utils.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "os"
- "strings"
-)
-
-type dbIndex struct {
- Table string
- Name string
- SQL string
-}
-
-// create database drop sql.
-func getDbDropSQL(al *alias) (sqls []string) {
- if len(modelCache.cache) == 0 {
- fmt.Println("no Model found, need register your model")
- os.Exit(2)
- }
-
- Q := al.DbBaser.TableQuote()
-
- for _, mi := range modelCache.allOrdered() {
- sqls = append(sqls, fmt.Sprintf(`DROP TABLE IF EXISTS %s%s%s`, Q, mi.table, Q))
- }
- return sqls
-}
-
-// get database column type string.
-func getColumnTyp(al *alias, fi *fieldInfo) (col string) {
- T := al.DbBaser.DbTypes()
- fieldType := fi.fieldType
- fieldSize := fi.size
-
-checkColumn:
- switch fieldType {
- case TypeBooleanField:
- col = T["bool"]
- case TypeVarCharField:
- if al.Driver == DRPostgres && fi.toText {
- col = T["string-text"]
- } else {
- col = fmt.Sprintf(T["string"], fieldSize)
- }
- case TypeCharField:
- col = fmt.Sprintf(T["string-char"], fieldSize)
- case TypeTextField:
- col = T["string-text"]
- case TypeTimeField:
- col = T["time.Time-clock"]
- case TypeDateField:
- col = T["time.Time-date"]
- case TypeDateTimeField:
- col = T["time.Time"]
- case TypeBitField:
- col = T["int8"]
- case TypeSmallIntegerField:
- col = T["int16"]
- case TypeIntegerField:
- col = T["int32"]
- case TypeBigIntegerField:
- if al.Driver == DRSqlite {
- fieldType = TypeIntegerField
- goto checkColumn
- }
- col = T["int64"]
- case TypePositiveBitField:
- col = T["uint8"]
- case TypePositiveSmallIntegerField:
- col = T["uint16"]
- case TypePositiveIntegerField:
- col = T["uint32"]
- case TypePositiveBigIntegerField:
- col = T["uint64"]
- case TypeFloatField:
- col = T["float64"]
- case TypeDecimalField:
- s := T["float64-decimal"]
- if !strings.Contains(s, "%d") {
- col = s
- } else {
- col = fmt.Sprintf(s, fi.digits, fi.decimals)
- }
- case TypeJSONField:
- if al.Driver != DRPostgres {
- fieldType = TypeVarCharField
- goto checkColumn
- }
- col = T["json"]
- case TypeJsonbField:
- if al.Driver != DRPostgres {
- fieldType = TypeVarCharField
- goto checkColumn
- }
- col = T["jsonb"]
- case RelForeignKey, RelOneToOne:
- fieldType = fi.relModelInfo.fields.pk.fieldType
- fieldSize = fi.relModelInfo.fields.pk.size
- goto checkColumn
- }
-
- return
-}
-
-// create alter sql string.
-func getColumnAddQuery(al *alias, fi *fieldInfo) string {
- Q := al.DbBaser.TableQuote()
- typ := getColumnTyp(al, fi)
-
- if !fi.null {
- typ += " " + "NOT NULL"
- }
-
- return fmt.Sprintf("ALTER TABLE %s%s%s ADD COLUMN %s%s%s %s %s",
- Q, fi.mi.table, Q,
- Q, fi.column, Q,
- typ, getColumnDefault(fi),
- )
-}
-
-// create the table creation SQL strings.
-func getDbCreateSQL(al *alias) (sqls []string, tableIndexes map[string][]dbIndex) {
- if len(modelCache.cache) == 0 {
- fmt.Println("no Model found, need register your model")
- os.Exit(2)
- }
-
- Q := al.DbBaser.TableQuote()
- T := al.DbBaser.DbTypes()
- sep := fmt.Sprintf("%s, %s", Q, Q)
-
- tableIndexes = make(map[string][]dbIndex)
-
- for _, mi := range modelCache.allOrdered() {
- sql := fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
- sql += fmt.Sprintf("-- Table Structure for `%s`\n", mi.fullName)
- sql += fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
-
- sql += fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s%s%s (\n", Q, mi.table, Q)
-
- columns := make([]string, 0, len(mi.fields.fieldsDB))
-
- sqlIndexes := [][]string{}
-
- for _, fi := range mi.fields.fieldsDB {
-
- column := fmt.Sprintf(" %s%s%s ", Q, fi.column, Q)
- col := getColumnTyp(al, fi)
-
- if fi.auto {
- switch al.Driver {
- case DRSqlite, DRPostgres:
- column += T["auto"]
- default:
- column += col + " " + T["auto"]
- }
- } else if fi.pk {
- column += col + " " + T["pk"]
- } else {
- column += col
-
- if !fi.null {
- column += " " + "NOT NULL"
- }
-
- //if fi.initial.String() != "" {
- // column += " DEFAULT " + fi.initial.String()
- //}
-
- // Append attribute DEFAULT
- column += getColumnDefault(fi)
-
- if fi.unique {
- column += " " + "UNIQUE"
- }
-
- if fi.index {
- sqlIndexes = append(sqlIndexes, []string{fi.column})
- }
- }
-
- if strings.Contains(column, "%COL%") {
- column = strings.Replace(column, "%COL%", fi.column, -1)
- }
-
- if fi.description != "" && al.Driver != DRSqlite {
- column += " " + fmt.Sprintf("COMMENT '%s'", fi.description)
- }
-
- columns = append(columns, column)
- }
-
- if mi.model != nil {
- allnames := getTableUnique(mi.addrField)
- if !mi.manual && len(mi.uniques) > 0 {
- allnames = append(allnames, mi.uniques)
- }
- for _, names := range allnames {
- cols := make([]string, 0, len(names))
- for _, name := range names {
- if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
- cols = append(cols, fi.column)
- } else {
- panic(fmt.Errorf("cannot found column `%s` when parse UNIQUE in `%s.TableUnique`", name, mi.fullName))
- }
- }
- column := fmt.Sprintf(" UNIQUE (%s%s%s)", Q, strings.Join(cols, sep), Q)
- columns = append(columns, column)
- }
- }
-
- sql += strings.Join(columns, ",\n")
- sql += "\n)"
-
- if al.Driver == DRMySQL {
- var engine string
- if mi.model != nil {
- engine = getTableEngine(mi.addrField)
- }
- if engine == "" {
- engine = al.Engine
- }
- sql += " ENGINE=" + engine
- }
-
- sql += ";"
- sqls = append(sqls, sql)
-
- if mi.model != nil {
- for _, names := range getTableIndex(mi.addrField) {
- cols := make([]string, 0, len(names))
- for _, name := range names {
- if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
- cols = append(cols, fi.column)
- } else {
- panic(fmt.Errorf("cannot found column `%s` when parse INDEX in `%s.TableIndex`", name, mi.fullName))
- }
- }
- sqlIndexes = append(sqlIndexes, cols)
- }
- }
-
- for _, names := range sqlIndexes {
- name := mi.table + "_" + strings.Join(names, "_")
- cols := strings.Join(names, sep)
- sql := fmt.Sprintf("CREATE INDEX %s%s%s ON %s%s%s (%s%s%s);", Q, name, Q, Q, mi.table, Q, Q, cols, Q)
-
- index := dbIndex{}
- index.Table = mi.table
- index.Name = name
- index.SQL = sql
-
- tableIndexes[mi.table] = append(tableIndexes[mi.table], index)
- }
-
- }
-
- return
-}
-
-// Get string value for the attribute "DEFAULT" for the CREATE, ALTER commands
-func getColumnDefault(fi *fieldInfo) string {
- var (
- v, t, d string
- )
-
- // Skip default attribute if field is in relations
- if fi.rel || fi.reverse {
- return v
- }
-
- t = " DEFAULT '%s' "
-
- // These defaults are used when there is no orm:"default" config value and NOT NULL is set
- switch fi.fieldType {
- case TypeTimeField, TypeDateField, TypeDateTimeField, TypeTextField:
- return v
-
- case TypeBitField, TypeSmallIntegerField, TypeIntegerField,
- TypeBigIntegerField, TypePositiveBitField, TypePositiveSmallIntegerField,
- TypePositiveIntegerField, TypePositiveBigIntegerField, TypeFloatField,
- TypeDecimalField:
- t = " DEFAULT %s "
- d = "0"
- case TypeBooleanField:
- t = " DEFAULT %s "
- d = "FALSE"
- case TypeJSONField, TypeJsonbField:
- d = "{}"
- }
-
- if fi.colDefault {
- if !fi.initial.Exist() {
- v = fmt.Sprintf(t, "")
- } else {
- v = fmt.Sprintf(t, fi.initial.String())
- }
- } else {
- if !fi.null {
- v = fmt.Sprintf(t, d)
- }
- }
-
- return v
-}
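To make the DDL helpers above concrete, here is a hypothetical model whose struct tags exercise the attributes they generate (column type and size, NOT NULL, DEFAULT, UNIQUE, and a secondary index); the exact SQL emitted still depends on the driver's DbTypes map, and the model and tag values are illustrative only.

```go
package models

// Profile is a hypothetical model; the tags below map onto the DDL pieces
// built by getColumnTyp, getColumnDefault and getDbCreateSQL:
//   size(n)               -> varchar(n)
//   null                  -> NOT NULL is omitted
//   default(v)            -> DEFAULT clause
//   unique                -> UNIQUE on the column
//   index                 -> a separate CREATE INDEX statement
//   digits(p);decimals(s) -> the decimal(p, s) form of float64-decimal
type Profile struct {
	Id    int
	Email string  `orm:"size(100);unique"`
	Nick  string  `orm:"size(32);index"`
	Bio   string  `orm:"type(text);null"`
	Score float64 `orm:"digits(10);decimals(2);default(0)"`
}
```

Registering such a model and running `orm sqlall -db=default` through the command hook above prints the CREATE TABLE and CREATE INDEX statements these tags produce.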
diff --git a/orm/db.go b/orm/db.go
deleted file mode 100644
index 5d175bf1..00000000
--- a/orm/db.go
+++ /dev/null
@@ -1,1908 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "database/sql"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "time"
-)
-
-const (
- formatTime = "15:04:05"
- formatDate = "2006-01-02"
- formatDateTime = "2006-01-02 15:04:05"
-)
-
-var (
- // ErrMissPK missing pk error
- ErrMissPK = errors.New("missed pk value")
-)
-
-var (
- operators = map[string]bool{
- "exact": true,
- "iexact": true,
- "contains": true,
- "icontains": true,
- // "regex": true,
- // "iregex": true,
- "gt": true,
- "gte": true,
- "lt": true,
- "lte": true,
- "eq": true,
- "nq": true,
- "ne": true,
- ">": true,
- ">=": true,
- "<": true,
- "<=": true,
- "=": true,
- "!=": true,
- "startswith": true,
- "endswith": true,
- "istartswith": true,
- "iendswith": true,
- "in": true,
- "between": true,
- // "year": true,
- // "month": true,
- // "day": true,
- // "week_day": true,
- "isnull": true,
- // "search": true,
- }
-)
-
-// an instance of the dbBaser interface.
-type dbBase struct {
- ins dbBaser
-}
-
-// check dbBase implements dbBaser interface.
-var _ dbBaser = new(dbBase)
-
-// get struct columns values as interface slice.
-func (d *dbBase) collectValues(mi *modelInfo, ind reflect.Value, cols []string, skipAuto bool, insert bool, names *[]string, tz *time.Location) (values []interface{}, autoFields []string, err error) {
- if names == nil {
- ns := make([]string, 0, len(cols))
- names = &ns
- }
- values = make([]interface{}, 0, len(cols))
-
- for _, column := range cols {
- var fi *fieldInfo
- if fi, _ = mi.fields.GetByAny(column); fi != nil {
- column = fi.column
- } else {
- panic(fmt.Errorf("wrong db field/column name `%s` for model `%s`", column, mi.fullName))
- }
- if !fi.dbcol || fi.auto && skipAuto {
- continue
- }
- value, err := d.collectFieldValue(mi, fi, ind, insert, tz)
- if err != nil {
- return nil, nil, err
- }
-
- // ignore empty value auto field
- if insert && fi.auto {
- if fi.fieldType&IsPositiveIntegerField > 0 {
- if vu, ok := value.(uint64); !ok || vu == 0 {
- continue
- }
- } else {
- if vu, ok := value.(int64); !ok || vu == 0 {
- continue
- }
- }
- autoFields = append(autoFields, fi.column)
- }
-
- *names, values = append(*names, column), append(values, value)
- }
-
- return
-}
-
-// get one field value in struct column as interface.
-func (d *dbBase) collectFieldValue(mi *modelInfo, fi *fieldInfo, ind reflect.Value, insert bool, tz *time.Location) (interface{}, error) {
- var value interface{}
- if fi.pk {
- _, value, _ = getExistPk(mi, ind)
- } else {
- field := ind.FieldByIndex(fi.fieldIndex)
- if fi.isFielder {
- f := field.Addr().Interface().(Fielder)
- value = f.RawValue()
- } else {
- switch fi.fieldType {
- case TypeBooleanField:
- if nb, ok := field.Interface().(sql.NullBool); ok {
- value = nil
- if nb.Valid {
- value = nb.Bool
- }
- } else if field.Kind() == reflect.Ptr {
- if field.IsNil() {
- value = nil
- } else {
- value = field.Elem().Bool()
- }
- } else {
- value = field.Bool()
- }
- case TypeVarCharField, TypeCharField, TypeTextField, TypeJSONField, TypeJsonbField:
- if ns, ok := field.Interface().(sql.NullString); ok {
- value = nil
- if ns.Valid {
- value = ns.String
- }
- } else if field.Kind() == reflect.Ptr {
- if field.IsNil() {
- value = nil
- } else {
- value = field.Elem().String()
- }
- } else {
- value = field.String()
- }
- case TypeFloatField, TypeDecimalField:
- if nf, ok := field.Interface().(sql.NullFloat64); ok {
- value = nil
- if nf.Valid {
- value = nf.Float64
- }
- } else if field.Kind() == reflect.Ptr {
- if field.IsNil() {
- value = nil
- } else {
- value = field.Elem().Float()
- }
- } else {
- vu := field.Interface()
- if _, ok := vu.(float32); ok {
- value, _ = StrTo(ToStr(vu)).Float64()
- } else {
- value = field.Float()
- }
- }
- case TypeTimeField, TypeDateField, TypeDateTimeField:
- value = field.Interface()
- if t, ok := value.(time.Time); ok {
- d.ins.TimeToDB(&t, tz)
- if t.IsZero() {
- value = nil
- } else {
- value = t
- }
- }
- default:
- switch {
- case fi.fieldType&IsPositiveIntegerField > 0:
- if field.Kind() == reflect.Ptr {
- if field.IsNil() {
- value = nil
- } else {
- value = field.Elem().Uint()
- }
- } else {
- value = field.Uint()
- }
- case fi.fieldType&IsIntegerField > 0:
- if ni, ok := field.Interface().(sql.NullInt64); ok {
- value = nil
- if ni.Valid {
- value = ni.Int64
- }
- } else if field.Kind() == reflect.Ptr {
- if field.IsNil() {
- value = nil
- } else {
- value = field.Elem().Int()
- }
- } else {
- value = field.Int()
- }
- case fi.fieldType&IsRelField > 0:
- if field.IsNil() {
- value = nil
- } else {
- if _, vu, ok := getExistPk(fi.relModelInfo, reflect.Indirect(field)); ok {
- value = vu
- } else {
- value = nil
- }
- }
- if !fi.null && value == nil {
- return nil, fmt.Errorf("field `%s` cannot be NULL", fi.fullName)
- }
- }
- }
- }
- switch fi.fieldType {
- case TypeTimeField, TypeDateField, TypeDateTimeField:
- if fi.autoNow || fi.autoNowAdd && insert {
- if insert {
- if t, ok := value.(time.Time); ok && !t.IsZero() {
- break
- }
- }
- tnow := time.Now()
- d.ins.TimeToDB(&tnow, tz)
- value = tnow
- if fi.isFielder {
- f := field.Addr().Interface().(Fielder)
- f.SetRaw(tnow.In(DefaultTimeLoc))
- } else if field.Kind() == reflect.Ptr {
- v := tnow.In(DefaultTimeLoc)
- field.Set(reflect.ValueOf(&v))
- } else {
- field.Set(reflect.ValueOf(tnow.In(DefaultTimeLoc)))
- }
- }
- case TypeJSONField, TypeJsonbField:
- if s, ok := value.(string); (ok && len(s) == 0) || value == nil {
- if fi.colDefault && fi.initial.Exist() {
- value = fi.initial.String()
- } else {
- value = nil
- }
- }
- }
- }
- return value, nil
-}
-
-// create insert sql preparation statement object.
-func (d *dbBase) PrepareInsert(q dbQuerier, mi *modelInfo) (stmtQuerier, string, error) {
- Q := d.ins.TableQuote()
-
- dbcols := make([]string, 0, len(mi.fields.dbcols))
- marks := make([]string, 0, len(mi.fields.dbcols))
- for _, fi := range mi.fields.fieldsDB {
- if !fi.auto {
- dbcols = append(dbcols, fi.column)
- marks = append(marks, "?")
- }
- }
- qmarks := strings.Join(marks, ", ")
- sep := fmt.Sprintf("%s, %s", Q, Q)
- columns := strings.Join(dbcols, sep)
-
- query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
-
- d.ins.ReplaceMarks(&query)
-
- d.ins.HasReturningID(mi, &query)
-
- stmt, err := q.Prepare(query)
- return stmt, query, err
-}
-
-// insert struct with prepared statement and given struct reflect value.
-func (d *dbBase) InsertStmt(stmt stmtQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location) (int64, error) {
- values, _, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, nil, tz)
- if err != nil {
- return 0, err
- }
-
- if d.ins.HasReturningID(mi, nil) {
- row := stmt.QueryRow(values...)
- var id int64
- err := row.Scan(&id)
- return id, err
- }
- res, err := stmt.Exec(values...)
- if err == nil {
- return res.LastInsertId()
- }
- return 0, err
-}
-
-// query sql, read the record and populate the given struct.
-func (d *dbBase) Read(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string, isForUpdate bool) error {
- var whereCols []string
- var args []interface{}
-
- // if the specified cols length > 0, use them for the where condition.
- if len(cols) > 0 {
- var err error
- whereCols = make([]string, 0, len(cols))
- args, _, err = d.collectValues(mi, ind, cols, false, false, &whereCols, tz)
- if err != nil {
- return err
- }
- } else {
- // by default, use the pk value as the where condition.
- pkColumn, pkValue, ok := getExistPk(mi, ind)
- if !ok {
- return ErrMissPK
- }
- whereCols = []string{pkColumn}
- args = append(args, pkValue)
- }
-
- Q := d.ins.TableQuote()
-
- sep := fmt.Sprintf("%s, %s", Q, Q)
- sels := strings.Join(mi.fields.dbcols, sep)
- colsNum := len(mi.fields.dbcols)
-
- sep = fmt.Sprintf("%s = ? AND %s", Q, Q)
- wheres := strings.Join(whereCols, sep)
-
- forUpdate := ""
- if isForUpdate {
- forUpdate = "FOR UPDATE"
- }
-
- query := fmt.Sprintf("SELECT %s%s%s FROM %s%s%s WHERE %s%s%s = ? %s", Q, sels, Q, Q, mi.table, Q, Q, wheres, Q, forUpdate)
-
- refs := make([]interface{}, colsNum)
- for i := range refs {
- var ref interface{}
- refs[i] = &ref
- }
-
- d.ins.ReplaceMarks(&query)
-
- row := q.QueryRow(query, args...)
- if err := row.Scan(refs...); err != nil {
- if err == sql.ErrNoRows {
- return ErrNoRows
- }
- return err
- }
- elm := reflect.New(mi.addrField.Elem().Type())
- mind := reflect.Indirect(elm)
- d.setColsValues(mi, &mind, mi.fields.dbcols, refs, tz)
- ind.Set(mind)
- return nil
-}
-
-// execute insert sql dbQuerier with given struct reflect.Value.
-func (d *dbBase) Insert(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location) (int64, error) {
- names := make([]string, 0, len(mi.fields.dbcols))
- values, autoFields, err := d.collectValues(mi, ind, mi.fields.dbcols, false, true, &names, tz)
- if err != nil {
- return 0, err
- }
-
- id, err := d.InsertValue(q, mi, false, names, values)
- if err != nil {
- return 0, err
- }
-
- if len(autoFields) > 0 {
- err = d.ins.setval(q, mi, autoFields)
- }
- return id, err
-}
-
-// multi-insert sql with given slice struct reflect.Value.
-func (d *dbBase) InsertMulti(q dbQuerier, mi *modelInfo, sind reflect.Value, bulk int, tz *time.Location) (int64, error) {
- var (
- cnt int64
- nums int
- values []interface{}
- names []string
- )
-
- // typ := reflect.Indirect(mi.addrField).Type()
-
- length, autoFields := sind.Len(), make([]string, 0, 1)
-
- for i := 1; i <= length; i++ {
-
- ind := reflect.Indirect(sind.Index(i - 1))
-
- // Is this needed ?
- // if !ind.Type().AssignableTo(typ) {
- // return cnt, ErrArgs
- // }
-
- if i == 1 {
- var (
- vus []interface{}
- err error
- )
- vus, autoFields, err = d.collectValues(mi, ind, mi.fields.dbcols, false, true, &names, tz)
- if err != nil {
- return cnt, err
- }
- values = make([]interface{}, bulk*len(vus))
- nums += copy(values, vus)
- } else {
- vus, _, err := d.collectValues(mi, ind, mi.fields.dbcols, false, true, nil, tz)
- if err != nil {
- return cnt, err
- }
-
- if len(vus) != len(names) {
- return cnt, ErrArgs
- }
-
- nums += copy(values[nums:], vus)
- }
-
- if i > 1 && i%bulk == 0 || length == i {
- num, err := d.InsertValue(q, mi, true, names, values[:nums])
- if err != nil {
- return cnt, err
- }
- cnt += num
- nums = 0
- }
- }
-
- var err error
- if len(autoFields) > 0 {
- err = d.ins.setval(q, mi, autoFields)
- }
-
- return cnt, err
-}
-
-// execute insert sql with given struct and given values.
-// insert the given values, not the field values in struct.
-func (d *dbBase) InsertValue(q dbQuerier, mi *modelInfo, isMulti bool, names []string, values []interface{}) (int64, error) {
- Q := d.ins.TableQuote()
-
- marks := make([]string, len(names))
- for i := range marks {
- marks[i] = "?"
- }
-
- sep := fmt.Sprintf("%s, %s", Q, Q)
- qmarks := strings.Join(marks, ", ")
- columns := strings.Join(names, sep)
-
- multi := len(values) / len(names)
-
- if isMulti && multi > 1 {
- qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
- }
-
- query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
-
- d.ins.ReplaceMarks(&query)
-
- if isMulti || !d.ins.HasReturningID(mi, &query) {
- res, err := q.Exec(query, values...)
- if err == nil {
- if isMulti {
- return res.RowsAffected()
- }
- return res.LastInsertId()
- }
- return 0, err
- }
- row := q.QueryRow(query, values...)
- var id int64
- err := row.Scan(&id)
- return id, err
-}
-
-// InsertOrUpdate inserts a row, or updates the existing row
-// if the primary key or a unique column conflicts.
-func (d *dbBase) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) {
- args0 := ""
- iouStr := ""
- argsMap := map[string]string{}
- switch a.Driver {
- case DRMySQL:
- iouStr = "ON DUPLICATE KEY UPDATE"
- case DRPostgres:
- if len(args) == 0 {
- return 0, fmt.Errorf("`%s` use InsertOrUpdate must have a conflict column", a.DriverName)
- }
- args0 = strings.ToLower(args[0])
- iouStr = fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET", args0)
- default:
- return 0, fmt.Errorf("`%s` nonsupport InsertOrUpdate in beego", a.DriverName)
- }
-
- // parse the key=value pairs from args
- for _, v := range args {
- kv := strings.Split(v, "=")
- if len(kv) == 2 {
- argsMap[strings.ToLower(kv[0])] = kv[1]
- }
- }
-
- isMulti := false
- names := make([]string, 0, len(mi.fields.dbcols)-1)
- Q := d.ins.TableQuote()
- values, _, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, &names, a.TZ)
-
- if err != nil {
- return 0, err
- }
-
- marks := make([]string, len(names))
- updateValues := make([]interface{}, 0)
- updates := make([]string, len(names))
- var conflitValue interface{}
- for i, v := range names {
- // identifier in database may not be case-sensitive, so quote it
- v = fmt.Sprintf("%s%s%s", Q, v, Q)
- marks[i] = "?"
- valueStr := argsMap[strings.ToLower(v)]
- if v == args0 {
- conflitValue = values[i]
- }
- if valueStr != "" {
- switch a.Driver {
- case DRMySQL:
- updates[i] = v + "=" + valueStr
- case DRPostgres:
- if conflitValue != nil {
- // postgres ON CONFLICT DO UPDATE SET can't use col=col+value expressions
- updates[i] = fmt.Sprintf("%s=(select %s from %s where %s = ? )", v, valueStr, mi.table, args0)
- updateValues = append(updateValues, conflitValue)
- } else {
- return 0, fmt.Errorf("`%s` must be in front of `%s` in your struct", args0, v)
- }
- }
- } else {
- updates[i] = v + "=?"
- updateValues = append(updateValues, values[i])
- }
- }
-
- values = append(values, updateValues...)
-
- sep := fmt.Sprintf("%s, %s", Q, Q)
- qmarks := strings.Join(marks, ", ")
- qupdates := strings.Join(updates, ", ")
- columns := strings.Join(names, sep)
-
- multi := len(values) / len(names)
-
- if isMulti {
- qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
- }
- // conflitValue may be an int, so we can't use fmt.Sprintf
- query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s) %s "+qupdates, Q, mi.table, Q, Q, columns, Q, qmarks, iouStr)
-
- d.ins.ReplaceMarks(&query)
-
- if isMulti || !d.ins.HasReturningID(mi, &query) {
- res, err := q.Exec(query, values...)
- if err == nil {
- if isMulti {
- return res.RowsAffected()
- }
- return res.LastInsertId()
- }
- return 0, err
- }
-
- row := q.QueryRow(query, values...)
- var id int64
- err = row.Scan(&id)
- if err != nil && err.Error() == `pq: syntax error at or near "ON"` {
- err = fmt.Errorf("postgres version must 9.5 or higher")
- }
- return id, err
-}
-
-// execute update sql dbQuerier with given struct reflect.Value.
-func (d *dbBase) Update(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) {
- pkName, pkValue, ok := getExistPk(mi, ind)
- if !ok {
- return 0, ErrMissPK
- }
-
- var setNames []string
-
- // if specify cols length is zero, then commit all columns.
- // if the specified cols length is zero, then update all columns.
- cols = mi.fields.dbcols
- setNames = make([]string, 0, len(mi.fields.dbcols)-1)
- } else {
- setNames = make([]string, 0, len(cols))
- }
-
- setValues, _, err := d.collectValues(mi, ind, cols, true, false, &setNames, tz)
- if err != nil {
- return 0, err
- }
-
- var findAutoNowAdd, findAutoNow bool
- var index int
- for i, col := range setNames {
- if mi.fields.GetByColumn(col).autoNowAdd {
- index = i
- findAutoNowAdd = true
- }
- if mi.fields.GetByColumn(col).autoNow {
- findAutoNow = true
- }
- }
- if findAutoNowAdd {
- setNames = append(setNames[0:index], setNames[index+1:]...)
- setValues = append(setValues[0:index], setValues[index+1:]...)
- }
-
- if !findAutoNow {
- for col, info := range mi.fields.columns {
- if info.autoNow {
- setNames = append(setNames, col)
- setValues = append(setValues, time.Now())
- }
- }
- }
-
- setValues = append(setValues, pkValue)
-
- Q := d.ins.TableQuote()
-
- sep := fmt.Sprintf("%s = ?, %s", Q, Q)
- setColumns := strings.Join(setNames, sep)
-
- query := fmt.Sprintf("UPDATE %s%s%s SET %s%s%s = ? WHERE %s%s%s = ?", Q, mi.table, Q, Q, setColumns, Q, Q, pkName, Q)
-
- d.ins.ReplaceMarks(&query)
-
- res, err := q.Exec(query, setValues...)
- if err == nil {
- return res.RowsAffected()
- }
- return 0, err
-}
-
-// execute delete sql dbQuerier with given struct reflect.Value.
-// delete index is pk.
-func (d *dbBase) Delete(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) {
- var whereCols []string
- var args []interface{}
- // if the specified cols length > 0, use them for the where condition.
- if len(cols) > 0 {
- var err error
- whereCols = make([]string, 0, len(cols))
- args, _, err = d.collectValues(mi, ind, cols, false, false, &whereCols, tz)
- if err != nil {
- return 0, err
- }
- } else {
- // by default, use the pk value as the where condition.
- pkColumn, pkValue, ok := getExistPk(mi, ind)
- if !ok {
- return 0, ErrMissPK
- }
- whereCols = []string{pkColumn}
- args = append(args, pkValue)
- }
-
- Q := d.ins.TableQuote()
-
- sep := fmt.Sprintf("%s = ? AND %s", Q, Q)
- wheres := strings.Join(whereCols, sep)
-
- query := fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s = ?", Q, mi.table, Q, Q, wheres, Q)
-
- d.ins.ReplaceMarks(&query)
- res, err := q.Exec(query, args...)
- if err == nil {
- num, err := res.RowsAffected()
- if err != nil {
- return 0, err
- }
- if num > 0 {
- if mi.fields.pk.auto {
- if mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
- ind.FieldByIndex(mi.fields.pk.fieldIndex).SetUint(0)
- } else {
- ind.FieldByIndex(mi.fields.pk.fieldIndex).SetInt(0)
- }
- }
- err := d.deleteRels(q, mi, args, tz)
- if err != nil {
- return num, err
- }
- }
- return num, err
- }
- return 0, err
-}
-
-// update table-related records by querySet.
-// needs a querySet, not a struct reflect.Value, to update related records.
-func (d *dbBase) UpdateBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, params Params, tz *time.Location) (int64, error) {
- columns := make([]string, 0, len(params))
- values := make([]interface{}, 0, len(params))
- for col, val := range params {
- if fi, ok := mi.fields.GetByAny(col); !ok || !fi.dbcol {
- panic(fmt.Errorf("wrong field/column name `%s`", col))
- } else {
- columns = append(columns, fi.column)
- values = append(values, val)
- }
- }
-
- if len(columns) == 0 {
- panic(fmt.Errorf("update params cannot empty"))
- }
-
- tables := newDbTables(mi, d.ins)
- if qs != nil {
- tables.parseRelated(qs.related, qs.relDepth)
- }
-
- where, args := tables.getCondSQL(cond, false, tz)
-
- values = append(values, args...)
-
- join := tables.getJoinSQL()
-
- var query, T string
-
- Q := d.ins.TableQuote()
-
- if d.ins.SupportUpdateJoin() {
- T = "T0."
- }
-
- cols := make([]string, 0, len(columns))
-
- for i, v := range columns {
- col := fmt.Sprintf("%s%s%s%s", T, Q, v, Q)
- if c, ok := values[i].(colValue); ok {
- switch c.opt {
- case ColAdd:
- cols = append(cols, col+" = "+col+" + ?")
- case ColMinus:
- cols = append(cols, col+" = "+col+" - ?")
- case ColMultiply:
- cols = append(cols, col+" = "+col+" * ?")
- case ColExcept:
- cols = append(cols, col+" = "+col+" / ?")
- case ColBitAnd:
- cols = append(cols, col+" = "+col+" & ?")
- case ColBitRShift:
- cols = append(cols, col+" = "+col+" >> ?")
- case ColBitLShift:
- cols = append(cols, col+" = "+col+" << ?")
- case ColBitXOR:
- cols = append(cols, col+" = "+col+" ^ ?")
- case ColBitOr:
- cols = append(cols, col+" = "+col+" | ?")
- }
- values[i] = c.value
- } else {
- cols = append(cols, col+" = ?")
- }
- }
-
- sets := strings.Join(cols, ", ") + " "
-
- if d.ins.SupportUpdateJoin() {
- query = fmt.Sprintf("UPDATE %s%s%s T0 %sSET %s%s", Q, mi.table, Q, join, sets, where)
- } else {
- supQuery := fmt.Sprintf("SELECT T0.%s%s%s FROM %s%s%s T0 %s%s", Q, mi.fields.pk.column, Q, Q, mi.table, Q, join, where)
- query = fmt.Sprintf("UPDATE %s%s%s SET %sWHERE %s%s%s IN ( %s )", Q, mi.table, Q, sets, Q, mi.fields.pk.column, Q, supQuery)
- }
-
- d.ins.ReplaceMarks(&query)
- var err error
- var res sql.Result
- if qs != nil && qs.forContext {
- res, err = q.ExecContext(qs.ctx, query, values...)
- } else {
- res, err = q.Exec(query, values...)
- }
- if err == nil {
- return res.RowsAffected()
- }
- return 0, err
-}
-
-// delete related records.
-// do UpdateBatch or DeleteBatch according to the tables' relationship conditions.
-func (d *dbBase) deleteRels(q dbQuerier, mi *modelInfo, args []interface{}, tz *time.Location) error {
- for _, fi := range mi.fields.fieldsReverse {
- fi = fi.reverseFieldInfo
- switch fi.onDelete {
- case odCascade:
- cond := NewCondition().And(fmt.Sprintf("%s__in", fi.name), args...)
- _, err := d.DeleteBatch(q, nil, fi.mi, cond, tz)
- if err != nil {
- return err
- }
- case odSetDefault, odSetNULL:
- cond := NewCondition().And(fmt.Sprintf("%s__in", fi.name), args...)
- params := Params{fi.column: nil}
- if fi.onDelete == odSetDefault {
- params[fi.column] = fi.initial.String()
- }
- _, err := d.UpdateBatch(q, nil, fi.mi, cond, params, tz)
- if err != nil {
- return err
- }
- case odDoNothing:
- }
- }
- return nil
-}
-
-// delete table-related records.
-func (d *dbBase) DeleteBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, tz *time.Location) (int64, error) {
- tables := newDbTables(mi, d.ins)
- tables.skipEnd = true
-
- if qs != nil {
- tables.parseRelated(qs.related, qs.relDepth)
- }
-
- if cond == nil || cond.IsEmpty() {
- panic(fmt.Errorf("delete operation cannot execute without condition"))
- }
-
- Q := d.ins.TableQuote()
-
- where, args := tables.getCondSQL(cond, false, tz)
- join := tables.getJoinSQL()
-
- cols := fmt.Sprintf("T0.%s%s%s", Q, mi.fields.pk.column, Q)
- query := fmt.Sprintf("SELECT %s FROM %s%s%s T0 %s%s", cols, Q, mi.table, Q, join, where)
-
- d.ins.ReplaceMarks(&query)
-
- var rs *sql.Rows
- r, err := q.Query(query, args...)
- if err != nil {
- return 0, err
- }
- rs = r
- defer rs.Close()
-
- var ref interface{}
- args = make([]interface{}, 0)
- cnt := 0
- for rs.Next() {
- if err := rs.Scan(&ref); err != nil {
- return 0, err
- }
- pkValue, err := d.convertValueFromDB(mi.fields.pk, reflect.ValueOf(ref).Interface(), tz)
- if err != nil {
- return 0, err
- }
- args = append(args, pkValue)
- cnt++
- }
-
- if cnt == 0 {
- return 0, nil
- }
-
- marks := make([]string, len(args))
- for i := range marks {
- marks[i] = "?"
- }
- sqlIn := fmt.Sprintf("IN (%s)", strings.Join(marks, ", "))
- query = fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s %s", Q, mi.table, Q, Q, mi.fields.pk.column, Q, sqlIn)
-
- d.ins.ReplaceMarks(&query)
- var res sql.Result
- if qs != nil && qs.forContext {
- res, err = q.ExecContext(qs.ctx, query, args...)
- } else {
- res, err = q.Exec(query, args...)
- }
- if err == nil {
- num, err := res.RowsAffected()
- if err != nil {
- return 0, err
- }
- if num > 0 {
- err := d.deleteRels(q, mi, args, tz)
- if err != nil {
- return num, err
- }
- }
- return num, nil
- }
- return 0, err
-}
-
-// read related records.
-func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, container interface{}, tz *time.Location, cols []string) (int64, error) {
-
- val := reflect.ValueOf(container)
- ind := reflect.Indirect(val)
-
- errTyp := true
- one := true
- isPtr := true
-
- if val.Kind() == reflect.Ptr {
- fn := ""
- if ind.Kind() == reflect.Slice {
- one = false
- typ := ind.Type().Elem()
- switch typ.Kind() {
- case reflect.Ptr:
- fn = getFullName(typ.Elem())
- case reflect.Struct:
- isPtr = false
- fn = getFullName(typ)
- }
- } else {
- fn = getFullName(ind.Type())
- }
- errTyp = fn != mi.fullName
- }
-
- if errTyp {
- if one {
- panic(fmt.Errorf("wrong object type `%s` for rows scan, need *%s", val.Type(), mi.fullName))
- } else {
- panic(fmt.Errorf("wrong object type `%s` for rows scan, need *[]*%s or *[]%s", val.Type(), mi.fullName, mi.fullName))
- }
- }
-
- rlimit := qs.limit
- offset := qs.offset
-
- Q := d.ins.TableQuote()
-
- var tCols []string
- if len(cols) > 0 {
- hasRel := len(qs.related) > 0 || qs.relDepth > 0
- tCols = make([]string, 0, len(cols))
- var maps map[string]bool
- if hasRel {
- maps = make(map[string]bool)
- }
- for _, col := range cols {
- if fi, ok := mi.fields.GetByAny(col); ok {
- tCols = append(tCols, fi.column)
- if hasRel {
- maps[fi.column] = true
- }
- } else {
- return 0, fmt.Errorf("wrong field/column name `%s`", col)
- }
- }
- if hasRel {
- for _, fi := range mi.fields.fieldsDB {
- if fi.fieldType&IsRelField > 0 {
- if !maps[fi.column] {
- tCols = append(tCols, fi.column)
- }
- }
- }
- }
- } else {
- tCols = mi.fields.dbcols
- }
-
- colsNum := len(tCols)
- sep := fmt.Sprintf("%s, T0.%s", Q, Q)
- sels := fmt.Sprintf("T0.%s%s%s", Q, strings.Join(tCols, sep), Q)
-
- tables := newDbTables(mi, d.ins)
- tables.parseRelated(qs.related, qs.relDepth)
-
- where, args := tables.getCondSQL(cond, false, tz)
- groupBy := tables.getGroupSQL(qs.groups)
- orderBy := tables.getOrderSQL(qs.orders)
- limit := tables.getLimitSQL(mi, offset, rlimit)
- join := tables.getJoinSQL()
-
- for _, tbl := range tables.tables {
- if tbl.sel {
- colsNum += len(tbl.mi.fields.dbcols)
- sep := fmt.Sprintf("%s, %s.%s", Q, tbl.index, Q)
- sels += fmt.Sprintf(", %s.%s%s%s", tbl.index, Q, strings.Join(tbl.mi.fields.dbcols, sep), Q)
- }
- }
-
- sqlSelect := "SELECT"
- if qs.distinct {
- sqlSelect += " DISTINCT"
- }
- query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s", sqlSelect, sels, Q, mi.table, Q, join, where, groupBy, orderBy, limit)
-
- if qs.forupdate {
- query += " FOR UPDATE"
- }
-
- d.ins.ReplaceMarks(&query)
-
- var rs *sql.Rows
- var err error
- if qs != nil && qs.forContext {
- rs, err = q.QueryContext(qs.ctx, query, args...)
- if err != nil {
- return 0, err
- }
- } else {
- rs, err = q.Query(query, args...)
- if err != nil {
- return 0, err
- }
- }
-
- refs := make([]interface{}, colsNum)
- for i := range refs {
- var ref interface{}
- refs[i] = &ref
- }
-
- defer rs.Close()
-
- slice := ind
-
- var cnt int64
- for rs.Next() {
- if one && cnt == 0 || !one {
- if err := rs.Scan(refs...); err != nil {
- return 0, err
- }
-
- elm := reflect.New(mi.addrField.Elem().Type())
- mind := reflect.Indirect(elm)
-
- cacheV := make(map[string]*reflect.Value)
- cacheM := make(map[string]*modelInfo)
- trefs := refs
-
- d.setColsValues(mi, &mind, tCols, refs[:len(tCols)], tz)
- trefs = refs[len(tCols):]
-
- for _, tbl := range tables.tables {
- // loop selected tables
- if tbl.sel {
- last := mind
- names := ""
- mmi := mi
- // loop cascade models
- for _, name := range tbl.names {
- names += name
- if val, ok := cacheV[names]; ok {
- last = *val
- mmi = cacheM[names]
- } else {
- fi := mmi.fields.GetByName(name)
- lastm := mmi
- mmi = fi.relModelInfo
- field := last
- if last.Kind() != reflect.Invalid {
- field = reflect.Indirect(last.FieldByIndex(fi.fieldIndex))
- if field.IsValid() {
- d.setColsValues(mmi, &field, mmi.fields.dbcols, trefs[:len(mmi.fields.dbcols)], tz)
- for _, fi := range mmi.fields.fieldsReverse {
- if fi.inModel && fi.reverseFieldInfo.mi == lastm {
- if fi.reverseFieldInfo != nil {
- f := field.FieldByIndex(fi.fieldIndex)
- if f.Kind() == reflect.Ptr {
- f.Set(last.Addr())
- }
- }
- }
- }
- last = field
- }
- }
- cacheV[names] = &field
- cacheM[names] = mmi
- }
- }
- trefs = trefs[len(mmi.fields.dbcols):]
- }
- }
-
- if one {
- ind.Set(mind)
- } else {
- if cnt == 0 {
- // you can pass an empty, pre-allocated container list;
- // orm will not replace it
- if ind.Len() != 0 {
- // if container is not empty
- // create a new one
- slice = reflect.New(ind.Type()).Elem()
- }
- }
-
- if isPtr {
- slice = reflect.Append(slice, mind.Addr())
- } else {
- slice = reflect.Append(slice, mind)
- }
- }
- }
- cnt++
- }
-
- if !one {
- if cnt > 0 {
- ind.Set(slice)
- } else {
- // when the result is empty and the container is nil,
- // set an empty container
- if ind.IsNil() {
- ind.Set(reflect.MakeSlice(ind.Type(), 0, 0))
- }
- }
- }
-
- return cnt, nil
-}
-
-// execute the count sql and return the count as int64.
-func (d *dbBase) Count(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, tz *time.Location) (cnt int64, err error) {
- tables := newDbTables(mi, d.ins)
- tables.parseRelated(qs.related, qs.relDepth)
-
- where, args := tables.getCondSQL(cond, false, tz)
- groupBy := tables.getGroupSQL(qs.groups)
- tables.getOrderSQL(qs.orders)
- join := tables.getJoinSQL()
-
- Q := d.ins.TableQuote()
-
- query := fmt.Sprintf("SELECT COUNT(*) FROM %s%s%s T0 %s%s%s", Q, mi.table, Q, join, where, groupBy)
-
- if groupBy != "" {
- query = fmt.Sprintf("SELECT COUNT(*) FROM (%s) AS T", query)
- }
-
- d.ins.ReplaceMarks(&query)
-
- var row *sql.Row
- if qs != nil && qs.forContext {
- row = q.QueryRowContext(qs.ctx, query, args...)
- } else {
- row = q.QueryRow(query, args...)
- }
- err = row.Scan(&cnt)
- return
-}
-
-// generate operator sql with placeholder marks and the corresponding param values.
-func (d *dbBase) GenerateOperatorSQL(mi *modelInfo, fi *fieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) {
- var sql string
- params := getFlatParams(fi, args, tz)
-
- if len(params) == 0 {
- panic(fmt.Errorf("operator `%s` need at least one args", operator))
- }
- arg := params[0]
-
- switch operator {
- case "in":
- marks := make([]string, len(params))
- for i := range marks {
- marks[i] = "?"
- }
- sql = fmt.Sprintf("IN (%s)", strings.Join(marks, ", "))
- case "between":
- if len(params) != 2 {
- panic(fmt.Errorf("operator `%s` need 2 args not %d", operator, len(params)))
- }
- sql = "BETWEEN ? AND ?"
- default:
- if len(params) > 1 {
- panic(fmt.Errorf("operator `%s` need 1 args not %d", operator, len(params)))
- }
- sql = d.ins.OperatorSQL(operator)
- switch operator {
- case "exact":
- if arg == nil {
- params[0] = "IS NULL"
- }
- case "iexact", "contains", "icontains", "startswith", "endswith", "istartswith", "iendswith":
- param := strings.Replace(ToStr(arg), `%`, `\%`, -1)
- switch operator {
- case "iexact":
- case "contains", "icontains":
- param = fmt.Sprintf("%%%s%%", param)
- case "startswith", "istartswith":
- param = fmt.Sprintf("%s%%", param)
- case "endswith", "iendswith":
- param = fmt.Sprintf("%%%s", param)
- }
- params[0] = param
- case "isnull":
- if b, ok := arg.(bool); ok {
- if b {
- sql = "IS NULL"
- } else {
- sql = "IS NOT NULL"
- }
- params = nil
- } else {
- panic(fmt.Errorf("operator `%s` need a bool value not `%T`", operator, arg))
- }
- }
- }
- return sql, params
-}
-
-// generate sql string with an inner function, such as UPPER(text).
-func (d *dbBase) GenerateOperatorLeftCol(*fieldInfo, string, *string) {
- // default not use
-}
-
-// set values to struct column.
-func (d *dbBase) setColsValues(mi *modelInfo, ind *reflect.Value, cols []string, values []interface{}, tz *time.Location) {
- for i, column := range cols {
- val := reflect.Indirect(reflect.ValueOf(values[i])).Interface()
-
- fi := mi.fields.GetByColumn(column)
-
- field := ind.FieldByIndex(fi.fieldIndex)
-
- value, err := d.convertValueFromDB(fi, val, tz)
- if err != nil {
- panic(fmt.Errorf("Raw value: `%v` %s", val, err.Error()))
- }
-
- _, err = d.setFieldValue(fi, value, field)
-
- if err != nil {
- panic(fmt.Errorf("Raw value: `%v` %s", val, err.Error()))
- }
- }
-}
-
-// convert a value from a database result into a value matching the field type.
-func (d *dbBase) convertValueFromDB(fi *fieldInfo, val interface{}, tz *time.Location) (interface{}, error) {
- if val == nil {
- return nil, nil
- }
-
- var value interface{}
- var tErr error
-
- var str *StrTo
- switch v := val.(type) {
- case []byte:
- s := StrTo(string(v))
- str = &s
- case string:
- s := StrTo(v)
- str = &s
- }
-
- fieldType := fi.fieldType
-
-setValue:
- switch {
- case fieldType == TypeBooleanField:
- if str == nil {
- switch v := val.(type) {
- case int64:
- b := v == 1
- value = b
- default:
- s := StrTo(ToStr(v))
- str = &s
- }
- }
- if str != nil {
- b, err := str.Bool()
- if err != nil {
- tErr = err
- goto end
- }
- value = b
- }
- case fieldType == TypeVarCharField || fieldType == TypeCharField || fieldType == TypeTextField || fieldType == TypeJSONField || fieldType == TypeJsonbField:
- if str == nil {
- value = ToStr(val)
- } else {
- value = str.String()
- }
- case fieldType == TypeTimeField || fieldType == TypeDateField || fieldType == TypeDateTimeField:
- if str == nil {
- switch t := val.(type) {
- case time.Time:
- d.ins.TimeFromDB(&t, tz)
- value = t
- default:
- s := StrTo(ToStr(t))
- str = &s
- }
- }
- if str != nil {
- s := str.String()
- var (
- t time.Time
- err error
- )
- if len(s) >= 19 {
- s = s[:19]
- t, err = time.ParseInLocation(formatDateTime, s, tz)
- } else if len(s) >= 10 {
- if len(s) > 10 {
- s = s[:10]
- }
- t, err = time.ParseInLocation(formatDate, s, tz)
- } else if len(s) >= 8 {
- if len(s) > 8 {
- s = s[:8]
- }
- t, err = time.ParseInLocation(formatTime, s, tz)
- }
- t = t.In(DefaultTimeLoc)
-
- if err != nil && s != "00:00:00" && s != "0000-00-00" && s != "0000-00-00 00:00:00" {
- tErr = err
- goto end
- }
- value = t
- }
- case fieldType&IsIntegerField > 0:
- if str == nil {
- s := StrTo(ToStr(val))
- str = &s
- }
- if str != nil {
- var err error
- switch fieldType {
- case TypeBitField:
- _, err = str.Int8()
- case TypeSmallIntegerField:
- _, err = str.Int16()
- case TypeIntegerField:
- _, err = str.Int32()
- case TypeBigIntegerField:
- _, err = str.Int64()
- case TypePositiveBitField:
- _, err = str.Uint8()
- case TypePositiveSmallIntegerField:
- _, err = str.Uint16()
- case TypePositiveIntegerField:
- _, err = str.Uint32()
- case TypePositiveBigIntegerField:
- _, err = str.Uint64()
- }
- if err != nil {
- tErr = err
- goto end
- }
- if fieldType&IsPositiveIntegerField > 0 {
- v, _ := str.Uint64()
- value = v
- } else {
- v, _ := str.Int64()
- value = v
- }
- }
- case fieldType == TypeFloatField || fieldType == TypeDecimalField:
- if str == nil {
- switch v := val.(type) {
- case float64:
- value = v
- default:
- s := StrTo(ToStr(v))
- str = &s
- }
- }
- if str != nil {
- v, err := str.Float64()
- if err != nil {
- tErr = err
- goto end
- }
- value = v
- }
- case fieldType&IsRelField > 0:
- fi = fi.relModelInfo.fields.pk
- fieldType = fi.fieldType
- goto setValue
- }
-
-end:
- if tErr != nil {
- err := fmt.Errorf("convert to `%s` failed, field: %s err: %s", fi.addrValue.Type(), fi.fullName, tErr)
- return nil, err
- }
-
- return value, nil
-
-}
-
-// set one value to struct column field.
-func (d *dbBase) setFieldValue(fi *fieldInfo, value interface{}, field reflect.Value) (interface{}, error) {
-
- fieldType := fi.fieldType
- isNative := !fi.isFielder
-
-setValue:
- switch {
- case fieldType == TypeBooleanField:
- if isNative {
- if nb, ok := field.Interface().(sql.NullBool); ok {
- if value == nil {
- nb.Valid = false
- } else {
- nb.Bool = value.(bool)
- nb.Valid = true
- }
- field.Set(reflect.ValueOf(nb))
- } else if field.Kind() == reflect.Ptr {
- if value != nil {
- v := value.(bool)
- field.Set(reflect.ValueOf(&v))
- }
- } else {
- if value == nil {
- value = false
- }
- field.SetBool(value.(bool))
- }
- }
- case fieldType == TypeVarCharField || fieldType == TypeCharField || fieldType == TypeTextField || fieldType == TypeJSONField || fieldType == TypeJsonbField:
- if isNative {
- if ns, ok := field.Interface().(sql.NullString); ok {
- if value == nil {
- ns.Valid = false
- } else {
- ns.String = value.(string)
- ns.Valid = true
- }
- field.Set(reflect.ValueOf(ns))
- } else if field.Kind() == reflect.Ptr {
- if value != nil {
- v := value.(string)
- field.Set(reflect.ValueOf(&v))
- }
- } else {
- if value == nil {
- value = ""
- }
- field.SetString(value.(string))
- }
- }
- case fieldType == TypeTimeField || fieldType == TypeDateField || fieldType == TypeDateTimeField:
- if isNative {
- if value == nil {
- value = time.Time{}
- } else if field.Kind() == reflect.Ptr {
- if value != nil {
- v := value.(time.Time)
- field.Set(reflect.ValueOf(&v))
- }
- } else {
- field.Set(reflect.ValueOf(value))
- }
- }
- case fieldType == TypePositiveBitField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := uint8(value.(uint64))
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType == TypePositiveSmallIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := uint16(value.(uint64))
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType == TypePositiveIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- if field.Type() == reflect.TypeOf(new(uint)) {
- v := uint(value.(uint64))
- field.Set(reflect.ValueOf(&v))
- } else {
- v := uint32(value.(uint64))
- field.Set(reflect.ValueOf(&v))
- }
- }
- case fieldType == TypePositiveBigIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := value.(uint64)
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType == TypeBitField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := int8(value.(int64))
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType == TypeSmallIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := int16(value.(int64))
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType == TypeIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- if field.Type() == reflect.TypeOf(new(int)) {
- v := int(value.(int64))
- field.Set(reflect.ValueOf(&v))
- } else {
- v := int32(value.(int64))
- field.Set(reflect.ValueOf(&v))
- }
- }
- case fieldType == TypeBigIntegerField && field.Kind() == reflect.Ptr:
- if value != nil {
- v := value.(int64)
- field.Set(reflect.ValueOf(&v))
- }
- case fieldType&IsIntegerField > 0:
- if fieldType&IsPositiveIntegerField > 0 {
- if isNative {
- if value == nil {
- value = uint64(0)
- }
- field.SetUint(value.(uint64))
- }
- } else {
- if isNative {
- if ni, ok := field.Interface().(sql.NullInt64); ok {
- if value == nil {
- ni.Valid = false
- } else {
- ni.Int64 = value.(int64)
- ni.Valid = true
- }
- field.Set(reflect.ValueOf(ni))
- } else {
- if value == nil {
- value = int64(0)
- }
- field.SetInt(value.(int64))
- }
- }
- }
- case fieldType == TypeFloatField || fieldType == TypeDecimalField:
- if isNative {
- if nf, ok := field.Interface().(sql.NullFloat64); ok {
- if value == nil {
- nf.Valid = false
- } else {
- nf.Float64 = value.(float64)
- nf.Valid = true
- }
- field.Set(reflect.ValueOf(nf))
- } else if field.Kind() == reflect.Ptr {
- if value != nil {
- if field.Type() == reflect.TypeOf(new(float32)) {
- v := float32(value.(float64))
- field.Set(reflect.ValueOf(&v))
- } else {
- v := value.(float64)
- field.Set(reflect.ValueOf(&v))
- }
- }
- } else {
-
- if value == nil {
- value = float64(0)
- }
- field.SetFloat(value.(float64))
- }
- }
- case fieldType&IsRelField > 0:
- if value != nil {
- fieldType = fi.relModelInfo.fields.pk.fieldType
- mf := reflect.New(fi.relModelInfo.addrField.Elem().Type())
- field.Set(mf)
- f := mf.Elem().FieldByIndex(fi.relModelInfo.fields.pk.fieldIndex)
- field = f
- goto setValue
- }
- }
-
- if !isNative {
- fd := field.Addr().Interface().(Fielder)
- err := fd.SetRaw(value)
- if err != nil {
- err = fmt.Errorf("converted value `%v` set to Fielder `%s` failed, err: %s", value, fi.fullName, err)
- return nil, err
- }
- }
-
- return value, nil
-}
-
-// query sql, read values and save them to *[]Params, *[]ParamsList or *ParamsList.
-func (d *dbBase) ReadValues(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, exprs []string, container interface{}, tz *time.Location) (int64, error) {
-
- var (
- maps []Params
- lists []ParamsList
- list ParamsList
- )
-
- typ := 0
- switch v := container.(type) {
- case *[]Params:
- d := *v
- if len(d) == 0 {
- maps = d
- }
- typ = 1
- case *[]ParamsList:
- d := *v
- if len(d) == 0 {
- lists = d
- }
- typ = 2
- case *ParamsList:
- d := *v
- if len(d) == 0 {
- list = d
- }
- typ = 3
- default:
- panic(fmt.Errorf("unsupport read values type `%T`", container))
- }
-
- tables := newDbTables(mi, d.ins)
-
- var (
- cols []string
- infos []*fieldInfo
- )
-
- hasExprs := len(exprs) > 0
-
- Q := d.ins.TableQuote()
-
- if hasExprs {
- cols = make([]string, 0, len(exprs))
- infos = make([]*fieldInfo, 0, len(exprs))
- for _, ex := range exprs {
- index, name, fi, suc := tables.parseExprs(mi, strings.Split(ex, ExprSep))
- if !suc {
- panic(fmt.Errorf("unknown field/column name `%s`", ex))
- }
- cols = append(cols, fmt.Sprintf("%s.%s%s%s %s%s%s", index, Q, fi.column, Q, Q, name, Q))
- infos = append(infos, fi)
- }
- } else {
- cols = make([]string, 0, len(mi.fields.dbcols))
- infos = make([]*fieldInfo, 0, len(exprs))
- for _, fi := range mi.fields.fieldsDB {
- cols = append(cols, fmt.Sprintf("T0.%s%s%s %s%s%s", Q, fi.column, Q, Q, fi.name, Q))
- infos = append(infos, fi)
- }
- }
-
- where, args := tables.getCondSQL(cond, false, tz)
- groupBy := tables.getGroupSQL(qs.groups)
- orderBy := tables.getOrderSQL(qs.orders)
- limit := tables.getLimitSQL(mi, qs.offset, qs.limit)
- join := tables.getJoinSQL()
-
- sels := strings.Join(cols, ", ")
-
- sqlSelect := "SELECT"
- if qs.distinct {
- sqlSelect += " DISTINCT"
- }
- query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s", sqlSelect, sels, Q, mi.table, Q, join, where, groupBy, orderBy, limit)
-
- d.ins.ReplaceMarks(&query)
-
- rs, err := q.Query(query, args...)
- if err != nil {
- return 0, err
- }
- refs := make([]interface{}, len(cols))
- for i := range refs {
- var ref interface{}
- refs[i] = &ref
- }
-
- defer rs.Close()
-
- var (
- cnt int64
- columns []string
- )
- for rs.Next() {
- if cnt == 0 {
- cols, err := rs.Columns()
- if err != nil {
- return 0, err
- }
- columns = cols
- }
-
- if err := rs.Scan(refs...); err != nil {
- return 0, err
- }
-
- switch typ {
- case 1:
- params := make(Params, len(cols))
- for i, ref := range refs {
- fi := infos[i]
-
- val := reflect.Indirect(reflect.ValueOf(ref)).Interface()
-
- value, err := d.convertValueFromDB(fi, val, tz)
- if err != nil {
- panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error()))
- }
-
- params[columns[i]] = value
- }
- maps = append(maps, params)
- case 2:
- params := make(ParamsList, 0, len(cols))
- for i, ref := range refs {
- fi := infos[i]
-
- val := reflect.Indirect(reflect.ValueOf(ref)).Interface()
-
- value, err := d.convertValueFromDB(fi, val, tz)
- if err != nil {
- panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error()))
- }
-
- params = append(params, value)
- }
- lists = append(lists, params)
- case 3:
- for i, ref := range refs {
- fi := infos[i]
-
- val := reflect.Indirect(reflect.ValueOf(ref)).Interface()
-
- value, err := d.convertValueFromDB(fi, val, tz)
- if err != nil {
- panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error()))
- }
-
- list = append(list, value)
- }
- }
-
- cnt++
- }
-
- switch v := container.(type) {
- case *[]Params:
- *v = maps
- case *[]ParamsList:
- *v = lists
- case *ParamsList:
- *v = list
- }
-
- return cnt, nil
-}
-
-func (d *dbBase) RowsTo(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, string, string, *time.Location) (int64, error) {
- return 0, nil
-}
-
-// flag of update joined record.
-func (d *dbBase) SupportUpdateJoin() bool {
- return true
-}
-
-func (d *dbBase) MaxLimit() uint64 {
- return 18446744073709551615
-}
-
-// return quote.
-func (d *dbBase) TableQuote() string {
- return "`"
-}
-
-// replace value placeholders in the parameterized sql string.
-func (d *dbBase) ReplaceMarks(query *string) {
- // default use `?` as mark, do nothing
-}
-
-// flag of RETURNING sql.
-func (d *dbBase) HasReturningID(*modelInfo, *string) bool {
- return false
-}
-
-// sync auto key
-func (d *dbBase) setval(db dbQuerier, mi *modelInfo, autoFields []string) error {
- return nil
-}
-
-// convert time from db.
-func (d *dbBase) TimeFromDB(t *time.Time, tz *time.Location) {
- *t = t.In(tz)
-}
-
-// convert time to db.
-func (d *dbBase) TimeToDB(t *time.Time, tz *time.Location) {
- *t = t.In(tz)
-}
-
-// get database types.
-func (d *dbBase) DbTypes() map[string]string {
- return nil
-}
-
-// get all tables.
-func (d *dbBase) GetTables(db dbQuerier) (map[string]bool, error) {
- tables := make(map[string]bool)
- query := d.ins.ShowTablesQuery()
- rows, err := db.Query(query)
- if err != nil {
- return tables, err
- }
-
- defer rows.Close()
-
- for rows.Next() {
- var table string
- err := rows.Scan(&table)
- if err != nil {
- return tables, err
- }
- if table != "" {
- tables[table] = true
- }
- }
-
- return tables, nil
-}
-
-// get all columns in the table.
-func (d *dbBase) GetColumns(db dbQuerier, table string) (map[string][3]string, error) {
- columns := make(map[string][3]string)
- query := d.ins.ShowColumnsQuery(table)
- rows, err := db.Query(query)
- if err != nil {
- return columns, err
- }
-
- defer rows.Close()
-
- for rows.Next() {
- var (
- name string
- typ string
- null string
- )
- err := rows.Scan(&name, &typ, &null)
- if err != nil {
- return columns, err
- }
- columns[name] = [3]string{name, typ, null}
- }
-
- return columns, nil
-}
-
-// not implemented.
-func (d *dbBase) OperatorSQL(operator string) string {
- panic(ErrNotImplement)
-}
-
-// not implemented.
-func (d *dbBase) ShowTablesQuery() string {
- panic(ErrNotImplement)
-}
-
-// not implemented.
-func (d *dbBase) ShowColumnsQuery(table string) string {
- panic(ErrNotImplement)
-}
-
-// not implemented.
-func (d *dbBase) IndexExists(dbQuerier, string, string) bool {
- panic(ErrNotImplement)
-}
diff --git a/orm/db_alias.go b/orm/db_alias.go
deleted file mode 100644
index d3dbc595..00000000
--- a/orm/db_alias.go
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "context"
- "database/sql"
- "fmt"
- lru "github.com/hashicorp/golang-lru"
- "reflect"
- "sync"
- "time"
-)
-
-// DriverType database driver constant int.
-type DriverType int
-
-// Enum the Database driver
-const (
- _ DriverType = iota // int enum type
- DRMySQL // mysql
- DRSqlite // sqlite
- DROracle // oracle
- DRPostgres // pgsql
- DRTiDB // TiDB
-)
-
-// database driver string.
-type driver string
-
-// get the type constant int of the current driver.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d driver) Type() DriverType {
- a, _ := dataBaseCache.get(string(d))
- return a.Driver
-}
-
-// get name of current driver
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d driver) Name() string {
- return string(d)
-}
-
-// check whether driver implements the Driver interface.
-var _ Driver = new(driver)
-
-var (
- dataBaseCache = &_dbCache{cache: make(map[string]*alias)}
- drivers = map[string]DriverType{
- "mysql": DRMySQL,
- "postgres": DRPostgres,
- "sqlite3": DRSqlite,
- "tidb": DRTiDB,
- "oracle": DROracle,
- "oci8": DROracle, // github.com/mattn/go-oci8
- "ora": DROracle, //https://github.com/rana/ora
- }
- dbBasers = map[DriverType]dbBaser{
- DRMySQL: newdbBaseMysql(),
- DRSqlite: newdbBaseSqlite(),
- DROracle: newdbBaseOracle(),
- DRPostgres: newdbBasePostgres(),
- DRTiDB: newdbBaseTidb(),
- }
-)
-
-// database alias cacher.
-type _dbCache struct {
- mux sync.RWMutex
- cache map[string]*alias
-}
-
-// add database alias with original name.
-func (ac *_dbCache) add(name string, al *alias) (added bool) {
- ac.mux.Lock()
- defer ac.mux.Unlock()
- if _, ok := ac.cache[name]; !ok {
- ac.cache[name] = al
- added = true
- }
- return
-}
-
-// get database alias if cached.
-func (ac *_dbCache) get(name string) (al *alias, ok bool) {
- ac.mux.RLock()
- defer ac.mux.RUnlock()
- al, ok = ac.cache[name]
- return
-}
-
-// get default alias.
-func (ac *_dbCache) getDefault() (al *alias) {
- al, _ = ac.get("default")
- return
-}
-
-type DB struct {
- *sync.RWMutex
- DB *sql.DB
- stmtDecorators *lru.Cache
-}
-
-// Begin starts a transaction
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) Begin() (*sql.Tx, error) {
- return d.DB.Begin()
-}
-
-// BeginTx starts a transaction with the given context and options
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
- return d.DB.BeginTx(ctx, opts)
-}
-
-// the caller must call release to release the *sql.Stmt after use
-func (d *DB) getStmtDecorator(query string) (*stmtDecorator, error) {
- d.RLock()
- c, ok := d.stmtDecorators.Get(query)
- if ok {
- c.(*stmtDecorator).acquire()
- d.RUnlock()
- return c.(*stmtDecorator), nil
- }
- d.RUnlock()
-
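-	// cache miss: re-check under the write lock (double-checked locking) before preparing a new stmt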
- d.Lock()
- c, ok = d.stmtDecorators.Get(query)
- if ok {
- c.(*stmtDecorator).acquire()
- d.Unlock()
- return c.(*stmtDecorator), nil
- }
-
- stmt, err := d.Prepare(query)
- if err != nil {
- d.Unlock()
- return nil, err
- }
- sd := newStmtDecorator(stmt)
- sd.acquire()
- d.stmtDecorators.Add(query, sd)
- d.Unlock()
-
- return sd, nil
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) Prepare(query string) (*sql.Stmt, error) {
- return d.DB.Prepare(query)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
- return d.DB.PrepareContext(ctx, query)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) Exec(query string, args ...interface{}) (sql.Result, error) {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- return nil, err
- }
- stmt := sd.getStmt()
- defer sd.release()
- return stmt.Exec(args...)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- return nil, err
- }
- stmt := sd.getStmt()
- defer sd.release()
- return stmt.ExecContext(ctx, args...)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- return nil, err
- }
- stmt := sd.getStmt()
- defer sd.release()
- return stmt.Query(args...)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- return nil, err
- }
- stmt := sd.getStmt()
- defer sd.release()
- return stmt.QueryContext(ctx, args...)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) QueryRow(query string, args ...interface{}) *sql.Row {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- panic(err)
- }
- stmt := sd.getStmt()
- defer sd.release()
- return stmt.QueryRow(args...)
-
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (d *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
- sd, err := d.getStmtDecorator(query)
- if err != nil {
- panic(err)
- }
- stmt := sd.getStmt()
- defer sd.release()
-	return stmt.QueryRowContext(ctx, args...)
-}
-
-type alias struct {
- Name string
- Driver DriverType
- DriverName string
- DataSource string
- MaxIdleConns int
- MaxOpenConns int
- DB *DB
- DbBaser dbBaser
- TZ *time.Location
- Engine string
-}
-
-func detectTZ(al *alias) {
-	// the orm timezone system matches the database;
-	// default is Local
- al.TZ = DefaultTimeLoc
-
- if al.DriverName == "sphinx" {
- return
- }
-
- switch al.Driver {
- case DRMySQL:
- row := al.DB.QueryRow("SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP)")
- var tz string
- row.Scan(&tz)
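-		// TIMEDIFF returns an offset such as "08:00:00"; ensure a leading sign so it matches the -07:00:00 layout below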
- if len(tz) >= 8 {
- if tz[0] != '-' {
- tz = "+" + tz
- }
- t, err := time.Parse("-07:00:00", tz)
- if err == nil {
- if t.Location().String() != "" {
- al.TZ = t.Location()
- }
- } else {
- DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
- }
- }
-
- // get default engine from current database
- row = al.DB.QueryRow("SELECT ENGINE, TRANSACTIONS FROM information_schema.engines WHERE SUPPORT = 'DEFAULT'")
- var engine string
- var tx bool
- row.Scan(&engine, &tx)
-
- if engine != "" {
- al.Engine = engine
- } else {
- al.Engine = "INNODB"
- }
-
- case DRSqlite, DROracle:
- al.TZ = time.UTC
-
- case DRPostgres:
- row := al.DB.QueryRow("SELECT current_setting('TIMEZONE')")
- var tz string
- row.Scan(&tz)
- loc, err := time.LoadLocation(tz)
- if err == nil {
- al.TZ = loc
- } else {
- DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
- }
- }
-}
-
-func addAliasWthDB(aliasName, driverName string, db *sql.DB) (*alias, error) {
- al := new(alias)
- al.Name = aliasName
- al.DriverName = driverName
- al.DB = &DB{
- RWMutex: new(sync.RWMutex),
- DB: db,
- stmtDecorators: newStmtDecoratorLruWithEvict(),
- }
-
- if dr, ok := drivers[driverName]; ok {
- al.DbBaser = dbBasers[dr]
- al.Driver = dr
- } else {
- return nil, fmt.Errorf("driver name `%s` have not registered", driverName)
- }
-
- err := db.Ping()
- if err != nil {
- return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error())
- }
-
- if !dataBaseCache.add(aliasName, al) {
- return nil, fmt.Errorf("DataBase alias name `%s` already registered, cannot reuse", aliasName)
- }
-
- return al, nil
-}
-
-// AddAliasWthDB adds an aliasName for the driverName
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func AddAliasWthDB(aliasName, driverName string, db *sql.DB) error {
- _, err := addAliasWthDB(aliasName, driverName, db)
- return err
-}
-
-// RegisterDataBase sets the database connection params. Use the database driver's own dataSource args.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func RegisterDataBase(aliasName, driverName, dataSource string, params ...int) error {
- var (
- err error
- db *sql.DB
- al *alias
- )
-
- db, err = sql.Open(driverName, dataSource)
- if err != nil {
- err = fmt.Errorf("register db `%s`, %s", aliasName, err.Error())
- goto end
- }
-
- al, err = addAliasWthDB(aliasName, driverName, db)
- if err != nil {
- goto end
- }
-
- al.DataSource = dataSource
-
- detectTZ(al)
-
- for i, v := range params {
- switch i {
- case 0:
- SetMaxIdleConns(al.Name, v)
- case 1:
- SetMaxOpenConns(al.Name, v)
- }
- }
-
-end:
- if err != nil {
- if db != nil {
- db.Close()
- }
- DebugLog.Println(err.Error())
- }
-
- return err
-}
-
-// RegisterDriver registers a database driver under the specified driver name, defining which database type the driver is.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func RegisterDriver(driverName string, typ DriverType) error {
- if t, ok := drivers[driverName]; !ok {
- drivers[driverName] = typ
- } else {
- if t != typ {
- return fmt.Errorf("driverName `%s` db driver already registered and is other type", driverName)
- }
- }
- return nil
-}
-
-// SetDataBaseTZ changes the default timezone used by the database
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func SetDataBaseTZ(aliasName string, tz *time.Location) error {
- if al, ok := dataBaseCache.get(aliasName); ok {
- al.TZ = tz
- } else {
- return fmt.Errorf("DataBase alias name `%s` not registered", aliasName)
- }
- return nil
-}
-
-// SetMaxIdleConns changes the max idle conns for *sql.DB, for the specified database alias name
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func SetMaxIdleConns(aliasName string, maxIdleConns int) {
- al := getDbAlias(aliasName)
- al.MaxIdleConns = maxIdleConns
- al.DB.DB.SetMaxIdleConns(maxIdleConns)
-}
-
-// SetMaxOpenConns changes the max open conns for *sql.DB, for the specified database alias name
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func SetMaxOpenConns(aliasName string, maxOpenConns int) {
- al := getDbAlias(aliasName)
- al.MaxOpenConns = maxOpenConns
- al.DB.DB.SetMaxOpenConns(maxOpenConns)
-	// historical Go 1.2 compatibility: also try SetMaxOpenConns via reflection if available
- if fun := reflect.ValueOf(al.DB).MethodByName("SetMaxOpenConns"); fun.IsValid() {
- fun.Call([]reflect.Value{reflect.ValueOf(maxOpenConns)})
- }
-}
-
-// GetDB gets the *sql.DB from a registered database by its alias name.
-// Use "default" as the alias name if you do not set one.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func GetDB(aliasNames ...string) (*sql.DB, error) {
- var name string
- if len(aliasNames) > 0 {
- name = aliasNames[0]
- } else {
- name = "default"
- }
- al, ok := dataBaseCache.get(name)
- if ok {
- return al.DB.DB, nil
- }
- return nil, fmt.Errorf("DataBase of alias name `%s` not found", name)
-}
-
-type stmtDecorator struct {
- wg sync.WaitGroup
- stmt *sql.Stmt
-}
-
-func (s *stmtDecorator) getStmt() *sql.Stmt {
- return s.stmt
-}
-
-// acquire adds one to the wait group.
-// Since this method is used inside a read-lock scope,
-// we cannot do more here; consider refactoring this.
-func (s *stmtDecorator) acquire() {
- s.wg.Add(1)
-}
-
-func (s *stmtDecorator) release() {
- s.wg.Done()
-}
-
-// garbage collection for stmt
-func (s *stmtDecorator) destroy() {
- go func() {
- s.wg.Wait()
- _ = s.stmt.Close()
- }()
-}
-
-func newStmtDecorator(sqlStmt *sql.Stmt) *stmtDecorator {
- return &stmtDecorator{
- stmt: sqlStmt,
- }
-}
-
-func newStmtDecoratorLruWithEvict() *lru.Cache {
- cache, _ := lru.NewWithEvict(1000, func(key interface{}, value interface{}) {
- value.(*stmtDecorator).destroy()
- })
- return cache
-}
diff --git a/orm/db_mysql.go b/orm/db_mysql.go
deleted file mode 100644
index 36f6f566..00000000
--- a/orm/db_mysql.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "reflect"
- "strings"
-)
-
-// mysql operators.
-var mysqlOperators = map[string]string{
- "exact": "= ?",
- "iexact": "LIKE ?",
- "contains": "LIKE BINARY ?",
- "icontains": "LIKE ?",
- // "regex": "REGEXP BINARY ?",
- // "iregex": "REGEXP ?",
- "gt": "> ?",
- ">": "> ?",
- "gte": ">= ?",
- ">=": ">= ?",
- "lt": "< ?",
- "<": "< ?",
- "lte": "<= ?",
- "<=": "<= ?",
- "eq": "= ?",
- "=": "= ?",
- "ne": "!= ?",
- "!=": "!= ?",
- "startswith": "LIKE BINARY ?",
- "endswith": "LIKE BINARY ?",
- "istartswith": "LIKE ?",
- "iendswith": "LIKE ?",
-}
-
-// mysql column field types.
-var mysqlTypes = map[string]string{
- "auto": "AUTO_INCREMENT NOT NULL PRIMARY KEY",
- "pk": "NOT NULL PRIMARY KEY",
- "bool": "bool",
- "string": "varchar(%d)",
- "string-char": "char(%d)",
- "string-text": "longtext",
- "time.Time-date": "date",
- "time.Time": "datetime",
- "int8": "tinyint",
- "int16": "smallint",
- "int32": "integer",
- "int64": "bigint",
- "uint8": "tinyint unsigned",
- "uint16": "smallint unsigned",
- "uint32": "integer unsigned",
- "uint64": "bigint unsigned",
- "float64": "double precision",
- "float64-decimal": "numeric(%d, %d)",
-}
-
-// mysql dbBaser implementation.
-type dbBaseMysql struct {
- dbBase
-}
-
-var _ dbBaser = new(dbBaseMysql)
-
-// get mysql operator.
-func (d *dbBaseMysql) OperatorSQL(operator string) string {
- return mysqlOperators[operator]
-}
-
-// get mysql table field types.
-func (d *dbBaseMysql) DbTypes() map[string]string {
- return mysqlTypes
-}
-
-// show table sql for mysql.
-func (d *dbBaseMysql) ShowTablesQuery() string {
- return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()"
-}
-
-// show columns sql of table for mysql.
-func (d *dbBaseMysql) ShowColumnsQuery(table string) string {
- return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+
- "WHERE table_schema = DATABASE() AND table_name = '%s'", table)
-}
-
-// execute sql to check index exist.
-func (d *dbBaseMysql) IndexExists(db dbQuerier, table string, name string) bool {
- row := db.QueryRow("SELECT count(*) FROM information_schema.statistics "+
- "WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name)
- var cnt int
- row.Scan(&cnt)
- return cnt > 0
-}
-
-// InsertOrUpdate inserts or updates a row.
-// If the primary key or a unique column conflicts, the existing row is updated;
-// otherwise a new row is inserted.
-// Adds "`" quoting for mysql sql building.
-func (d *dbBaseMysql) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) {
- var iouStr string
- argsMap := map[string]string{}
-
- iouStr = "ON DUPLICATE KEY UPDATE"
-
-	// parse the key-value pairs from args
- for _, v := range args {
- kv := strings.Split(v, "=")
- if len(kv) == 2 {
- argsMap[strings.ToLower(kv[0])] = kv[1]
- }
- }
-
- isMulti := false
- names := make([]string, 0, len(mi.fields.dbcols)-1)
- Q := d.ins.TableQuote()
- values, _, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, &names, a.TZ)
-
- if err != nil {
- return 0, err
- }
-
- marks := make([]string, len(names))
- updateValues := make([]interface{}, 0)
- updates := make([]string, len(names))
-
- for i, v := range names {
- marks[i] = "?"
- valueStr := argsMap[strings.ToLower(v)]
- if valueStr != "" {
- updates[i] = "`" + v + "`" + "=" + valueStr
- } else {
- updates[i] = "`" + v + "`" + "=?"
- updateValues = append(updateValues, values[i])
- }
- }
-
- values = append(values, updateValues...)
-
- sep := fmt.Sprintf("%s, %s", Q, Q)
- qmarks := strings.Join(marks, ", ")
- qupdates := strings.Join(updates, ", ")
- columns := strings.Join(names, sep)
-
- multi := len(values) / len(names)
-
- if isMulti {
- qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
- }
-	// the conflict value may be an int, so it cannot be built with fmt.Sprintf
- query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s) %s "+qupdates, Q, mi.table, Q, Q, columns, Q, qmarks, iouStr)
-
- d.ins.ReplaceMarks(&query)
-
- if isMulti || !d.ins.HasReturningID(mi, &query) {
- res, err := q.Exec(query, values...)
- if err == nil {
- if isMulti {
- return res.RowsAffected()
- }
- return res.LastInsertId()
- }
- return 0, err
- }
-
- row := q.QueryRow(query, values...)
- var id int64
- err = row.Scan(&id)
- return id, err
-}
-
-// create new mysql dbBaser.
-func newdbBaseMysql() dbBaser {
- b := new(dbBaseMysql)
- b.ins = b
- return b
-}
diff --git a/orm/db_oracle.go b/orm/db_oracle.go
deleted file mode 100644
index ed2ec74c..00000000
--- a/orm/db_oracle.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strings"
-)
-
-// oracle operators.
-var oracleOperators = map[string]string{
- "exact": "= ?",
- "=": "= ?",
- "gt": "> ?",
- ">": "> ?",
- "gte": ">= ?",
- ">=": ">= ?",
- "lt": "< ?",
- "<": "< ?",
- "lte": "<= ?",
- "<=": "<= ?",
- "//iendswith": "LIKE ?",
-}
-
-// oracle column field types.
-var oracleTypes = map[string]string{
- "pk": "NOT NULL PRIMARY KEY",
- "bool": "bool",
- "string": "VARCHAR2(%d)",
- "string-char": "CHAR(%d)",
- "string-text": "VARCHAR2(%d)",
- "time.Time-date": "DATE",
- "time.Time": "TIMESTAMP",
- "int8": "INTEGER",
- "int16": "INTEGER",
- "int32": "INTEGER",
- "int64": "INTEGER",
- "uint8": "INTEGER",
- "uint16": "INTEGER",
- "uint32": "INTEGER",
- "uint64": "INTEGER",
- "float64": "NUMBER",
- "float64-decimal": "NUMBER(%d, %d)",
-}
-
-// oracle dbBaser
-type dbBaseOracle struct {
- dbBase
-}
-
-var _ dbBaser = new(dbBaseOracle)
-
-// create oracle dbBaser.
-func newdbBaseOracle() dbBaser {
- b := new(dbBaseOracle)
- b.ins = b
- return b
-}
-
-// OperatorSQL get oracle operator.
-func (d *dbBaseOracle) OperatorSQL(operator string) string {
- return oracleOperators[operator]
-}
-
-// DbTypes get oracle table field types.
-func (d *dbBaseOracle) DbTypes() map[string]string {
- return oracleTypes
-}
-
-// ShowTablesQuery shows all the tables in the database
-func (d *dbBaseOracle) ShowTablesQuery() string {
- return "SELECT TABLE_NAME FROM USER_TABLES"
-}
-
-// ShowColumnsQuery shows the columns of a table for Oracle
-func (d *dbBaseOracle) ShowColumnsQuery(table string) string {
- return fmt.Sprintf("SELECT COLUMN_NAME FROM ALL_TAB_COLUMNS "+
- "WHERE TABLE_NAME ='%s'", strings.ToUpper(table))
-}
-
-// check whether the index exists
-func (d *dbBaseOracle) IndexExists(db dbQuerier, table string, name string) bool {
- row := db.QueryRow("SELECT COUNT(*) FROM USER_IND_COLUMNS, USER_INDEXES "+
- "WHERE USER_IND_COLUMNS.INDEX_NAME = USER_INDEXES.INDEX_NAME "+
- "AND USER_IND_COLUMNS.TABLE_NAME = ? AND USER_IND_COLUMNS.INDEX_NAME = ?", strings.ToUpper(table), strings.ToUpper(name))
-
- var cnt int
- row.Scan(&cnt)
- return cnt > 0
-}
-
-// execute insert sql with given struct and given values.
-// insert the given values, not the field values in struct.
-func (d *dbBaseOracle) InsertValue(q dbQuerier, mi *modelInfo, isMulti bool, names []string, values []interface{}) (int64, error) {
- Q := d.ins.TableQuote()
-
- marks := make([]string, len(names))
- for i := range marks {
- marks[i] = ":" + names[i]
- }
-
- sep := fmt.Sprintf("%s, %s", Q, Q)
- qmarks := strings.Join(marks, ", ")
- columns := strings.Join(names, sep)
-
- multi := len(values) / len(names)
-
- if isMulti {
- qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
- }
-
- query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
-
- d.ins.ReplaceMarks(&query)
-
- if isMulti || !d.ins.HasReturningID(mi, &query) {
- res, err := q.Exec(query, values...)
- if err == nil {
- if isMulti {
- return res.RowsAffected()
- }
- return res.LastInsertId()
- }
- return 0, err
- }
- row := q.QueryRow(query, values...)
- var id int64
- err := row.Scan(&id)
- return id, err
-}
diff --git a/orm/db_postgres.go b/orm/db_postgres.go
deleted file mode 100644
index 7eb88d7a..00000000
--- a/orm/db_postgres.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strconv"
-)
-
-// postgresql operators.
-var postgresOperators = map[string]string{
- "exact": "= ?",
- "iexact": "= UPPER(?)",
- "contains": "LIKE ?",
- "icontains": "LIKE UPPER(?)",
- "gt": "> ?",
- ">": "> ?",
- "gte": ">= ?",
- ">=": ">= ?",
- "lt": "< ?",
- "<": "< ?",
- "lte": "<= ?",
- "<=": "<= ?",
- "eq": "= ?",
- "=": "= ?",
- "ne": "!= ?",
- "!=": "!= ?",
- "startswith": "LIKE ?",
- "endswith": "LIKE ?",
- "istartswith": "LIKE UPPER(?)",
- "iendswith": "LIKE UPPER(?)",
-}
-
-// postgresql column field types.
-var postgresTypes = map[string]string{
- "auto": "serial NOT NULL PRIMARY KEY",
- "pk": "NOT NULL PRIMARY KEY",
- "bool": "bool",
- "string": "varchar(%d)",
- "string-char": "char(%d)",
- "string-text": "text",
- "time.Time-date": "date",
- "time.Time": "timestamp with time zone",
- "int8": `smallint CHECK("%COL%" >= -127 AND "%COL%" <= 128)`,
- "int16": "smallint",
- "int32": "integer",
- "int64": "bigint",
- "uint8": `smallint CHECK("%COL%" >= 0 AND "%COL%" <= 255)`,
- "uint16": `integer CHECK("%COL%" >= 0)`,
- "uint32": `bigint CHECK("%COL%" >= 0)`,
- "uint64": `bigint CHECK("%COL%" >= 0)`,
- "float64": "double precision",
- "float64-decimal": "numeric(%d, %d)",
- "json": "json",
- "jsonb": "jsonb",
-}
-
-// postgresql dbBaser.
-type dbBasePostgres struct {
- dbBase
-}
-
-var _ dbBaser = new(dbBasePostgres)
-
-// get postgresql operator.
-func (d *dbBasePostgres) OperatorSQL(operator string) string {
- return postgresOperators[operator]
-}
-
-// generate the functioned left-column sql, e.g. casting to ::text for contains.
-func (d *dbBasePostgres) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {
- switch operator {
- case "contains", "startswith", "endswith":
- *leftCol = fmt.Sprintf("%s::text", *leftCol)
- case "iexact", "icontains", "istartswith", "iendswith":
- *leftCol = fmt.Sprintf("UPPER(%s::text)", *leftCol)
- }
-}
-
-// postgresql does not support updating joined records.
-func (d *dbBasePostgres) SupportUpdateJoin() bool {
- return false
-}
-
-func (d *dbBasePostgres) MaxLimit() uint64 {
- return 0
-}
-
-// postgresql quote is ".
-func (d *dbBasePostgres) TableQuote() string {
- return `"`
-}
-
-// postgresql value placeholder is $n.
-// replace default ? to $n.
-func (d *dbBasePostgres) ReplaceMarks(query *string) {
- q := *query
- num := 0
- for _, c := range q {
- if c == '?' {
- num++
- }
- }
- if num == 0 {
- return
- }
- data := make([]byte, 0, len(q)+num)
- num = 1
- for i := 0; i < len(q); i++ {
- c := q[i]
- if c == '?' {
- data = append(data, '$')
- data = append(data, []byte(strconv.Itoa(num))...)
- num++
- } else {
- data = append(data, c)
- }
- }
- *query = string(data)
-}
-
-// make returning sql support for postgresql.
-func (d *dbBasePostgres) HasReturningID(mi *modelInfo, query *string) bool {
- fi := mi.fields.pk
- if fi.fieldType&IsPositiveIntegerField == 0 && fi.fieldType&IsIntegerField == 0 {
- return false
- }
-
- if query != nil {
- *query = fmt.Sprintf(`%s RETURNING "%s"`, *query, fi.column)
- }
- return true
-}
-
-// sync auto key
-func (d *dbBasePostgres) setval(db dbQuerier, mi *modelInfo, autoFields []string) error {
- if len(autoFields) == 0 {
- return nil
- }
-
- Q := d.ins.TableQuote()
- for _, name := range autoFields {
- query := fmt.Sprintf("SELECT setval(pg_get_serial_sequence('%s', '%s'), (SELECT MAX(%s%s%s) FROM %s%s%s));",
- mi.table, name,
- Q, name, Q,
- Q, mi.table, Q)
- if _, err := db.Exec(query); err != nil {
- return err
- }
- }
- return nil
-}
-
-// show table sql for postgresql.
-func (d *dbBasePostgres) ShowTablesQuery() string {
- return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema NOT IN ('pg_catalog', 'information_schema')"
-}
-
-// show table columns sql for postgresql.
-func (d *dbBasePostgres) ShowColumnsQuery(table string) string {
- return fmt.Sprintf("SELECT column_name, data_type, is_nullable FROM information_schema.columns where table_schema NOT IN ('pg_catalog', 'information_schema') and table_name = '%s'", table)
-}
-
-// get column types of postgresql.
-func (d *dbBasePostgres) DbTypes() map[string]string {
- return postgresTypes
-}
-
-// check index exist in postgresql.
-func (d *dbBasePostgres) IndexExists(db dbQuerier, table string, name string) bool {
- query := fmt.Sprintf("SELECT COUNT(*) FROM pg_indexes WHERE tablename = '%s' AND indexname = '%s'", table, name)
- row := db.QueryRow(query)
- var cnt int
- row.Scan(&cnt)
- return cnt > 0
-}
-
-// create new postgresql dbBaser.
-func newdbBasePostgres() dbBaser {
- b := new(dbBasePostgres)
- b.ins = b
- return b
-}
diff --git a/orm/db_sqlite.go b/orm/db_sqlite.go
deleted file mode 100644
index bd9f5d3b..00000000
--- a/orm/db_sqlite.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "database/sql"
- "fmt"
- "reflect"
- "time"
-)
-
-// sqlite operators.
-var sqliteOperators = map[string]string{
- "exact": "= ?",
- "iexact": "LIKE ? ESCAPE '\\'",
- "contains": "LIKE ? ESCAPE '\\'",
- "icontains": "LIKE ? ESCAPE '\\'",
- "gt": "> ?",
- ">": "> ?",
- "gte": ">= ?",
- ">=": ">= ?",
- "lt": "< ?",
- "<": "< ?",
- "lte": "<= ?",
- "<=": "<= ?",
- "eq": "= ?",
- "=": "= ?",
- "ne": "!= ?",
- "!=": "!= ?",
- "startswith": "LIKE ? ESCAPE '\\'",
- "endswith": "LIKE ? ESCAPE '\\'",
- "istartswith": "LIKE ? ESCAPE '\\'",
- "iendswith": "LIKE ? ESCAPE '\\'",
-}
-
-// sqlite column types.
-var sqliteTypes = map[string]string{
- "auto": "integer NOT NULL PRIMARY KEY AUTOINCREMENT",
- "pk": "NOT NULL PRIMARY KEY",
- "bool": "bool",
- "string": "varchar(%d)",
- "string-char": "character(%d)",
- "string-text": "text",
- "time.Time-date": "date",
- "time.Time": "datetime",
- "int8": "tinyint",
- "int16": "smallint",
- "int32": "integer",
- "int64": "bigint",
- "uint8": "tinyint unsigned",
- "uint16": "smallint unsigned",
- "uint32": "integer unsigned",
- "uint64": "bigint unsigned",
- "float64": "real",
- "float64-decimal": "decimal",
-}
-
-// sqlite dbBaser.
-type dbBaseSqlite struct {
- dbBase
-}
-
-var _ dbBaser = new(dbBaseSqlite)
-
-// override the base db Read, as SQLite does not support the SELECT ... FOR UPDATE syntax
-func (d *dbBaseSqlite) Read(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string, isForUpdate bool) error {
- if isForUpdate {
- DebugLog.Println("[WARN] SQLite does not support SELECT FOR UPDATE query, isForUpdate param is ignored and always as false to do the work")
- }
- return d.dbBase.Read(q, mi, ind, tz, cols, false)
-}
-
-// get sqlite operator.
-func (d *dbBaseSqlite) OperatorSQL(operator string) string {
- return sqliteOperators[operator]
-}
-
-// generate functioned sql for sqlite.
-// only support DATE(text).
-func (d *dbBaseSqlite) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {
- if fi.fieldType == TypeDateField {
- *leftCol = fmt.Sprintf("DATE(%s)", *leftCol)
- }
-}
-
-// sqlite does not support updating joined records.
-func (d *dbBaseSqlite) SupportUpdateJoin() bool {
- return false
-}
-
-// max int in sqlite.
-func (d *dbBaseSqlite) MaxLimit() uint64 {
- return 9223372036854775807
-}
-
-// get column types in sqlite.
-func (d *dbBaseSqlite) DbTypes() map[string]string {
- return sqliteTypes
-}
-
-// get show tables sql in sqlite.
-func (d *dbBaseSqlite) ShowTablesQuery() string {
- return "SELECT name FROM sqlite_master WHERE type = 'table'"
-}
-
-// get columns in sqlite.
-func (d *dbBaseSqlite) GetColumns(db dbQuerier, table string) (map[string][3]string, error) {
- query := d.ins.ShowColumnsQuery(table)
- rows, err := db.Query(query)
- if err != nil {
- return nil, err
- }
-
- columns := make(map[string][3]string)
- for rows.Next() {
- var tmp, name, typ, null sql.NullString
- err := rows.Scan(&tmp, &name, &typ, &null, &tmp, &tmp)
- if err != nil {
- return nil, err
- }
- columns[name.String] = [3]string{name.String, typ.String, null.String}
- }
-
- return columns, nil
-}
-
-// get show columns sql in sqlite.
-func (d *dbBaseSqlite) ShowColumnsQuery(table string) string {
- return fmt.Sprintf("pragma table_info('%s')", table)
-}
-
-// check index exist in sqlite.
-func (d *dbBaseSqlite) IndexExists(db dbQuerier, table string, name string) bool {
- query := fmt.Sprintf("PRAGMA index_list('%s')", table)
- rows, err := db.Query(query)
- if err != nil {
- panic(err)
- }
- defer rows.Close()
- for rows.Next() {
- var tmp, index sql.NullString
- rows.Scan(&tmp, &index, &tmp, &tmp, &tmp)
- if name == index.String {
- return true
- }
- }
- return false
-}
-
-// create new sqlite dbBaser.
-func newdbBaseSqlite() dbBaser {
- b := new(dbBaseSqlite)
- b.ins = b
- return b
-}
diff --git a/orm/db_tables.go b/orm/db_tables.go
deleted file mode 100644
index 4b21a6fc..00000000
--- a/orm/db_tables.go
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strings"
- "time"
-)
-
-// table info struct.
-type dbTable struct {
- id int
- index string
- name string
- names []string
- sel bool
- inner bool
- mi *modelInfo
- fi *fieldInfo
- jtl *dbTable
-}
-
-// tables collection struct, contains some tables.
-type dbTables struct {
- tablesM map[string]*dbTable
- tables []*dbTable
- mi *modelInfo
- base dbBaser
- skipEnd bool
-}
-
-// set table info to collection.
-// if it does not exist, create a new one.
-func (t *dbTables) set(names []string, mi *modelInfo, fi *fieldInfo, inner bool) *dbTable {
- name := strings.Join(names, ExprSep)
- if j, ok := t.tablesM[name]; ok {
- j.name = name
- j.mi = mi
- j.fi = fi
- j.inner = inner
- } else {
- i := len(t.tables) + 1
- jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
- t.tablesM[name] = jt
- t.tables = append(t.tables, jt)
- }
- return t.tablesM[name]
-}
-
-// add table info to collection.
-func (t *dbTables) add(names []string, mi *modelInfo, fi *fieldInfo, inner bool) (*dbTable, bool) {
- name := strings.Join(names, ExprSep)
- if _, ok := t.tablesM[name]; !ok {
- i := len(t.tables) + 1
- jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
- t.tablesM[name] = jt
- t.tables = append(t.tables, jt)
- return jt, true
- }
- return t.tablesM[name], false
-}
-
-// get table info in collection.
-func (t *dbTables) get(name string) (*dbTable, bool) {
- j, ok := t.tablesM[name]
- return j, ok
-}
-
-// get related fields info in recursive depth loop.
-// each loop decreases the depth by one.
-func (t *dbTables) loopDepth(depth int, prefix string, fi *fieldInfo, related []string) []string {
- if depth < 0 || fi.fieldType == RelManyToMany {
- return related
- }
-
- if prefix == "" {
- prefix = fi.name
- } else {
- prefix = prefix + ExprSep + fi.name
- }
- related = append(related, prefix)
-
- depth--
- for _, fi := range fi.relModelInfo.fields.fieldsRel {
- related = t.loopDepth(depth, prefix, fi, related)
- }
-
- return related
-}
-
-// parse related fields.
-func (t *dbTables) parseRelated(rels []string, depth int) {
-
- relsNum := len(rels)
- related := make([]string, relsNum)
- copy(related, rels)
-
- relDepth := depth
-
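-	// when related fields are passed in explicitly, skip the automatic depth-based expansion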
- if relsNum != 0 {
- relDepth = 0
- }
-
- relDepth--
- for _, fi := range t.mi.fields.fieldsRel {
- related = t.loopDepth(relDepth, "", fi, related)
- }
-
- for i, s := range related {
- var (
- exs = strings.Split(s, ExprSep)
- names = make([]string, 0, len(exs))
- mmi = t.mi
- cancel = true
- jtl *dbTable
- )
-
- inner := true
-
- for _, ex := range exs {
- if fi, ok := mmi.fields.GetByAny(ex); ok && fi.rel && fi.fieldType != RelManyToMany {
- names = append(names, fi.name)
- mmi = fi.relModelInfo
-
- if fi.null || t.skipEnd {
- inner = false
- }
-
- jt := t.set(names, mmi, fi, inner)
- jt.jtl = jtl
-
- if fi.reverse {
- cancel = false
- }
-
- if cancel {
- jt.sel = depth > 0
-
- if i < relsNum {
- jt.sel = true
- }
- }
-
- jtl = jt
-
- } else {
- panic(fmt.Errorf("unknown model/table name `%s`", ex))
- }
- }
- }
-}
-
-// generate join string.
-func (t *dbTables) getJoinSQL() (join string) {
- Q := t.base.TableQuote()
-
- for _, jt := range t.tables {
- if jt.inner {
- join += "INNER JOIN "
- } else {
- join += "LEFT OUTER JOIN "
- }
- var (
- table string
- t1, t2 string
- c1, c2 string
- )
- t1 = "T0"
- if jt.jtl != nil {
- t1 = jt.jtl.index
- }
- t2 = jt.index
- table = jt.mi.table
-
- switch {
- case jt.fi.fieldType == RelManyToMany || jt.fi.fieldType == RelReverseMany || jt.fi.reverse && jt.fi.reverseFieldInfo.fieldType == RelManyToMany:
- c1 = jt.fi.mi.fields.pk.column
- for _, ffi := range jt.mi.fields.fieldsRel {
- if jt.fi.mi == ffi.relModelInfo {
- c2 = ffi.column
- break
- }
- }
- default:
- c1 = jt.fi.column
- c2 = jt.fi.relModelInfo.fields.pk.column
-
- if jt.fi.reverse {
- c1 = jt.mi.fields.pk.column
- c2 = jt.fi.reverseFieldInfo.column
- }
- }
-
- join += fmt.Sprintf("%s%s%s %s ON %s.%s%s%s = %s.%s%s%s ", Q, table, Q, t2,
- t2, Q, c2, Q, t1, Q, c1, Q)
- }
- return
-}
-
-// parse orm model struct field tag expression.
-func (t *dbTables) parseExprs(mi *modelInfo, exprs []string) (index, name string, info *fieldInfo, success bool) {
- var (
- jtl *dbTable
- fi *fieldInfo
- fiN *fieldInfo
- mmi = mi
- )
-
- num := len(exprs) - 1
- var names []string
-
- inner := true
-
-loopFor:
- for i, ex := range exprs {
-
- var ok, okN bool
-
- if fiN != nil {
- fi = fiN
- ok = true
- fiN = nil
- }
-
- if i == 0 {
- fi, ok = mmi.fields.GetByAny(ex)
- }
-
- _ = okN
-
- if ok {
-
- isRel := fi.rel || fi.reverse
-
- names = append(names, fi.name)
-
- switch {
- case fi.rel:
- mmi = fi.relModelInfo
- if fi.fieldType == RelManyToMany {
- mmi = fi.relThroughModelInfo
- }
- case fi.reverse:
- mmi = fi.reverseFieldInfo.mi
- }
-
- if i < num {
- fiN, okN = mmi.fields.GetByAny(exprs[i+1])
- }
-
- if isRel && (!fi.mi.isThrough || num != i) {
- if fi.null || t.skipEnd {
- inner = false
- }
-
- if t.skipEnd && okN || !t.skipEnd {
- if t.skipEnd && okN && fiN.pk {
- goto loopEnd
- }
-
- jt, _ := t.add(names, mmi, fi, inner)
- jt.jtl = jtl
- jtl = jt
- }
-
- }
-
- if num != i {
- continue
- }
-
- loopEnd:
-
- if i == 0 || jtl == nil {
- index = "T0"
- } else {
- index = jtl.index
- }
-
- info = fi
-
- if jtl == nil {
- name = fi.name
- } else {
- name = jtl.name + ExprSep + fi.name
- }
-
- switch {
- case fi.rel:
-
- case fi.reverse:
- switch fi.reverseFieldInfo.fieldType {
- case RelOneToOne, RelForeignKey:
- index = jtl.index
- info = fi.reverseFieldInfo.mi.fields.pk
- name = info.name
- }
- }
-
- break loopFor
-
- } else {
- index = ""
- name = ""
- info = nil
- success = false
- return
- }
- }
-
- success = index != "" && info != nil
- return
-}
-
-// generate condition sql.
-func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (where string, params []interface{}) {
- if cond == nil || cond.IsEmpty() {
- return
- }
-
- Q := t.base.TableQuote()
-
- mi := t.mi
-
- for i, p := range cond.params {
- if i > 0 {
- if p.isOr {
- where += "OR "
- } else {
- where += "AND "
- }
- }
- if p.isNot {
- where += "NOT "
- }
- if p.isCond {
- w, ps := t.getCondSQL(p.cond, true, tz)
- if w != "" {
- w = fmt.Sprintf("( %s) ", w)
- }
- where += w
- params = append(params, ps...)
- } else {
- exprs := p.exprs
-
- num := len(exprs) - 1
- operator := ""
- if operators[exprs[num]] {
- operator = exprs[num]
- exprs = exprs[:num]
- }
-
- index, _, fi, suc := t.parseExprs(mi, exprs)
- if !suc {
- panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(p.exprs, ExprSep)))
- }
-
- if operator == "" {
- operator = "exact"
- }
-
- var operSQL string
- var args []interface{}
- if p.isRaw {
- operSQL = p.sql
- } else {
- operSQL, args = t.base.GenerateOperatorSQL(mi, fi, operator, p.args, tz)
- }
-
- leftCol := fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q)
- t.base.GenerateOperatorLeftCol(fi, operator, &leftCol)
-
- where += fmt.Sprintf("%s %s ", leftCol, operSQL)
- params = append(params, args...)
-
- }
- }
-
- if !sub && where != "" {
- where = "WHERE " + where
- }
-
- return
-}
-
-// generate group sql.
-func (t *dbTables) getGroupSQL(groups []string) (groupSQL string) {
- if len(groups) == 0 {
- return
- }
-
- Q := t.base.TableQuote()
-
- groupSqls := make([]string, 0, len(groups))
- for _, group := range groups {
- exprs := strings.Split(group, ExprSep)
-
- index, _, fi, suc := t.parseExprs(t.mi, exprs)
- if !suc {
- panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
- }
-
- groupSqls = append(groupSqls, fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q))
- }
-
- groupSQL = fmt.Sprintf("GROUP BY %s ", strings.Join(groupSqls, ", "))
- return
-}
-
-// generate order sql.
-func (t *dbTables) getOrderSQL(orders []string) (orderSQL string) {
- if len(orders) == 0 {
- return
- }
-
- Q := t.base.TableQuote()
-
- orderSqls := make([]string, 0, len(orders))
- for _, order := range orders {
- asc := "ASC"
- if order[0] == '-' {
- asc = "DESC"
- order = order[1:]
- }
- exprs := strings.Split(order, ExprSep)
-
- index, _, fi, suc := t.parseExprs(t.mi, exprs)
- if !suc {
- panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
- }
-
- orderSqls = append(orderSqls, fmt.Sprintf("%s.%s%s%s %s", index, Q, fi.column, Q, asc))
- }
-
- orderSQL = fmt.Sprintf("ORDER BY %s ", strings.Join(orderSqls, ", "))
- return
-}
-
-// generate limit sql.
-func (t *dbTables) getLimitSQL(mi *modelInfo, offset int64, limit int64) (limits string) {
- if limit == 0 {
- limit = int64(DefaultRowsLimit)
- }
- if limit < 0 {
- // no limit
- if offset > 0 {
- maxLimit := t.base.MaxLimit()
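-			// a MaxLimit of 0 means the driver accepts OFFSET without a LIMIT clause (e.g. postgres)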
- if maxLimit == 0 {
- limits = fmt.Sprintf("OFFSET %d", offset)
- } else {
- limits = fmt.Sprintf("LIMIT %d OFFSET %d", maxLimit, offset)
- }
- }
- } else if offset <= 0 {
- limits = fmt.Sprintf("LIMIT %d", limit)
- } else {
- limits = fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset)
- }
- return
-}
-
-// create new tables collection.
-func newDbTables(mi *modelInfo, base dbBaser) *dbTables {
- tables := &dbTables{}
- tables.tablesM = make(map[string]*dbTable)
- tables.mi = mi
- tables.base = base
- return tables
-}
diff --git a/orm/db_tidb.go b/orm/db_tidb.go
deleted file mode 100644
index 6020a488..00000000
--- a/orm/db_tidb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2015 TiDB Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
-)
-
-// tidb dbBaser implementation (reuses the mysql dialect).
-type dbBaseTidb struct {
- dbBase
-}
-
-var _ dbBaser = new(dbBaseTidb)
-
-// get mysql operator.
-func (d *dbBaseTidb) OperatorSQL(operator string) string {
- return mysqlOperators[operator]
-}
-
-// get mysql table field types.
-func (d *dbBaseTidb) DbTypes() map[string]string {
- return mysqlTypes
-}
-
-// show table sql for mysql.
-func (d *dbBaseTidb) ShowTablesQuery() string {
- return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()"
-}
-
-// show columns sql of table for mysql.
-func (d *dbBaseTidb) ShowColumnsQuery(table string) string {
- return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+
- "WHERE table_schema = DATABASE() AND table_name = '%s'", table)
-}
-
-// execute sql to check index exist.
-func (d *dbBaseTidb) IndexExists(db dbQuerier, table string, name string) bool {
- row := db.QueryRow("SELECT count(*) FROM information_schema.statistics "+
- "WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name)
- var cnt int
- row.Scan(&cnt)
- return cnt > 0
-}
-
-// create new tidb dbBaser.
-func newdbBaseTidb() dbBaser {
- b := new(dbBaseTidb)
- b.ins = b
- return b
-}
diff --git a/orm/db_utils.go b/orm/db_utils.go
deleted file mode 100644
index 7ae10ca5..00000000
--- a/orm/db_utils.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "reflect"
- "time"
-)
-
-// get database alias by name.
-func getDbAlias(name string) *alias {
- if al, ok := dataBaseCache.get(name); ok {
- return al
- }
- panic(fmt.Errorf("unknown DataBase alias name %s", name))
-}
-
-// get pk column info.
-func getExistPk(mi *modelInfo, ind reflect.Value) (column string, value interface{}, exist bool) {
- fi := mi.fields.pk
-
- v := ind.FieldByIndex(fi.fieldIndex)
- if fi.fieldType&IsPositiveIntegerField > 0 {
- vu := v.Uint()
- exist = vu > 0
- value = vu
- } else if fi.fieldType&IsIntegerField > 0 {
- vu := v.Int()
- exist = true
- value = vu
- } else if fi.fieldType&IsRelField > 0 {
- _, value, exist = getExistPk(fi.relModelInfo, reflect.Indirect(v))
- } else {
- vu := v.String()
- exist = vu != ""
- value = vu
- }
-
- column = fi.column
- return
-}
-
-// get field values flattened into a params list.
-func getFlatParams(fi *fieldInfo, args []interface{}, tz *time.Location) (params []interface{}) {
-
-outFor:
- for _, arg := range args {
- val := reflect.ValueOf(arg)
-
- if arg == nil {
- params = append(params, arg)
- continue
- }
-
- kind := val.Kind()
- if kind == reflect.Ptr {
- val = val.Elem()
- kind = val.Kind()
- arg = val.Interface()
- }
-
- switch kind {
- case reflect.String:
- v := val.String()
- if fi != nil {
- if fi.fieldType == TypeTimeField || fi.fieldType == TypeDateField || fi.fieldType == TypeDateTimeField {
- var t time.Time
- var err error
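-					// choose the layout by string length: 19+ datetime, 10+ date, otherwise time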
- if len(v) >= 19 {
- s := v[:19]
- t, err = time.ParseInLocation(formatDateTime, s, DefaultTimeLoc)
- } else if len(v) >= 10 {
- s := v
- if len(v) > 10 {
- s = v[:10]
- }
- t, err = time.ParseInLocation(formatDate, s, tz)
- } else {
- s := v
- if len(s) > 8 {
- s = v[:8]
- }
- t, err = time.ParseInLocation(formatTime, s, tz)
- }
- if err == nil {
- if fi.fieldType == TypeDateField {
- v = t.In(tz).Format(formatDate)
- } else if fi.fieldType == TypeDateTimeField {
- v = t.In(tz).Format(formatDateTime)
- } else {
- v = t.In(tz).Format(formatTime)
- }
- }
- }
- }
- arg = v
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- arg = val.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- arg = val.Uint()
- case reflect.Float32:
- arg, _ = StrTo(ToStr(arg)).Float64()
- case reflect.Float64:
- arg = val.Float()
- case reflect.Bool:
- arg = val.Bool()
- case reflect.Slice, reflect.Array:
- if _, ok := arg.([]byte); ok {
- continue outFor
- }
-
- var args []interface{}
- for i := 0; i < val.Len(); i++ {
- v := val.Index(i)
-
- var vu interface{}
- if v.CanInterface() {
- vu = v.Interface()
- }
-
- if vu == nil {
- continue
- }
-
- args = append(args, vu)
- }
-
- if len(args) > 0 {
- p := getFlatParams(fi, args, tz)
- params = append(params, p...)
- }
- continue outFor
- case reflect.Struct:
- if v, ok := arg.(time.Time); ok {
- if fi != nil && fi.fieldType == TypeDateField {
- arg = v.In(tz).Format(formatDate)
- } else if fi != nil && fi.fieldType == TypeDateTimeField {
- arg = v.In(tz).Format(formatDateTime)
- } else if fi != nil && fi.fieldType == TypeTimeField {
- arg = v.In(tz).Format(formatTime)
- } else {
- arg = v.In(tz).Format(formatDateTime)
- }
- } else {
- typ := val.Type()
- name := getFullName(typ)
- var value interface{}
- if mmi, ok := modelCache.getByFullName(name); ok {
- if _, vu, exist := getExistPk(mmi, val); exist {
- value = vu
- }
- }
- arg = value
-
- if arg == nil {
- panic(fmt.Errorf("need a valid args value, unknown table or value `%s`", name))
- }
- }
- }
-
- params = append(params, arg)
- }
- return
-}
diff --git a/orm/models.go b/orm/models.go
deleted file mode 100644
index 4776bcba..00000000
--- a/orm/models.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "sync"
-)
-
-const (
- odCascade = "cascade"
- odSetNULL = "set_null"
- odSetDefault = "set_default"
- odDoNothing = "do_nothing"
- defaultStructTagName = "orm"
- defaultStructTagDelim = ";"
-)
-
-var (
- modelCache = &_modelCache{
- cache: make(map[string]*modelInfo),
- cacheByFullName: make(map[string]*modelInfo),
- }
-)
-
-// model info collection
-type _modelCache struct {
-	sync.RWMutex // only used outside for bootStrap
- orders []string
- cache map[string]*modelInfo
- cacheByFullName map[string]*modelInfo
- done bool
-}
-
-// get all model info
-func (mc *_modelCache) all() map[string]*modelInfo {
- m := make(map[string]*modelInfo, len(mc.cache))
- for k, v := range mc.cache {
- m[k] = v
- }
- return m
-}
-
-// get ordered model info
-func (mc *_modelCache) allOrdered() []*modelInfo {
- m := make([]*modelInfo, 0, len(mc.orders))
- for _, table := range mc.orders {
- m = append(m, mc.cache[table])
- }
- return m
-}
-
-// get model info by table name
-func (mc *_modelCache) get(table string) (mi *modelInfo, ok bool) {
- mi, ok = mc.cache[table]
- return
-}
-
-// get model info by full name
-func (mc *_modelCache) getByFullName(name string) (mi *modelInfo, ok bool) {
- mi, ok = mc.cacheByFullName[name]
- return
-}
-
-// set model info to collection
-func (mc *_modelCache) set(table string, mi *modelInfo) *modelInfo {
- mii := mc.cache[table]
- mc.cache[table] = mi
- mc.cacheByFullName[mi.fullName] = mi
- if mii == nil {
- mc.orders = append(mc.orders, table)
- }
- return mii
-}
-
-// clean all model info.
-func (mc *_modelCache) clean() {
- mc.orders = make([]string, 0)
- mc.cache = make(map[string]*modelInfo)
- mc.cacheByFullName = make(map[string]*modelInfo)
- mc.done = false
-}
-
-// ResetModelCache cleans the model cache so you can re-RegisterModel.
-// This api is commonly used in test cases.
-func ResetModelCache() {
- modelCache.clean()
-}
diff --git a/orm/models_boot.go b/orm/models_boot.go
deleted file mode 100644
index 8c56b3c4..00000000
--- a/orm/models_boot.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "os"
- "reflect"
- "runtime/debug"
- "strings"
-)
-
-// register models.
-// PrefixOrSuffix means table name prefix or suffix.
-// isPrefix indicates whether PrefixOrSuffix is a prefix or a suffix
-func registerModel(PrefixOrSuffix string, model interface{}, isPrefix bool) {
- val := reflect.ValueOf(model)
- typ := reflect.Indirect(val).Type()
-
- if val.Kind() != reflect.Ptr {
- panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", getFullName(typ)))
- }
- // For this case:
- // u := &User{}
- // registerModel(&u)
- if typ.Kind() == reflect.Ptr {
- panic(fmt.Errorf(" only allow ptr model struct, it looks you use two reference to the struct `%s`", typ))
- }
-
- table := getTableName(val)
-
- if PrefixOrSuffix != "" {
- if isPrefix {
- table = PrefixOrSuffix + table
- } else {
- table = table + PrefixOrSuffix
- }
- }
-	// a model's fullname is pkgpath + struct name
- name := getFullName(typ)
- if _, ok := modelCache.getByFullName(name); ok {
- fmt.Printf(" model `%s` repeat register, must be unique\n", name)
- os.Exit(2)
- }
-
- if _, ok := modelCache.get(table); ok {
- fmt.Printf(" table name `%s` repeat register, must be unique\n", table)
- os.Exit(2)
- }
-
- mi := newModelInfo(val)
- if mi.fields.pk == nil {
- outFor:
- for _, fi := range mi.fields.fieldsDB {
- if strings.ToLower(fi.name) == "id" {
- switch fi.addrValue.Elem().Kind() {
- case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
- fi.auto = true
- fi.pk = true
- mi.fields.pk = fi
- break outFor
- }
- }
- }
-
- if mi.fields.pk == nil {
- fmt.Printf(" `%s` needs a primary key field, default is to use 'id' if not set\n", name)
- os.Exit(2)
- }
-
- }
-
- mi.table = table
- mi.pkg = typ.PkgPath()
- mi.model = model
- mi.manual = true
-
- modelCache.set(table, mi)
-}
-
-// bootstrap models
-func bootStrap() {
- if modelCache.done {
- return
- }
- var (
- err error
- models map[string]*modelInfo
- )
- if dataBaseCache.getDefault() == nil {
- err = fmt.Errorf("must have one register DataBase alias named `default`")
- goto end
- }
-
- // set rel and reverse model
- // RelManyToMany set the relTable
- models = modelCache.all()
- for _, mi := range models {
- for _, fi := range mi.fields.columns {
- if fi.rel || fi.reverse {
- elm := fi.addrValue.Type().Elem()
- if fi.fieldType == RelReverseMany || fi.fieldType == RelManyToMany {
- elm = elm.Elem()
- }
-			// check that the rel or reverse model is already registered
- name := getFullName(elm)
- mii, ok := modelCache.getByFullName(name)
- if !ok || mii.pkg != elm.PkgPath() {
- err = fmt.Errorf("can not find rel in field `%s`, `%s` may be miss register", fi.fullName, elm.String())
- goto end
- }
- fi.relModelInfo = mii
-
- switch fi.fieldType {
- case RelManyToMany:
- if fi.relThrough != "" {
- if i := strings.LastIndex(fi.relThrough, "."); i != -1 && len(fi.relThrough) > (i+1) {
- pn := fi.relThrough[:i]
- rmi, ok := modelCache.getByFullName(fi.relThrough)
- if !ok || pn != rmi.pkg {
- err = fmt.Errorf("field `%s` wrong rel_through value `%s` cannot find table", fi.fullName, fi.relThrough)
- goto end
- }
- fi.relThroughModelInfo = rmi
- fi.relTable = rmi.table
- } else {
- err = fmt.Errorf("field `%s` wrong rel_through value `%s`", fi.fullName, fi.relThrough)
- goto end
- }
- } else {
- i := newM2MModelInfo(mi, mii)
- if fi.relTable != "" {
- i.table = fi.relTable
- }
- if v := modelCache.set(i.table, i); v != nil {
- err = fmt.Errorf("the rel table name `%s` already registered, cannot be use, please change one", fi.relTable)
- goto end
- }
- fi.relTable = i.table
- fi.relThroughModelInfo = i
- }
-
- fi.relThroughModelInfo.isThrough = true
- }
- }
- }
- }
-
-	// for each rel field, check that the relModelInfo also has a field pointing back to the current model;
-	// if not, add a new reverse field to the relModelInfo
- models = modelCache.all()
- for _, mi := range models {
- for _, fi := range mi.fields.fieldsRel {
- switch fi.fieldType {
- case RelForeignKey, RelOneToOne, RelManyToMany:
- inModel := false
- for _, ffi := range fi.relModelInfo.fields.fieldsReverse {
- if ffi.relModelInfo == mi {
- inModel = true
- break
- }
- }
- if !inModel {
- rmi := fi.relModelInfo
- ffi := new(fieldInfo)
- ffi.name = mi.name
- ffi.column = ffi.name
- ffi.fullName = rmi.fullName + "." + ffi.name
- ffi.reverse = true
- ffi.relModelInfo = mi
- ffi.mi = rmi
- if fi.fieldType == RelOneToOne {
- ffi.fieldType = RelReverseOne
- } else {
- ffi.fieldType = RelReverseMany
- }
- if !rmi.fields.Add(ffi) {
- added := false
- for cnt := 0; cnt < 5; cnt++ {
- ffi.name = fmt.Sprintf("%s%d", mi.name, cnt)
- ffi.column = ffi.name
- ffi.fullName = rmi.fullName + "." + ffi.name
- if added = rmi.fields.Add(ffi); added {
- break
- }
- }
- if !added {
- panic(fmt.Errorf("cannot generate auto reverse field info `%s` to `%s`", fi.fullName, ffi.fullName))
- }
- }
- }
- }
- }
- }
-
- models = modelCache.all()
- for _, mi := range models {
- for _, fi := range mi.fields.fieldsRel {
- switch fi.fieldType {
- case RelManyToMany:
- for _, ffi := range fi.relThroughModelInfo.fields.fieldsRel {
- switch ffi.fieldType {
- case RelOneToOne, RelForeignKey:
- if ffi.relModelInfo == fi.relModelInfo {
- fi.reverseFieldInfoTwo = ffi
- }
- if ffi.relModelInfo == mi {
- fi.reverseField = ffi.name
- fi.reverseFieldInfo = ffi
- }
- }
- }
- if fi.reverseFieldInfoTwo == nil {
- err = fmt.Errorf("can not find m2m field for m2m model `%s`, ensure your m2m model defined correct",
- fi.relThroughModelInfo.fullName)
- goto end
- }
- }
- }
- }
-
- models = modelCache.all()
- for _, mi := range models {
- for _, fi := range mi.fields.fieldsReverse {
- switch fi.fieldType {
- case RelReverseOne:
- found := false
- mForA:
- for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelOneToOne] {
- if ffi.relModelInfo == mi {
- found = true
- fi.reverseField = ffi.name
- fi.reverseFieldInfo = ffi
-
- ffi.reverseField = fi.name
- ffi.reverseFieldInfo = fi
- break mForA
- }
- }
- if !found {
- err = fmt.Errorf("reverse field `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
- goto end
- }
- case RelReverseMany:
- found := false
- mForB:
- for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelForeignKey] {
- if ffi.relModelInfo == mi {
- found = true
- fi.reverseField = ffi.name
- fi.reverseFieldInfo = ffi
-
- ffi.reverseField = fi.name
- ffi.reverseFieldInfo = fi
-
- break mForB
- }
- }
- if !found {
- mForC:
- for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelManyToMany] {
- conditions := fi.relThrough != "" && fi.relThrough == ffi.relThrough ||
- fi.relTable != "" && fi.relTable == ffi.relTable ||
- fi.relThrough == "" && fi.relTable == ""
- if ffi.relModelInfo == mi && conditions {
- found = true
-
- fi.reverseField = ffi.reverseFieldInfoTwo.name
- fi.reverseFieldInfo = ffi.reverseFieldInfoTwo
- fi.relThroughModelInfo = ffi.relThroughModelInfo
- fi.reverseFieldInfoTwo = ffi.reverseFieldInfo
- fi.reverseFieldInfoM2M = ffi
- ffi.reverseFieldInfoM2M = fi
-
- break mForC
- }
- }
- }
- if !found {
- err = fmt.Errorf("reverse field for `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
- goto end
- }
- }
- }
- }
-
-end:
- if err != nil {
- fmt.Println(err)
- debug.PrintStack()
- os.Exit(2)
- }
-}
-
-// RegisterModel register models
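- // e.g. RegisterModel(new(User), new(Post)) registers both models before BootStrap runs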
-func RegisterModel(models ...interface{}) {
- if modelCache.done {
- panic(fmt.Errorf("RegisterModel must be run before BootStrap"))
- }
- RegisterModelWithPrefix("", models...)
-}
-
-// RegisterModelWithPrefix register models with a prefix
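- // e.g. RegisterModelWithPrefix("tenant_", new(User)) would typically yield a table named "tenant_user"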
-func RegisterModelWithPrefix(prefix string, models ...interface{}) {
- if modelCache.done {
- panic(fmt.Errorf("RegisterModelWithPrefix must be run before BootStrap"))
- }
-
- for _, model := range models {
- registerModel(prefix, model, true)
- }
-}
-
-// RegisterModelWithSuffix register models with a suffix
-func RegisterModelWithSuffix(suffix string, models ...interface{}) {
- if modelCache.done {
- panic(fmt.Errorf("RegisterModelWithSuffix must be run before BootStrap"))
- }
-
- for _, model := range models {
- registerModel(suffix, model, false)
- }
-}
-
-// BootStrap bootstrap models.
- // parse all registered models; after this no more models can be added
-func BootStrap() {
- modelCache.Lock()
- defer modelCache.Unlock()
- if modelCache.done {
- return
- }
- bootStrap()
- modelCache.done = true
-}
diff --git a/orm/models_fields.go b/orm/models_fields.go
deleted file mode 100644
index b4fad94f..00000000
--- a/orm/models_fields.go
+++ /dev/null
@@ -1,783 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strconv"
- "time"
-)
-
-// Define the Type enum
-const (
- TypeBooleanField = 1 << iota
- TypeVarCharField
- TypeCharField
- TypeTextField
- TypeTimeField
- TypeDateField
- TypeDateTimeField
- TypeBitField
- TypeSmallIntegerField
- TypeIntegerField
- TypeBigIntegerField
- TypePositiveBitField
- TypePositiveSmallIntegerField
- TypePositiveIntegerField
- TypePositiveBigIntegerField
- TypeFloatField
- TypeDecimalField
- TypeJSONField
- TypeJsonbField
- RelForeignKey
- RelOneToOne
- RelManyToMany
- RelReverseOne
- RelReverseMany
-)
-
-// Define some logic enum
-const (
- IsIntegerField = ^-TypePositiveBigIntegerField >> 6 << 7
- IsPositiveIntegerField = ^-TypePositiveBigIntegerField >> 10 << 11
- IsRelField = ^-RelReverseMany >> 18 << 19
- IsFieldType = ^-RelReverseMany<<1 + 1
-)
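-
- // these masks rely on the 1 << iota layout of the type constants: for example,
- // fieldType&IsIntegerField > 0 reports whether a field is any integer kind, and
- // fieldType&IsRelField > 0 whether it is one of the rel/reverse relation kinds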
-
-// BooleanField A true/false field.
-type BooleanField bool
-
-// Value return the BooleanField
-func (e BooleanField) Value() bool {
- return bool(e)
-}
-
-// Set will set the BooleanField
-func (e *BooleanField) Set(d bool) {
- *e = BooleanField(d)
-}
-
-// String format the Bool to string
-func (e *BooleanField) String() string {
- return strconv.FormatBool(e.Value())
-}
-
-// FieldType return BooleanField the type
-func (e *BooleanField) FieldType() int {
- return TypeBooleanField
-}
-
-// SetRaw set the interface to bool
-func (e *BooleanField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case bool:
- e.Set(d)
- case string:
- v, err := StrTo(d).Bool()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return the current value
-func (e *BooleanField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify the BooleanField implement the Fielder interface
-var _ Fielder = new(BooleanField)
-
-// CharField A string field
-// required values tag: size
-// The size is enforced at the database level and in models’s validation.
-// eg: `orm:"size(120)"`
-type CharField string
-
-// Value return the CharField's Value
-func (e CharField) Value() string {
- return string(e)
-}
-
-// Set CharField value
-func (e *CharField) Set(d string) {
- *e = CharField(d)
-}
-
-// String return the CharField
-func (e *CharField) String() string {
- return e.Value()
-}
-
-// FieldType return the enum type
-func (e *CharField) FieldType() int {
- return TypeVarCharField
-}
-
-// SetRaw set the interface to string
-func (e *CharField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case string:
- e.Set(d)
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return the CharField value
-func (e *CharField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify CharField implement Fielder
-var _ Fielder = new(CharField)
-
-// TimeField A time, represented in go by a time.Time instance.
-// only time values like 10:00:00
-// Has a few extra, optional attr tag:
-//
-// auto_now:
-// Automatically set the field to now every time the object is saved. Useful for “last-modified” timestamps.
-// Note that the current date is always used; it’s not just a default value that you can override.
-//
-// auto_now_add:
-// Automatically set the field to now when the object is first created. Useful for creation of timestamps.
-// Note that the current date is always used; it’s not just a default value that you can override.
-//
-// eg: `orm:"auto_now"` or `orm:"auto_now_add"`
-type TimeField time.Time
-
-// Value return the time.Time
-func (e TimeField) Value() time.Time {
- return time.Time(e)
-}
-
-// Set set the TimeField's value
-func (e *TimeField) Set(d time.Time) {
- *e = TimeField(d)
-}
-
-// String convert time to string
-func (e *TimeField) String() string {
- return e.Value().String()
-}
-
-// FieldType return enum type Date
-func (e *TimeField) FieldType() int {
- return TypeDateField
-}
-
-// SetRaw convert the interface to time.Time. Allow string and time.Time
-func (e *TimeField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case time.Time:
- e.Set(d)
- case string:
- v, err := timeParse(d, formatTime)
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return time value
-func (e *TimeField) RawValue() interface{} {
- return e.Value()
-}
-
-var _ Fielder = new(TimeField)
-
-// DateField A date, represented in go by a time.Time instance.
-// only date values like 2006-01-02
-// Has a few extra, optional attr tag:
-//
-// auto_now:
-// Automatically set the field to now every time the object is saved. Useful for “last-modified” timestamps.
-// Note that the current date is always used; it’s not just a default value that you can override.
-//
-// auto_now_add:
-// Automatically set the field to now when the object is first created. Useful for creation of timestamps.
-// Note that the current date is always used; it’s not just a default value that you can override.
-//
-// eg: `orm:"auto_now"` or `orm:"auto_now_add"`
-type DateField time.Time
-
-// Value return the time.Time
-func (e DateField) Value() time.Time {
- return time.Time(e)
-}
-
-// Set set the DateField's value
-func (e *DateField) Set(d time.Time) {
- *e = DateField(d)
-}
-
-// String convert datetime to string
-func (e *DateField) String() string {
- return e.Value().String()
-}
-
-// FieldType return enum type Date
-func (e *DateField) FieldType() int {
- return TypeDateField
-}
-
-// SetRaw convert the interface to time.Time. Allow string and time.Time
-func (e *DateField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case time.Time:
- e.Set(d)
- case string:
- v, err := timeParse(d, formatDate)
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return Date value
-func (e *DateField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify DateField implement fielder interface
-var _ Fielder = new(DateField)
-
-// DateTimeField A date, represented in go by a time.Time instance.
-// datetime values like 2006-01-02 15:04:05
-// Takes the same extra arguments as DateField.
-type DateTimeField time.Time
-
-// Value return the datetime value
-func (e DateTimeField) Value() time.Time {
- return time.Time(e)
-}
-
-// Set set the time.Time to datetime
-func (e *DateTimeField) Set(d time.Time) {
- *e = DateTimeField(d)
-}
-
-// String return the time's String
-func (e *DateTimeField) String() string {
- return e.Value().String()
-}
-
-// FieldType return the enum TypeDateTimeField
-func (e *DateTimeField) FieldType() int {
- return TypeDateTimeField
-}
-
-// SetRaw convert the string or time.Time to DateTimeField
-func (e *DateTimeField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case time.Time:
- e.Set(d)
- case string:
- v, err := timeParse(d, formatDateTime)
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return the datetime value
-func (e *DateTimeField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify datetime implement fielder
-var _ Fielder = new(DateTimeField)
-
- // FloatField A floating-point number represented in go by a float64 value.
-type FloatField float64
-
-// Value return the FloatField value
-func (e FloatField) Value() float64 {
- return float64(e)
-}
-
-// Set the Float64
-func (e *FloatField) Set(d float64) {
- *e = FloatField(d)
-}
-
-// String return the string
-func (e *FloatField) String() string {
- return ToStr(e.Value(), -1, 32)
-}
-
-// FieldType return the enum type
-func (e *FloatField) FieldType() int {
- return TypeFloatField
-}
-
-// SetRaw converter interface Float64 float32 or string to FloatField
-func (e *FloatField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case float32:
- e.Set(float64(d))
- case float64:
- e.Set(d)
- case string:
- v, err := StrTo(d).Float64()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return the FloatField value
-func (e *FloatField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify FloatField implement Fielder
-var _ Fielder = new(FloatField)
-
-// SmallIntegerField -32768 to 32767
-type SmallIntegerField int16
-
-// Value return int16 value
-func (e SmallIntegerField) Value() int16 {
- return int16(e)
-}
-
-// Set the SmallIntegerField value
-func (e *SmallIntegerField) Set(d int16) {
- *e = SmallIntegerField(d)
-}
-
-// String convert smallint to string
-func (e *SmallIntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return enum type SmallIntegerField
-func (e *SmallIntegerField) FieldType() int {
- return TypeSmallIntegerField
-}
-
-// SetRaw convert interface int16/string to int16
-func (e *SmallIntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case int16:
- e.Set(d)
- case string:
- v, err := StrTo(d).Int16()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return smallint value
-func (e *SmallIntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify SmallIntegerField implement Fielder
-var _ Fielder = new(SmallIntegerField)
-
-// IntegerField -2147483648 to 2147483647
-type IntegerField int32
-
-// Value return the int32
-func (e IntegerField) Value() int32 {
- return int32(e)
-}
-
-// Set IntegerField value
-func (e *IntegerField) Set(d int32) {
- *e = IntegerField(d)
-}
-
-// String convert Int32 to string
-func (e *IntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return the enum type
-func (e *IntegerField) FieldType() int {
- return TypeIntegerField
-}
-
-// SetRaw convert interface int32/string to int32
-func (e *IntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case int32:
- e.Set(d)
- case string:
- v, err := StrTo(d).Int32()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return IntegerField value
-func (e *IntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify IntegerField implement Fielder
-var _ Fielder = new(IntegerField)
-
-// BigIntegerField -9223372036854775808 to 9223372036854775807.
-type BigIntegerField int64
-
-// Value return int64
-func (e BigIntegerField) Value() int64 {
- return int64(e)
-}
-
-// Set the BigIntegerField value
-func (e *BigIntegerField) Set(d int64) {
- *e = BigIntegerField(d)
-}
-
-// String convert BigIntegerField to string
-func (e *BigIntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return enum type
-func (e *BigIntegerField) FieldType() int {
- return TypeBigIntegerField
-}
-
-// SetRaw convert interface int64/string to int64
-func (e *BigIntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case int64:
- e.Set(d)
- case string:
- v, err := StrTo(d).Int64()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return BigIntegerField value
-func (e *BigIntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify BigIntegerField implement Fielder
-var _ Fielder = new(BigIntegerField)
-
-// PositiveSmallIntegerField 0 to 65535
-type PositiveSmallIntegerField uint16
-
-// Value return uint16
-func (e PositiveSmallIntegerField) Value() uint16 {
- return uint16(e)
-}
-
-// Set PositiveSmallIntegerField value
-func (e *PositiveSmallIntegerField) Set(d uint16) {
- *e = PositiveSmallIntegerField(d)
-}
-
-// String convert uint16 to string
-func (e *PositiveSmallIntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return enum type
-func (e *PositiveSmallIntegerField) FieldType() int {
- return TypePositiveSmallIntegerField
-}
-
-// SetRaw convert Interface uint16/string to uint16
-func (e *PositiveSmallIntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case uint16:
- e.Set(d)
- case string:
- v, err := StrTo(d).Uint16()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue returns PositiveSmallIntegerField value
-func (e *PositiveSmallIntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify PositiveSmallIntegerField implement Fielder
-var _ Fielder = new(PositiveSmallIntegerField)
-
-// PositiveIntegerField 0 to 4294967295
-type PositiveIntegerField uint32
-
-// Value return PositiveIntegerField value. Uint32
-func (e PositiveIntegerField) Value() uint32 {
- return uint32(e)
-}
-
-// Set the PositiveIntegerField value
-func (e *PositiveIntegerField) Set(d uint32) {
- *e = PositiveIntegerField(d)
-}
-
-// String convert PositiveIntegerField to string
-func (e *PositiveIntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return enum type
-func (e *PositiveIntegerField) FieldType() int {
- return TypePositiveIntegerField
-}
-
-// SetRaw convert interface uint32/string to Uint32
-func (e *PositiveIntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case uint32:
- e.Set(d)
- case string:
- v, err := StrTo(d).Uint32()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return the PositiveIntegerField Value
-func (e *PositiveIntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify PositiveIntegerField implement Fielder
-var _ Fielder = new(PositiveIntegerField)
-
-// PositiveBigIntegerField 0 to 18446744073709551615
-type PositiveBigIntegerField uint64
-
-// Value return uint64
-func (e PositiveBigIntegerField) Value() uint64 {
- return uint64(e)
-}
-
-// Set PositiveBigIntegerField value
-func (e *PositiveBigIntegerField) Set(d uint64) {
- *e = PositiveBigIntegerField(d)
-}
-
-// String convert PositiveBigIntegerField to string
-func (e *PositiveBigIntegerField) String() string {
- return ToStr(e.Value())
-}
-
-// FieldType return enum type
-func (e *PositiveBigIntegerField) FieldType() int {
- return TypePositiveBigIntegerField
-}
-
-// SetRaw convert interface uint64/string to Uint64
-func (e *PositiveBigIntegerField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case uint64:
- e.Set(d)
- case string:
- v, err := StrTo(d).Uint64()
- if err == nil {
- e.Set(v)
- }
- return err
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return PositiveBigIntegerField value
-func (e *PositiveBigIntegerField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify PositiveBigIntegerField implement Fielder
-var _ Fielder = new(PositiveBigIntegerField)
-
-// TextField A large text field.
-type TextField string
-
-// Value return TextField value
-func (e TextField) Value() string {
- return string(e)
-}
-
-// Set the TextField value
-func (e *TextField) Set(d string) {
- *e = TextField(d)
-}
-
-// String convert TextField to string
-func (e *TextField) String() string {
- return e.Value()
-}
-
-// FieldType return enum type
-func (e *TextField) FieldType() int {
- return TypeTextField
-}
-
-// SetRaw convert interface string to string
-func (e *TextField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case string:
- e.Set(d)
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return TextField value
-func (e *TextField) RawValue() interface{} {
- return e.Value()
-}
-
-// verify TextField implement Fielder
-var _ Fielder = new(TextField)
-
-// JSONField postgres json field.
-type JSONField string
-
-// Value return JSONField value
-func (j JSONField) Value() string {
- return string(j)
-}
-
-// Set the JSONField value
-func (j *JSONField) Set(d string) {
- *j = JSONField(d)
-}
-
-// String convert JSONField to string
-func (j *JSONField) String() string {
- return j.Value()
-}
-
-// FieldType return enum type
-func (j *JSONField) FieldType() int {
- return TypeJSONField
-}
-
-// SetRaw convert interface string to string
-func (j *JSONField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case string:
- j.Set(d)
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return JSONField value
-func (j *JSONField) RawValue() interface{} {
- return j.Value()
-}
-
-// verify JSONField implement Fielder
-var _ Fielder = new(JSONField)
-
-// JsonbField postgres json field.
-type JsonbField string
-
-// Value return JsonbField value
-func (j JsonbField) Value() string {
- return string(j)
-}
-
-// Set the JsonbField value
-func (j *JsonbField) Set(d string) {
- *j = JsonbField(d)
-}
-
-// String convert JsonbField to string
-func (j *JsonbField) String() string {
- return j.Value()
-}
-
-// FieldType return enum type
-func (j *JsonbField) FieldType() int {
- return TypeJsonbField
-}
-
-// SetRaw convert interface string to string
-func (j *JsonbField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case string:
- j.Set(d)
- default:
- return fmt.Errorf(" unknown value `%s`", value)
- }
- return nil
-}
-
-// RawValue return JsonbField value
-func (j *JsonbField) RawValue() interface{} {
- return j.Value()
-}
-
-// verify JsonbField implement Fielder
-var _ Fielder = new(JsonbField)
diff --git a/orm/models_info_f.go b/orm/models_info_f.go
deleted file mode 100644
index 7044b0bd..00000000
--- a/orm/models_info_f.go
+++ /dev/null
@@ -1,473 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
-)
-
-var errSkipField = errors.New("skip field")
-
-// field info collection
-type fields struct {
- pk *fieldInfo
- columns map[string]*fieldInfo
- fields map[string]*fieldInfo
- fieldsLow map[string]*fieldInfo
- fieldsByType map[int][]*fieldInfo
- fieldsRel []*fieldInfo
- fieldsReverse []*fieldInfo
- fieldsDB []*fieldInfo
- rels []*fieldInfo
- orders []string
- dbcols []string
-}
-
-// add field info
-func (f *fields) Add(fi *fieldInfo) (added bool) {
- if f.fields[fi.name] == nil && f.columns[fi.column] == nil {
- f.columns[fi.column] = fi
- f.fields[fi.name] = fi
- f.fieldsLow[strings.ToLower(fi.name)] = fi
- } else {
- return
- }
- if _, ok := f.fieldsByType[fi.fieldType]; !ok {
- f.fieldsByType[fi.fieldType] = make([]*fieldInfo, 0)
- }
- f.fieldsByType[fi.fieldType] = append(f.fieldsByType[fi.fieldType], fi)
- f.orders = append(f.orders, fi.column)
- if fi.dbcol {
- f.dbcols = append(f.dbcols, fi.column)
- f.fieldsDB = append(f.fieldsDB, fi)
- }
- if fi.rel {
- f.fieldsRel = append(f.fieldsRel, fi)
- }
- if fi.reverse {
- f.fieldsReverse = append(f.fieldsReverse, fi)
- }
- return true
-}
-
-// get field info by name
-func (f *fields) GetByName(name string) *fieldInfo {
- return f.fields[name]
-}
-
-// get field info by column name
-func (f *fields) GetByColumn(column string) *fieldInfo {
- return f.columns[column]
-}
-
- // get field info by name or column; the field name takes priority
-func (f *fields) GetByAny(name string) (*fieldInfo, bool) {
- if fi, ok := f.fields[name]; ok {
- return fi, ok
- }
- if fi, ok := f.fieldsLow[strings.ToLower(name)]; ok {
- return fi, ok
- }
- if fi, ok := f.columns[name]; ok {
- return fi, ok
- }
- return nil, false
-}
-
-// create new field info collection
-func newFields() *fields {
- f := new(fields)
- f.fields = make(map[string]*fieldInfo)
- f.fieldsLow = make(map[string]*fieldInfo)
- f.columns = make(map[string]*fieldInfo)
- f.fieldsByType = make(map[int][]*fieldInfo)
- return f
-}
-
-// single field info
-type fieldInfo struct {
- mi *modelInfo
- fieldIndex []int
- fieldType int
- dbcol bool // true if the field maps to a real table column (including fk and onetoone)
- inModel bool
- name string
- fullName string
- column string
- addrValue reflect.Value
- sf reflect.StructField
- auto bool
- pk bool
- null bool
- index bool
- unique bool
- colDefault bool // whether has default tag
- initial StrTo // store the default value
- size int
- toText bool
- autoNow bool
- autoNowAdd bool
- rel bool // true if the type equals RelForeignKey, RelOneToOne or RelManyToMany
- reverse bool
- reverseField string
- reverseFieldInfo *fieldInfo
- reverseFieldInfoTwo *fieldInfo
- reverseFieldInfoM2M *fieldInfo
- relTable string
- relThrough string
- relThroughModelInfo *modelInfo
- relModelInfo *modelInfo
- digits int
- decimals int
- isFielder bool // implement Fielder interface
- onDelete string
- description string
-}
-
-// new field info
-func newFieldInfo(mi *modelInfo, field reflect.Value, sf reflect.StructField, mName string) (fi *fieldInfo, err error) {
- var (
- tag string
- tagValue string
- initial StrTo // store the default value
- fieldType int
- attrs map[string]bool
- tags map[string]string
- addrField reflect.Value
- )
-
- fi = new(fieldInfo)
-
- // take the address of the field when it is addressable:
- // A value is addressable if it is an element of a slice,
- // an element of an addressable array, a field of an
- // addressable struct, or the result of dereferencing a pointer.
- addrField = field
- if field.CanAddr() && field.Kind() != reflect.Ptr {
- addrField = field.Addr()
- if _, ok := addrField.Interface().(Fielder); !ok {
- if field.Kind() == reflect.Slice {
- addrField = field
- }
- }
- }
-
- attrs, tags = parseStructTag(sf.Tag.Get(defaultStructTagName))
-
- if _, ok := attrs["-"]; ok {
- return nil, errSkipField
- }
-
- digits := tags["digits"]
- decimals := tags["decimals"]
- size := tags["size"]
- onDelete := tags["on_delete"]
-
- initial.Clear()
- if v, ok := tags["default"]; ok {
- initial.Set(v)
- }
-
-checkType:
- switch f := addrField.Interface().(type) {
- case Fielder:
- fi.isFielder = true
- if field.Kind() == reflect.Ptr {
- err = fmt.Errorf("the model Fielder can not be use ptr")
- goto end
- }
- fieldType = f.FieldType()
- if fieldType&IsRelField > 0 {
- err = fmt.Errorf("unsupport type custom field, please refer to https://github.com/astaxie/beego/blob/master/orm/models_fields.go#L24-L42")
- goto end
- }
- default:
- tag = "rel"
- tagValue = tags[tag]
- if tagValue != "" {
- switch tagValue {
- case "fk":
- fieldType = RelForeignKey
- break checkType
- case "one":
- fieldType = RelOneToOne
- break checkType
- case "m2m":
- fieldType = RelManyToMany
- if tv := tags["rel_table"]; tv != "" {
- fi.relTable = tv
- } else if tv := tags["rel_through"]; tv != "" {
- fi.relThrough = tv
- }
- break checkType
- default:
- err = fmt.Errorf("rel only allow these value: fk, one, m2m")
- goto wrongTag
- }
- }
- tag = "reverse"
- tagValue = tags[tag]
- if tagValue != "" {
- switch tagValue {
- case "one":
- fieldType = RelReverseOne
- break checkType
- case "many":
- fieldType = RelReverseMany
- if tv := tags["rel_table"]; tv != "" {
- fi.relTable = tv
- } else if tv := tags["rel_through"]; tv != "" {
- fi.relThrough = tv
- }
- break checkType
- default:
- err = fmt.Errorf("reverse only allow these value: one, many")
- goto wrongTag
- }
- }
-
- fieldType, err = getFieldType(addrField)
- if err != nil {
- goto end
- }
- if fieldType == TypeVarCharField {
- switch tags["type"] {
- case "char":
- fieldType = TypeCharField
- case "text":
- fieldType = TypeTextField
- case "json":
- fieldType = TypeJSONField
- case "jsonb":
- fieldType = TypeJsonbField
- }
- }
- if fieldType == TypeFloatField && (digits != "" || decimals != "") {
- fieldType = TypeDecimalField
- }
- if fieldType == TypeDateTimeField && tags["type"] == "date" {
- fieldType = TypeDateField
- }
- if fieldType == TypeDateTimeField && tags["type"] == "time" {
- fieldType = TypeTimeField
- }
- }
-
- // check the rel and reverse type
- // rel should Ptr
- // reverse should slice []*struct
- switch fieldType {
- case RelForeignKey, RelOneToOne, RelReverseOne:
- if field.Kind() != reflect.Ptr {
- err = fmt.Errorf("rel/reverse:one field must be *%s", field.Type().Name())
- goto end
- }
- case RelManyToMany, RelReverseMany:
- if field.Kind() != reflect.Slice {
- err = fmt.Errorf("rel/reverse:many field must be slice")
- goto end
- } else {
- if field.Type().Elem().Kind() != reflect.Ptr {
- err = fmt.Errorf("rel/reverse:many slice must be []*%s", field.Type().Elem().Name())
- goto end
- }
- }
- }
-
- if fieldType&IsFieldType == 0 {
- err = fmt.Errorf("wrong field type")
- goto end
- }
-
- fi.fieldType = fieldType
- fi.name = sf.Name
- fi.column = getColumnName(fieldType, addrField, sf, tags["column"])
- fi.addrValue = addrField
- fi.sf = sf
- fi.fullName = mi.fullName + mName + "." + sf.Name
-
- fi.description = tags["description"]
- fi.null = attrs["null"]
- fi.index = attrs["index"]
- fi.auto = attrs["auto"]
- fi.pk = attrs["pk"]
- fi.unique = attrs["unique"]
-
- // mark the field as having a default when the orm tag contains a "default" setting
- if _, ok := tags["default"]; ok {
- fi.colDefault = true
- }
-
- switch fieldType {
- case RelManyToMany, RelReverseMany, RelReverseOne:
- fi.null = false
- fi.index = false
- fi.auto = false
- fi.pk = false
- fi.unique = false
- default:
- fi.dbcol = true
- }
-
- switch fieldType {
- case RelForeignKey, RelOneToOne, RelManyToMany:
- fi.rel = true
- if fieldType == RelOneToOne {
- fi.unique = true
- }
- case RelReverseMany, RelReverseOne:
- fi.reverse = true
- }
-
- if fi.rel && fi.dbcol {
- switch onDelete {
- case odCascade, odDoNothing:
- case odSetDefault:
- if !initial.Exist() {
- err = errors.New("on_delete: set_default need set field a default value")
- goto end
- }
- case odSetNULL:
- if !fi.null {
- err = errors.New("on_delete: set_null need set field null")
- goto end
- }
- default:
- if onDelete == "" {
- onDelete = odCascade
- } else {
- err = fmt.Errorf("on_delete value expected choice in `cascade,set_null,set_default,do_nothing`, unknown `%s`", onDelete)
- goto end
- }
- }
-
- fi.onDelete = onDelete
- }
-
- switch fieldType {
- case TypeBooleanField:
- case TypeVarCharField, TypeCharField, TypeJSONField, TypeJsonbField:
- if size != "" {
- v, e := StrTo(size).Int32()
- if e != nil {
- err = fmt.Errorf("wrong size value `%s`", size)
- } else {
- fi.size = int(v)
- }
- } else {
- fi.size = 255
- fi.toText = true
- }
- case TypeTextField:
- fi.index = false
- fi.unique = false
- case TypeTimeField, TypeDateField, TypeDateTimeField:
- if attrs["auto_now"] {
- fi.autoNow = true
- } else if attrs["auto_now_add"] {
- fi.autoNowAdd = true
- }
- case TypeFloatField:
- case TypeDecimalField:
- d1 := digits
- d2 := decimals
- v1, er1 := StrTo(d1).Int8()
- v2, er2 := StrTo(d2).Int8()
- if er1 != nil || er2 != nil {
- err = fmt.Errorf("wrong digits/decimals value %s/%s", d2, d1)
- goto end
- }
- fi.digits = int(v1)
- fi.decimals = int(v2)
- default:
- switch {
- case fieldType&IsIntegerField > 0:
- case fieldType&IsRelField > 0:
- }
- }
-
- if fieldType&IsIntegerField == 0 {
- if fi.auto {
- err = fmt.Errorf("non-integer type cannot set auto")
- goto end
- }
- }
-
- if fi.auto || fi.pk {
- if fi.auto {
- switch addrField.Elem().Kind() {
- case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
- default:
- err = fmt.Errorf("auto primary key only support int, int32, int64, uint, uint32, uint64 but found `%s`", addrField.Elem().Kind())
- goto end
- }
- fi.pk = true
- }
- fi.null = false
- fi.index = false
- fi.unique = false
- }
-
- if fi.unique {
- fi.index = false
- }
-
- // a default cannot be set for these types
- if fi.auto || fi.pk || fi.unique || fieldType == TypeTimeField || fieldType == TypeDateField || fieldType == TypeDateTimeField {
- initial.Clear()
- }
-
- if initial.Exist() {
- v := initial
- switch fieldType {
- case TypeBooleanField:
- _, err = v.Bool()
- case TypeFloatField, TypeDecimalField:
- _, err = v.Float64()
- case TypeBitField:
- _, err = v.Int8()
- case TypeSmallIntegerField:
- _, err = v.Int16()
- case TypeIntegerField:
- _, err = v.Int32()
- case TypeBigIntegerField:
- _, err = v.Int64()
- case TypePositiveBitField:
- _, err = v.Uint8()
- case TypePositiveSmallIntegerField:
- _, err = v.Uint16()
- case TypePositiveIntegerField:
- _, err = v.Uint32()
- case TypePositiveBigIntegerField:
- _, err = v.Uint64()
- }
- if err != nil {
- tag, tagValue = "default", tags["default"]
- goto wrongTag
- }
- }
-
- fi.initial = initial
-end:
- if err != nil {
- return nil, err
- }
- return
-wrongTag:
- return nil, fmt.Errorf("wrong tag format: `%s:\"%s\"`, %s", tag, tagValue, err)
-}
diff --git a/orm/models_info_m.go b/orm/models_info_m.go
deleted file mode 100644
index a4d733b6..00000000
--- a/orm/models_info_m.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "os"
- "reflect"
-)
-
-// single model info
-type modelInfo struct {
- pkg string
- name string
- fullName string
- table string
- model interface{}
- fields *fields
- manual bool
- addrField reflect.Value // store the original struct value
- uniques []string
- isThrough bool
-}
-
-// new model info
-func newModelInfo(val reflect.Value) (mi *modelInfo) {
- mi = &modelInfo{}
- mi.fields = newFields()
- ind := reflect.Indirect(val)
- mi.addrField = val
- mi.name = ind.Type().Name()
- mi.fullName = getFullName(ind.Type())
- addModelFields(mi, ind, "", []int{})
- return
-}
-
-// index: FieldByIndex returns the nested field corresponding to index
-func addModelFields(mi *modelInfo, ind reflect.Value, mName string, index []int) {
- var (
- err error
- fi *fieldInfo
- sf reflect.StructField
- )
-
- for i := 0; i < ind.NumField(); i++ {
- field := ind.Field(i)
- sf = ind.Type().Field(i)
- // if the field is unexported skip
- if sf.PkgPath != "" {
- continue
- }
- // add anonymous struct fields
- if sf.Anonymous {
- addModelFields(mi, field, mName+"."+sf.Name, append(index, i))
- continue
- }
-
- fi, err = newFieldInfo(mi, field, sf, mName)
- if err == errSkipField {
- err = nil
- continue
- } else if err != nil {
- break
- }
- // record the current field index
- fi.fieldIndex = append(fi.fieldIndex, index...)
- fi.fieldIndex = append(fi.fieldIndex, i)
- fi.mi = mi
- fi.inModel = true
- if !mi.fields.Add(fi) {
- err = fmt.Errorf("duplicate column name: %s", fi.column)
- break
- }
- if fi.pk {
- if mi.fields.pk != nil {
- err = fmt.Errorf("one model must have one pk field only")
- break
- } else {
- mi.fields.pk = fi
- }
- }
- }
-
- if err != nil {
- fmt.Println(fmt.Errorf("field: %s.%s, %s", ind.Type(), sf.Name, err))
- os.Exit(2)
- }
-}
-
- // combine two related model infos into a new, implicit m2m model info,
- // used when querying relation models.
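- // e.g. combining models with tables "post" and "tag" yields an implicit
- // through table "post_tags" with columns id, post_id and tag_id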
-func newM2MModelInfo(m1, m2 *modelInfo) (mi *modelInfo) {
- mi = new(modelInfo)
- mi.fields = newFields()
- mi.table = m1.table + "_" + m2.table + "s"
- mi.name = camelString(mi.table)
- mi.fullName = m1.pkg + "." + mi.name
-
- fa := new(fieldInfo) // pk
- f1 := new(fieldInfo) // m1 table RelForeignKey
- f2 := new(fieldInfo) // m2 table RelForeignKey
- fa.fieldType = TypeBigIntegerField
- fa.auto = true
- fa.pk = true
- fa.dbcol = true
- fa.name = "Id"
- fa.column = "id"
- fa.fullName = mi.fullName + "." + fa.name
-
- f1.dbcol = true
- f2.dbcol = true
- f1.fieldType = RelForeignKey
- f2.fieldType = RelForeignKey
- f1.name = camelString(m1.table)
- f2.name = camelString(m2.table)
- f1.fullName = mi.fullName + "." + f1.name
- f2.fullName = mi.fullName + "." + f2.name
- f1.column = m1.table + "_id"
- f2.column = m2.table + "_id"
- f1.rel = true
- f2.rel = true
- f1.relTable = m1.table
- f2.relTable = m2.table
- f1.relModelInfo = m1
- f2.relModelInfo = m2
- f1.mi = mi
- f2.mi = mi
-
- mi.fields.Add(fa)
- mi.fields.Add(f1)
- mi.fields.Add(f2)
- mi.fields.pk = fa
-
- mi.uniques = []string{f1.column, f2.column}
- return
-}
diff --git a/orm/models_test.go b/orm/models_test.go
deleted file mode 100644
index e3a635f2..00000000
--- a/orm/models_test.go
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "database/sql"
- "encoding/json"
- "fmt"
- "os"
- "strings"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- _ "github.com/lib/pq"
- _ "github.com/mattn/go-sqlite3"
- // tidb can't be fetched with go get, so tidb testing is disabled for now
- // _ "github.com/pingcap/tidb"
-)
-
-// A slice string field.
-type SliceStringField []string
-
-func (e SliceStringField) Value() []string {
- return []string(e)
-}
-
-func (e *SliceStringField) Set(d []string) {
- *e = SliceStringField(d)
-}
-
-func (e *SliceStringField) Add(v string) {
- *e = append(*e, v)
-}
-
-func (e *SliceStringField) String() string {
- return strings.Join(e.Value(), ",")
-}
-
-func (e *SliceStringField) FieldType() int {
- return TypeVarCharField
-}
-
-func (e *SliceStringField) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case []string:
- e.Set(d)
- case string:
- if len(d) > 0 {
- parts := strings.Split(d, ",")
- v := make([]string, 0, len(parts))
- for _, p := range parts {
- v = append(v, strings.TrimSpace(p))
- }
- e.Set(v)
- }
- default:
- return fmt.Errorf(" unknown value `%v`", value)
- }
- return nil
-}
-
-func (e *SliceStringField) RawValue() interface{} {
- return e.String()
-}
-
-var _ Fielder = new(SliceStringField)
-
-// A json field.
-type JSONFieldTest struct {
- Name string
- Data string
-}
-
-func (e *JSONFieldTest) String() string {
- data, _ := json.Marshal(e)
- return string(data)
-}
-
-func (e *JSONFieldTest) FieldType() int {
- return TypeTextField
-}
-
-func (e *JSONFieldTest) SetRaw(value interface{}) error {
- switch d := value.(type) {
- case string:
- return json.Unmarshal([]byte(d), e)
- default:
- return fmt.Errorf(" unknown value `%v`", value)
- }
-}
-
-func (e *JSONFieldTest) RawValue() interface{} {
- return e.String()
-}
-
-var _ Fielder = new(JSONFieldTest)
-
-type Data struct {
- ID int `orm:"column(id)"`
- Boolean bool
- Char string `orm:"size(50)"`
- Text string `orm:"type(text)"`
- JSON string `orm:"type(json);default({\"name\":\"json\"})"`
- Jsonb string `orm:"type(jsonb)"`
- Time time.Time `orm:"type(time)"`
- Date time.Time `orm:"type(date)"`
- DateTime time.Time `orm:"column(datetime)"`
- Byte byte
- Rune rune
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- Uint uint
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- Float32 float32
- Float64 float64
- Decimal float64 `orm:"digits(8);decimals(4)"`
-}
-
-type DataNull struct {
- ID int `orm:"column(id)"`
- Boolean bool `orm:"null"`
- Char string `orm:"null;size(50)"`
- Text string `orm:"null;type(text)"`
- JSON string `orm:"type(json);null"`
- Jsonb string `orm:"type(jsonb);null"`
- Time time.Time `orm:"null;type(time)"`
- Date time.Time `orm:"null;type(date)"`
- DateTime time.Time `orm:"null;column(datetime)"`
- Byte byte `orm:"null"`
- Rune rune `orm:"null"`
- Int int `orm:"null"`
- Int8 int8 `orm:"null"`
- Int16 int16 `orm:"null"`
- Int32 int32 `orm:"null"`
- Int64 int64 `orm:"null"`
- Uint uint `orm:"null"`
- Uint8 uint8 `orm:"null"`
- Uint16 uint16 `orm:"null"`
- Uint32 uint32 `orm:"null"`
- Uint64 uint64 `orm:"null"`
- Float32 float32 `orm:"null"`
- Float64 float64 `orm:"null"`
- Decimal float64 `orm:"digits(8);decimals(4);null"`
- NullString sql.NullString `orm:"null"`
- NullBool sql.NullBool `orm:"null"`
- NullFloat64 sql.NullFloat64 `orm:"null"`
- NullInt64 sql.NullInt64 `orm:"null"`
- BooleanPtr *bool `orm:"null"`
- CharPtr *string `orm:"null;size(50)"`
- TextPtr *string `orm:"null;type(text)"`
- BytePtr *byte `orm:"null"`
- RunePtr *rune `orm:"null"`
- IntPtr *int `orm:"null"`
- Int8Ptr *int8 `orm:"null"`
- Int16Ptr *int16 `orm:"null"`
- Int32Ptr *int32 `orm:"null"`
- Int64Ptr *int64 `orm:"null"`
- UintPtr *uint `orm:"null"`
- Uint8Ptr *uint8 `orm:"null"`
- Uint16Ptr *uint16 `orm:"null"`
- Uint32Ptr *uint32 `orm:"null"`
- Uint64Ptr *uint64 `orm:"null"`
- Float32Ptr *float32 `orm:"null"`
- Float64Ptr *float64 `orm:"null"`
- DecimalPtr *float64 `orm:"digits(8);decimals(4);null"`
- TimePtr *time.Time `orm:"null;type(time)"`
- DatePtr *time.Time `orm:"null;type(date)"`
- DateTimePtr *time.Time `orm:"null"`
-}
-
-type String string
-type Boolean bool
-type Byte byte
-type Rune rune
-type Int int
-type Int8 int8
-type Int16 int16
-type Int32 int32
-type Int64 int64
-type Uint uint
-type Uint8 uint8
-type Uint16 uint16
-type Uint32 uint32
-type Uint64 uint64
-type Float32 float64
-type Float64 float64
-
-type DataCustom struct {
- ID int `orm:"column(id)"`
- Boolean Boolean
- Char string `orm:"size(50)"`
- Text string `orm:"type(text)"`
- Byte Byte
- Rune Rune
- Int Int
- Int8 Int8
- Int16 Int16
- Int32 Int32
- Int64 Int64
- Uint Uint
- Uint8 Uint8
- Uint16 Uint16
- Uint32 Uint32
- Uint64 Uint64
- Float32 Float32
- Float64 Float64
- Decimal Float64 `orm:"digits(8);decimals(4)"`
-}
-
-// only for mysql
-type UserBig struct {
- ID uint64 `orm:"column(id)"`
- Name string
-}
-
-type User struct {
- ID int `orm:"column(id)"`
- UserName string `orm:"size(30);unique"`
- Email string `orm:"size(100)"`
- Password string `orm:"size(100)"`
- Status int16 `orm:"column(Status)"`
- IsStaff bool
- IsActive bool `orm:"default(true)"`
- Created time.Time `orm:"auto_now_add;type(date)"`
- Updated time.Time `orm:"auto_now"`
- Profile *Profile `orm:"null;rel(one);on_delete(set_null)"`
- Posts []*Post `orm:"reverse(many)" json:"-"`
- ShouldSkip string `orm:"-"`
- Nums int
- Langs SliceStringField `orm:"size(100)"`
- Extra JSONFieldTest `orm:"type(text)"`
- unexport bool `orm:"-"`
- unexportBool bool
-}
-
-func (u *User) TableIndex() [][]string {
- return [][]string{
- {"Id", "UserName"},
- {"Id", "Created"},
- }
-}
-
-func (u *User) TableUnique() [][]string {
- return [][]string{
- {"UserName", "Email"},
- }
-}
-
-func NewUser() *User {
- obj := new(User)
- return obj
-}
-
-type Profile struct {
- ID int `orm:"column(id)"`
- Age int16
- Money float64
- User *User `orm:"reverse(one)" json:"-"`
- BestPost *Post `orm:"rel(one);null"`
-}
-
-func (u *Profile) TableName() string {
- return "user_profile"
-}
-
-func NewProfile() *Profile {
- obj := new(Profile)
- return obj
-}
-
-type Post struct {
- ID int `orm:"column(id)"`
- User *User `orm:"rel(fk)"`
- Title string `orm:"size(60)"`
- Content string `orm:"type(text)"`
- Created time.Time `orm:"auto_now_add"`
- Updated time.Time `orm:"auto_now"`
- Tags []*Tag `orm:"rel(m2m);rel_through(github.com/astaxie/beego/orm.PostTags)"`
-}
-
-func (u *Post) TableIndex() [][]string {
- return [][]string{
- {"Id", "Created"},
- }
-}
-
-func NewPost() *Post {
- obj := new(Post)
- return obj
-}
-
-type Tag struct {
- ID int `orm:"column(id)"`
- Name string `orm:"size(30)"`
- BestPost *Post `orm:"rel(one);null"`
- Posts []*Post `orm:"reverse(many)" json:"-"`
-}
-
-func NewTag() *Tag {
- obj := new(Tag)
- return obj
-}
-
-type PostTags struct {
- ID int `orm:"column(id)"`
- Post *Post `orm:"rel(fk)"`
- Tag *Tag `orm:"rel(fk)"`
-}
-
-func (m *PostTags) TableName() string {
- return "prefix_post_tags"
-}
-
-type Comment struct {
- ID int `orm:"column(id)"`
- Post *Post `orm:"rel(fk);column(post)"`
- Content string `orm:"type(text)"`
- Parent *Comment `orm:"null;rel(fk)"`
- Created time.Time `orm:"auto_now_add"`
-}
-
-func NewComment() *Comment {
- obj := new(Comment)
- return obj
-}
-
-type Group struct {
- ID int `orm:"column(gid);size(32)"`
- Name string
- Permissions []*Permission `orm:"reverse(many)" json:"-"`
-}
-
-type Permission struct {
- ID int `orm:"column(id)"`
- Name string
- Groups []*Group `orm:"rel(m2m);rel_through(github.com/astaxie/beego/orm.GroupPermissions)"`
-}
-
-type GroupPermissions struct {
- ID int `orm:"column(id)"`
- Group *Group `orm:"rel(fk)"`
- Permission *Permission `orm:"rel(fk)"`
-}
-
-type ModelID struct {
- ID int64
-}
-
-type ModelBase struct {
- ModelID
-
- Created time.Time `orm:"auto_now_add;type(datetime)"`
- Updated time.Time `orm:"auto_now;type(datetime)"`
-}
-
-type InLine struct {
- // Common Fields
- ModelBase
-
- // Other Fields
- Name string `orm:"unique"`
- Email string
-}
-
-func NewInLine() *InLine {
- return new(InLine)
-}
-
-type InLineOneToOne struct {
- // Common Fields
- ModelBase
-
- Note string
- InLine *InLine `orm:"rel(fk);column(inline)"`
-}
-
-func NewInLineOneToOne() *InLineOneToOne {
- return new(InLineOneToOne)
-}
-
-type IntegerPk struct {
- ID int64 `orm:"pk"`
- Value string
-}
-
-type UintPk struct {
- ID uint32 `orm:"pk"`
- Name string
-}
-
-type PtrPk struct {
- ID *IntegerPk `orm:"pk;rel(one)"`
- Positive bool
-}
-
-var DBARGS = struct {
- Driver string
- Source string
- Debug string
-}{
- os.Getenv("ORM_DRIVER"),
- os.Getenv("ORM_SOURCE"),
- os.Getenv("ORM_DEBUG"),
-}
-
-var (
- IsMysql = DBARGS.Driver == "mysql"
- IsSqlite = DBARGS.Driver == "sqlite3"
- IsPostgres = DBARGS.Driver == "postgres"
- IsTidb = DBARGS.Driver == "tidb"
-)
-
-var (
- dORM Ormer
- dDbBaser dbBaser
-)
-
-var (
- helpinfo = `need driver and source!
-
- Default DB Drivers.
-
- driver: url
- mysql: https://github.com/go-sql-driver/mysql
- sqlite3: https://github.com/mattn/go-sqlite3
- postgres: https://github.com/lib/pq
- tidb: https://github.com/pingcap/tidb
-
- usage:
-
- go get -u github.com/astaxie/beego/orm
- go get -u github.com/go-sql-driver/mysql
- go get -u github.com/mattn/go-sqlite3
- go get -u github.com/lib/pq
- go get -u github.com/pingcap/tidb
-
- #### MySQL
- mysql -u root -e 'create database orm_test;'
- export ORM_DRIVER=mysql
- export ORM_SOURCE="root:@/orm_test?charset=utf8"
- go test -v github.com/astaxie/beego/orm
-
-
- #### Sqlite3
- export ORM_DRIVER=sqlite3
- export ORM_SOURCE='file:memory_test?mode=memory'
- go test -v github.com/astaxie/beego/orm
-
-
- #### PostgreSQL
- psql -c 'create database orm_test;' -U postgres
- export ORM_DRIVER=postgres
- export ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable"
- go test -v github.com/astaxie/beego/orm
-
- #### TiDB
- export ORM_DRIVER=tidb
- export ORM_SOURCE='memory://test/test'
- go test -v github.com/astaxie/beego/orm
-
- `
-)
-
-func init() {
- Debug, _ = StrTo(DBARGS.Debug).Bool()
-
- if DBARGS.Driver == "" || DBARGS.Source == "" {
- fmt.Println(helpinfo)
- os.Exit(2)
- }
-
- RegisterDataBase("default", DBARGS.Driver, DBARGS.Source, 20)
-
- alias := getDbAlias("default")
- if alias.Driver == DRMySQL {
- alias.Engine = "INNODB"
- }
-
-}
diff --git a/orm/models_utils.go b/orm/models_utils.go
deleted file mode 100644
index 71127a6b..00000000
--- a/orm/models_utils.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "database/sql"
- "fmt"
- "reflect"
- "strings"
- "time"
-)
-
- // a value of 1 marks a bare attribute
- // a value of 2 marks a tag that carries a parameter
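- // e.g. in `orm:"null;size(100)"`, "null" is an attr and "size" is a tag
- // whose parameter "100" ends up in the tags map of parseStructTag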
-var supportTag = map[string]int{
- "-": 1,
- "null": 1,
- "index": 1,
- "unique": 1,
- "pk": 1,
- "auto": 1,
- "auto_now": 1,
- "auto_now_add": 1,
- "size": 2,
- "column": 2,
- "default": 2,
- "rel": 2,
- "reverse": 2,
- "rel_table": 2,
- "rel_through": 2,
- "digits": 2,
- "decimals": 2,
- "on_delete": 2,
- "type": 2,
- "description": 2,
-}
-
-// get reflect.Type name with package path.
-func getFullName(typ reflect.Type) string {
- return typ.PkgPath() + "." + typ.Name()
-}
-
- // getTableName gets the struct table name.
- // If the struct implements TableName, its result is used as the table name;
- // otherwise the struct name is converted with snakeString.
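- // e.g. a struct named UserProfile would map to table "user_profile" unless it
- // overrides TableName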
-func getTableName(val reflect.Value) string {
- if fun := val.MethodByName("TableName"); fun.IsValid() {
- vals := fun.Call([]reflect.Value{})
- // has return and the first val is string
- if len(vals) > 0 && vals[0].Kind() == reflect.String {
- return vals[0].String()
- }
- }
- return snakeString(reflect.Indirect(val).Type().Name())
-}
-
-// get table engine, myisam or innodb.
-func getTableEngine(val reflect.Value) string {
- fun := val.MethodByName("TableEngine")
- if fun.IsValid() {
- vals := fun.Call([]reflect.Value{})
- if len(vals) > 0 && vals[0].Kind() == reflect.String {
- return vals[0].String()
- }
- }
- return ""
-}
-
-// get table index from method.
-func getTableIndex(val reflect.Value) [][]string {
- fun := val.MethodByName("TableIndex")
- if fun.IsValid() {
- vals := fun.Call([]reflect.Value{})
- if len(vals) > 0 && vals[0].CanInterface() {
- if d, ok := vals[0].Interface().([][]string); ok {
- return d
- }
- }
- }
- return nil
-}
-
-// get table unique from method
-func getTableUnique(val reflect.Value) [][]string {
- fun := val.MethodByName("TableUnique")
- if fun.IsValid() {
- vals := fun.Call([]reflect.Value{})
- if len(vals) > 0 && vals[0].CanInterface() {
- if d, ok := vals[0].Interface().([][]string); ok {
- return d
- }
- }
- }
- return nil
-}
-
-// get snaked column name
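- // with the default snake-case name strategy a field UserName becomes column
- // "user_name", and a fk/one-to-one field Profile without an explicit column
- // tag becomes "profile_id"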
-func getColumnName(ft int, addrField reflect.Value, sf reflect.StructField, col string) string {
- column := col
- if col == "" {
- column = nameStrategyMap[nameStrategy](sf.Name)
- }
- switch ft {
- case RelForeignKey, RelOneToOne:
- if len(col) == 0 {
- column = column + "_id"
- }
- case RelManyToMany, RelReverseMany, RelReverseOne:
- column = sf.Name
- }
- return column
-}
-
-// return field type as type constant from reflect.Value
-func getFieldType(val reflect.Value) (ft int, err error) {
- switch val.Type() {
- case reflect.TypeOf(new(int8)):
- ft = TypeBitField
- case reflect.TypeOf(new(int16)):
- ft = TypeSmallIntegerField
- case reflect.TypeOf(new(int32)),
- reflect.TypeOf(new(int)):
- ft = TypeIntegerField
- case reflect.TypeOf(new(int64)):
- ft = TypeBigIntegerField
- case reflect.TypeOf(new(uint8)):
- ft = TypePositiveBitField
- case reflect.TypeOf(new(uint16)):
- ft = TypePositiveSmallIntegerField
- case reflect.TypeOf(new(uint32)),
- reflect.TypeOf(new(uint)):
- ft = TypePositiveIntegerField
- case reflect.TypeOf(new(uint64)):
- ft = TypePositiveBigIntegerField
- case reflect.TypeOf(new(float32)),
- reflect.TypeOf(new(float64)):
- ft = TypeFloatField
- case reflect.TypeOf(new(bool)):
- ft = TypeBooleanField
- case reflect.TypeOf(new(string)):
- ft = TypeVarCharField
- case reflect.TypeOf(new(time.Time)):
- ft = TypeDateTimeField
- default:
- elm := reflect.Indirect(val)
- switch elm.Kind() {
- case reflect.Int8:
- ft = TypeBitField
- case reflect.Int16:
- ft = TypeSmallIntegerField
- case reflect.Int32, reflect.Int:
- ft = TypeIntegerField
- case reflect.Int64:
- ft = TypeBigIntegerField
- case reflect.Uint8:
- ft = TypePositiveBitField
- case reflect.Uint16:
- ft = TypePositiveSmallIntegerField
- case reflect.Uint32, reflect.Uint:
- ft = TypePositiveIntegerField
- case reflect.Uint64:
- ft = TypePositiveBigIntegerField
- case reflect.Float32, reflect.Float64:
- ft = TypeFloatField
- case reflect.Bool:
- ft = TypeBooleanField
- case reflect.String:
- ft = TypeVarCharField
- default:
- if elm.Interface() == nil {
- panic(fmt.Errorf("%s is nil pointer, may be miss setting tag", val))
- }
- switch elm.Interface().(type) {
- case sql.NullInt64:
- ft = TypeBigIntegerField
- case sql.NullFloat64:
- ft = TypeFloatField
- case sql.NullBool:
- ft = TypeBooleanField
- case sql.NullString:
- ft = TypeVarCharField
- case time.Time:
- ft = TypeDateTimeField
- }
- }
- }
- if ft&IsFieldType == 0 {
- err = fmt.Errorf("unsupport field type %s, may be miss setting tag", val)
- }
- return
-}
-
-// parse struct tag string
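- // e.g. `orm:"null;size(100);column(user_name)"` yields
- // attrs = {"null": true} and tags = {"size": "100", "column": "user_name"}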
-func parseStructTag(data string) (attrs map[string]bool, tags map[string]string) {
- attrs = make(map[string]bool)
- tags = make(map[string]string)
- for _, v := range strings.Split(data, defaultStructTagDelim) {
- if v == "" {
- continue
- }
- v = strings.TrimSpace(v)
- if t := strings.ToLower(v); supportTag[t] == 1 {
- attrs[t] = true
- } else if i := strings.Index(v, "("); i > 0 && strings.Index(v, ")") == len(v)-1 {
- name := t[:i]
- if supportTag[name] == 2 {
- v = v[i+1 : len(v)-1]
- tags[name] = v
- }
- } else {
- DebugLog.Println("unsupport orm tag", v)
- }
- }
- return
-}
diff --git a/orm/orm.go b/orm/orm.go
deleted file mode 100644
index c7566b9a..00000000
--- a/orm/orm.go
+++ /dev/null
@@ -1,602 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-// Package orm provide ORM for MySQL/PostgreSQL/sqlite
-// Simple Usage
-//
-// package main
-//
-// import (
-// "fmt"
-// "github.com/astaxie/beego/orm"
-// _ "github.com/go-sql-driver/mysql" // import your used driver
-// )
-//
-// // Model Struct
-// type User struct {
-// Id int `orm:"auto"`
-// Name string `orm:"size(100)"`
-// }
-//
-// func init() {
-// orm.RegisterDataBase("default", "mysql", "root:root@/my_db?charset=utf8", 30)
-// }
-//
-// func main() {
-// o := orm.NewOrm()
-// user := User{Name: "slene"}
-// // insert
-// id, err := o.Insert(&user)
-// // update
-// user.Name = "astaxie"
-// num, err := o.Update(&user)
-// // read one
-// u := User{Id: user.Id}
-// err = o.Read(&u)
-// // delete
-// num, err = o.Delete(&u)
-// }
-//
-// more docs: http://beego.me/docs/mvc/model/overview.md
-package orm
-
-import (
- "context"
- "database/sql"
- "errors"
- "fmt"
- "os"
- "reflect"
- "sync"
- "time"
-)
-
-// DebugQueries define the debug
-const (
- DebugQueries = iota
-)
-
-// Define common vars
-var (
- Debug = false
- DebugLog = NewLog(os.Stdout)
- DefaultRowsLimit = -1
- DefaultRelsDepth = 2
- DefaultTimeLoc = time.Local
- ErrTxHasBegan = errors.New(" transaction already begin")
- ErrTxDone = errors.New(" transaction not begin")
- ErrMultiRows = errors.New(" return multi rows")
- ErrNoRows = errors.New(" no row found")
- ErrStmtClosed = errors.New(" stmt already closed")
- ErrArgs = errors.New(" args error may be empty")
- ErrNotImplement = errors.New("have not implement")
-)
-
-// Params stores the Params
-type Params map[string]interface{}
-
-// ParamsList stores paramslist
-type ParamsList []interface{}
-
-type orm struct {
- alias *alias
- db dbQuerier
- isTx bool
-}
-
-var _ Ormer = new(orm)
-
-// get model info and model reflect value
-func (o *orm) getMiInd(md interface{}, needPtr bool) (mi *modelInfo, ind reflect.Value) {
- val := reflect.ValueOf(md)
- ind = reflect.Indirect(val)
- typ := ind.Type()
- if needPtr && val.Kind() != reflect.Ptr {
- panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", getFullName(typ)))
- }
- name := getFullName(typ)
- if mi, ok := modelCache.getByFullName(name); ok {
- return mi, ind
- }
- panic(fmt.Errorf(" table: `%s` not found, make sure it was registered with `RegisterModel()`", name))
-}
-
-// get field info from model info by given field name
-func (o *orm) getFieldInfo(mi *modelInfo, name string) *fieldInfo {
- fi, ok := mi.fields.GetByAny(name)
- if !ok {
- panic(fmt.Errorf(" cannot find field `%s` for model `%s`", name, mi.fullName))
- }
- return fi
-}
-
-// read data to model
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Read(md interface{}, cols ...string) error {
- mi, ind := o.getMiInd(md, true)
- return o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, false)
-}
-
- // read data to model, like Read(), but using the "SELECT ... FOR UPDATE" form
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) ReadForUpdate(md interface{}, cols ...string) error {
- mi, ind := o.getMiInd(md, true)
- return o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, true)
-}
-
-// Try to read a row from the database, or insert one if it doesn't exist
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error) {
- cols = append([]string{col1}, cols...)
- mi, ind := o.getMiInd(md, true)
- err := o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, false)
- if err == ErrNoRows {
- // Create
- id, err := o.Insert(md)
- return (err == nil), id, err
- }
-
- id, vid := int64(0), ind.FieldByIndex(mi.fields.pk.fieldIndex)
- if mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
- id = int64(vid.Uint())
- } else if mi.fields.pk.rel {
- return o.ReadOrCreate(vid.Interface(), mi.fields.pk.relModelInfo.fields.pk.name)
- } else {
- id = vid.Int()
- }
-
- return false, id, err
-}
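
ReadOrCreate above first tries to read the row matching the listed columns and only inserts when no row is found; the boolean result reports whether an insert happened. A minimal usage sketch, assuming `github.com/astaxie/beego/orm` and `fmt` are imported and the `User` model from the package comment is registered against the `default` alias:

```go
// Sketch only: User is the model from the package doc; error handling trimmed.
func readOrCreateUser() {
	o := orm.NewOrm()
	u := User{Name: "slene"}
	// Match on the Name column; insert the struct if no row matches.
	created, id, err := o.ReadOrCreate(&u, "Name")
	if err == nil {
		fmt.Println(created, id) // created is true only when a new row was inserted
	}
}
```
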
-
-// insert model data to database
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Insert(md interface{}) (int64, error) {
- mi, ind := o.getMiInd(md, true)
- id, err := o.alias.DbBaser.Insert(o.db, mi, ind, o.alias.TZ)
- if err != nil {
- return id, err
- }
-
- o.setPk(mi, ind, id)
-
- return id, nil
-}
-
-// set auto pk field
-func (o *orm) setPk(mi *modelInfo, ind reflect.Value, id int64) {
- if mi.fields.pk.auto {
- if mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
- ind.FieldByIndex(mi.fields.pk.fieldIndex).SetUint(uint64(id))
- } else {
- ind.FieldByIndex(mi.fields.pk.fieldIndex).SetInt(id)
- }
- }
-}
-
-// insert some models to database
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) InsertMulti(bulk int, mds interface{}) (int64, error) {
- var cnt int64
-
- sind := reflect.Indirect(reflect.ValueOf(mds))
-
- switch sind.Kind() {
- case reflect.Array, reflect.Slice:
- if sind.Len() == 0 {
- return cnt, ErrArgs
- }
- default:
- return cnt, ErrArgs
- }
-
- if bulk <= 1 {
- for i := 0; i < sind.Len(); i++ {
- ind := reflect.Indirect(sind.Index(i))
- mi, _ := o.getMiInd(ind.Interface(), false)
- id, err := o.alias.DbBaser.Insert(o.db, mi, ind, o.alias.TZ)
- if err != nil {
- return cnt, err
- }
-
- o.setPk(mi, ind, id)
-
- cnt++
- }
- } else {
- mi, _ := o.getMiInd(sind.Index(0).Interface(), false)
- return o.alias.DbBaser.InsertMulti(o.db, mi, sind, bulk, o.alias.TZ)
- }
- return cnt, nil
-}
-
-// InsertOrUpdate data to database
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64, error) {
- mi, ind := o.getMiInd(md, true)
- id, err := o.alias.DbBaser.InsertOrUpdate(o.db, mi, ind, o.alias, colConflitAndArgs...)
- if err != nil {
- return id, err
- }
-
- o.setPk(mi, ind, id)
-
- return id, nil
-}
-
-// update model to database.
-// cols sets the columns to update.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Update(md interface{}, cols ...string) (int64, error) {
- mi, ind := o.getMiInd(md, true)
- return o.alias.DbBaser.Update(o.db, mi, ind, o.alias.TZ, cols)
-}
-
-// delete model in database
-// cols names the columns whose values are used as the delete condition; the default is the pk
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Delete(md interface{}, cols ...string) (int64, error) {
- mi, ind := o.getMiInd(md, true)
- num, err := o.alias.DbBaser.Delete(o.db, mi, ind, o.alias.TZ, cols)
- if err != nil {
- return num, err
- }
- if num > 0 {
- o.setPk(mi, ind, 0)
- }
- return num, nil
-}
-
-// create a models to models queryer
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) QueryM2M(md interface{}, name string) QueryM2Mer {
- mi, ind := o.getMiInd(md, true)
- fi := o.getFieldInfo(mi, name)
-
- switch {
- case fi.fieldType == RelManyToMany:
- case fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough:
- default:
- panic(fmt.Errorf(" model `%s` . name `%s` is not a m2m field", fi.name, mi.fullName))
- }
-
- return newQueryM2M(md, o, mi, fi, ind)
-}
-
-// load related models to md model.
-// args are limit, offset int and order string.
-//
-// example:
-// orm.LoadRelated(post,"Tags")
-// for _,tag := range post.Tags{...}
-//
-// make sure the relation is defined in model struct tags.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) LoadRelated(md interface{}, name string, args ...interface{}) (int64, error) {
- _, fi, ind, qseter := o.queryRelated(md, name)
-
- qs := qseter.(*querySet)
-
- var relDepth int
- var limit, offset int64
- var order string
- for i, arg := range args {
- switch i {
- case 0:
- if v, ok := arg.(bool); ok {
- if v {
- relDepth = DefaultRelsDepth
- }
- } else if v, ok := arg.(int); ok {
- relDepth = v
- }
- case 1:
- limit = ToInt64(arg)
- case 2:
- offset = ToInt64(arg)
- case 3:
- order, _ = arg.(string)
- }
- }
-
- switch fi.fieldType {
- case RelOneToOne, RelForeignKey, RelReverseOne:
- limit = 1
- offset = 0
- }
-
- qs.limit = limit
- qs.offset = offset
- qs.relDepth = relDepth
-
- if len(order) > 0 {
- qs.orders = []string{order}
- }
-
- find := ind.FieldByIndex(fi.fieldIndex)
-
- var nums int64
- var err error
- switch fi.fieldType {
- case RelOneToOne, RelForeignKey, RelReverseOne:
- val := reflect.New(find.Type().Elem())
- container := val.Interface()
- err = qs.One(container)
- if err == nil {
- find.Set(val)
- nums = 1
- }
- default:
- nums, err = qs.All(find.Addr().Interface())
- }
-
- return nums, err
-}
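
The optional `args` of `LoadRelated` are positional: index 0 sets the relation depth (a `bool` of true means `DefaultRelsDepth`, an `int` sets it directly), index 1 the limit, index 2 the offset, and index 3 the order expression. A sketch, assuming a registered `Post` model with a related `Tags` field as in the comment above:

```go
// Sketch only: Post with a related Tags field is assumed to be registered.
func loadPostTags(o orm.Ormer) {
	post := Post{Id: 1}
	if err := o.Read(&post); err == nil {
		// relDepth=true (DefaultRelsDepth), limit 10, offset 0, newest first
		num, err := o.LoadRelated(&post, "Tags", true, 10, 0, "-Id")
		fmt.Println(num, err)
	}
}
```
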
-
-// return a QuerySeter for related models to md model.
-// it can do all, update, delete in QuerySeter.
-// example:
-// qs := orm.QueryRelated(post,"Tag")
-// qs.All(&[]*Tag{})
-//
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) QueryRelated(md interface{}, name string) QuerySeter {
- // is this api needed ?
- _, _, _, qs := o.queryRelated(md, name)
- return qs
-}
-
-// get QuerySeter for related models to md model
-func (o *orm) queryRelated(md interface{}, name string) (*modelInfo, *fieldInfo, reflect.Value, QuerySeter) {
- mi, ind := o.getMiInd(md, true)
- fi := o.getFieldInfo(mi, name)
-
- _, _, exist := getExistPk(mi, ind)
- if !exist {
- panic(ErrMissPK)
- }
-
- var qs *querySet
-
- switch fi.fieldType {
- case RelOneToOne, RelForeignKey, RelManyToMany:
- if !fi.inModel {
- break
- }
- qs = o.getRelQs(md, mi, fi)
- case RelReverseOne, RelReverseMany:
- if !fi.inModel {
- break
- }
- qs = o.getReverseQs(md, mi, fi)
- }
-
- if qs == nil {
- panic(fmt.Errorf(" name `%s` for model `%s` is not an available rel/reverse field", md, name))
- }
-
- return mi, fi, ind, qs
-}
-
-// get reverse relation QuerySeter
-func (o *orm) getReverseQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
- switch fi.fieldType {
- case RelReverseOne, RelReverseMany:
- default:
- panic(fmt.Errorf(" name `%s` for model `%s` is not an available reverse field", fi.name, mi.fullName))
- }
-
- var q *querySet
-
- if fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough {
- q = newQuerySet(o, fi.relModelInfo).(*querySet)
- q.cond = NewCondition().And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md)
- } else {
- q = newQuerySet(o, fi.reverseFieldInfo.mi).(*querySet)
- q.cond = NewCondition().And(fi.reverseFieldInfo.column, md)
- }
-
- return q
-}
-
-// get relation QuerySeter
-func (o *orm) getRelQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
- switch fi.fieldType {
- case RelOneToOne, RelForeignKey, RelManyToMany:
- default:
- panic(fmt.Errorf(" name `%s` for model `%s` is not an available rel field", fi.name, mi.fullName))
- }
-
- q := newQuerySet(o, fi.relModelInfo).(*querySet)
- q.cond = NewCondition()
-
- if fi.fieldType == RelManyToMany {
- q.cond = q.cond.And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md)
- } else {
- q.cond = q.cond.And(fi.reverseFieldInfo.column, md)
- }
-
- return q
-}
-
-// return a QuerySeter for table operations.
-// table name can be string or struct.
-// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) {
- var name string
- if table, ok := ptrStructOrTableName.(string); ok {
- name = nameStrategyMap[defaultNameStrategy](table)
- if mi, ok := modelCache.get(name); ok {
- qs = newQuerySet(o, mi)
- }
- } else {
- name = getFullName(indirectType(reflect.TypeOf(ptrStructOrTableName)))
- if mi, ok := modelCache.getByFullName(name); ok {
- qs = newQuerySet(o, mi)
- }
- }
- if qs == nil {
- panic(fmt.Errorf(" table name: `%s` not exists", name))
- }
- return
-}
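
Both forms of `QueryTable` resolve to the same registered model: the string form goes through the default snake-case name strategy, the struct form through the full type name. A sketch, assuming the `User` model is registered:

```go
// Sketch only: both query sets target the registered User model.
func queryUserTable(o orm.Ormer) {
	byName := o.QueryTable("user")  // snake-cased table name
	byType := o.QueryTable(&User{}) // model struct pointer
	n1, _ := byName.Count()
	n2, _ := byType.Count()
	fmt.Println(n1 == n2) // same table, same count
}
```
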
-
-// switch to another registered database alias by the given name.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-// Use NewOrmUsingDB(name) instead.
-func (o *orm) Using(name string) error {
- if o.isTx {
- panic(fmt.Errorf(" transaction has been start, cannot change db"))
- }
- if al, ok := dataBaseCache.get(name); ok {
- o.alias = al
- if Debug {
- o.db = newDbQueryLog(al, al.DB)
- } else {
- o.db = al.DB
- }
- } else {
- return fmt.Errorf(" unknown db alias name `%s`", name)
- }
- return nil
-}
-
-// begin transaction
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Begin() error {
- return o.BeginTx(context.Background(), nil)
-}
-
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) BeginTx(ctx context.Context, opts *sql.TxOptions) error {
- if o.isTx {
- return ErrTxHasBegan
- }
- var tx *sql.Tx
- tx, err := o.db.(txer).BeginTx(ctx, opts)
- if err != nil {
- return err
- }
- o.isTx = true
- if Debug {
- o.db.(*dbQueryLog).SetDB(tx)
- } else {
- o.db = tx
- }
- return nil
-}
-
-// commit transaction
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Commit() error {
- if !o.isTx {
- return ErrTxDone
- }
- err := o.db.(txEnder).Commit()
- if err == nil {
- o.isTx = false
- o.Using(o.alias.Name)
- } else if err == sql.ErrTxDone {
- return ErrTxDone
- }
- return err
-}
-
-// rollback transaction
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Rollback() error {
- if !o.isTx {
- return ErrTxDone
- }
- err := o.db.(txEnder).Rollback()
- if err == nil {
- o.isTx = false
- o.Using(o.alias.Name)
- } else if err == sql.ErrTxDone {
- return ErrTxDone
- }
- return err
-}
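
`Begin`, `Commit` and `Rollback` manage a transaction on the current alias; after a successful commit or rollback the ormer switches back to the non-transactional connection via `Using`. A typical pattern, sketched with the assumed `User` model:

```go
// Sketch only: commit on success, roll back on any error.
func createUserInTx(o orm.Ormer) error {
	if err := o.Begin(); err != nil {
		return err
	}
	if _, err := o.Insert(&User{Name: "tx-user"}); err != nil {
		o.Rollback()
		return err
	}
	return o.Commit()
}
```
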
-
-// return a raw query seter for raw sql string.
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Raw(query string, args ...interface{}) RawSeter {
- return newRawSet(o, query, args)
-}
-
-// return current using database Driver
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) Driver() Driver {
- return driver(o.alias.Name)
-}
-
-// return sql.DBStats for current database
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func (o *orm) DBStats() *sql.DBStats {
- if o.alias != nil && o.alias.DB != nil {
- stats := o.alias.DB.DB.Stats()
- return &stats
- }
- return nil
-}
-
-// NewOrm create new orm
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func NewOrm() Ormer {
- BootStrap() // execute only once
-
- o := new(orm)
- err := o.Using("default")
- if err != nil {
- panic(err)
- }
- return o
-}
-
-// NewOrmWithDB create a new ormer object with specify *sql.DB for query
-// Deprecated: using pkg/orm. We will remove this method in v2.1.0
-func NewOrmWithDB(driverName, aliasName string, db *sql.DB) (Ormer, error) {
- var al *alias
-
- if dr, ok := drivers[driverName]; ok {
- al = new(alias)
- al.DbBaser = dbBasers[dr]
- al.Driver = dr
- } else {
- return nil, fmt.Errorf("driver name `%s` have not registered", driverName)
- }
-
- al.Name = aliasName
- al.DriverName = driverName
- al.DB = &DB{
- RWMutex: new(sync.RWMutex),
- DB: db,
- stmtDecorators: newStmtDecoratorLruWithEvict(),
- }
-
- detectTZ(al)
-
- o := new(orm)
- o.alias = al
-
- if Debug {
- o.db = newDbQueryLog(o.alias, db)
- } else {
- o.db = db
- }
-
- return o, nil
-}
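
`NewOrmWithDB` wraps an already-opened `*sql.DB` under a caller-chosen alias instead of going through `RegisterDataBase`. A sketch, assuming `database/sql` and a blank-imported mysql driver; the DSN and the alias name `custom` are placeholders:

```go
// Sketch only: reuse an externally managed *sql.DB with the ORM.
func ormFromExistingDB() (orm.Ormer, error) {
	db, err := sql.Open("mysql", "root:root@/my_db?charset=utf8")
	if err != nil {
		return nil, err
	}
	return orm.NewOrmWithDB("mysql", "custom", db)
}
```
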
diff --git a/orm/orm_conds.go b/orm/orm_conds.go
deleted file mode 100644
index f3fd66f0..00000000
--- a/orm/orm_conds.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strings"
-)
-
-// ExprSep define the expression separation
-const (
- ExprSep = "__"
-)
-
-type condValue struct {
- exprs []string
- args []interface{}
- cond *Condition
- isOr bool
- isNot bool
- isCond bool
- isRaw bool
- sql string
-}
-
-// Condition struct.
-// work for WHERE conditions.
-type Condition struct {
- params []condValue
-}
-
-// NewCondition return new condition struct
-func NewCondition() *Condition {
- c := &Condition{}
- return c
-}
-
-// Raw add raw sql to condition
-func (c Condition) Raw(expr string, sql string) *Condition {
- if len(sql) == 0 {
- panic(fmt.Errorf(" sql cannot empty"))
- }
- c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), sql: sql, isRaw: true})
- return &c
-}
-
-// And add expression to condition
-func (c Condition) And(expr string, args ...interface{}) *Condition {
- if expr == "" || len(args) == 0 {
- panic(fmt.Errorf(" args cannot empty"))
- }
- c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args})
- return &c
-}
-
-// AndNot add NOT expression to condition
-func (c Condition) AndNot(expr string, args ...interface{}) *Condition {
- if expr == "" || len(args) == 0 {
- panic(fmt.Errorf(" args cannot empty"))
- }
- c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: true})
- return &c
-}
-
-// AndCond combine a condition to current condition
-func (c *Condition) AndCond(cond *Condition) *Condition {
- c = c.clone()
- if c == cond {
- panic(fmt.Errorf(" cannot use self as sub cond"))
- }
- if cond != nil {
- c.params = append(c.params, condValue{cond: cond, isCond: true})
- }
- return c
-}
-
-// AndNotCond combine a AND NOT condition to current condition
-func (c *Condition) AndNotCond(cond *Condition) *Condition {
- c = c.clone()
- if c == cond {
- panic(fmt.Errorf(" cannot use self as sub cond"))
- }
-
- if cond != nil {
- c.params = append(c.params, condValue{cond: cond, isCond: true, isNot: true})
- }
- return c
-}
-
-// Or add OR expression to condition
-func (c Condition) Or(expr string, args ...interface{}) *Condition {
- if expr == "" || len(args) == 0 {
- panic(fmt.Errorf(" args cannot empty"))
- }
- c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isOr: true})
- return &c
-}
-
-// OrNot add OR NOT expression to condition
-func (c Condition) OrNot(expr string, args ...interface{}) *Condition {
- if expr == "" || len(args) == 0 {
- panic(fmt.Errorf(" args cannot empty"))
- }
- c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: true, isOr: true})
- return &c
-}
-
-// OrCond combine a OR condition to current condition
-func (c *Condition) OrCond(cond *Condition) *Condition {
- c = c.clone()
- if c == cond {
- panic(fmt.Errorf(" cannot use self as sub cond"))
- }
- if cond != nil {
- c.params = append(c.params, condValue{cond: cond, isCond: true, isOr: true})
- }
- return c
-}
-
-// OrNotCond combine a OR NOT condition to current condition
-func (c *Condition) OrNotCond(cond *Condition) *Condition {
- c = c.clone()
- if c == cond {
- panic(fmt.Errorf(" cannot use self as sub cond"))
- }
-
- if cond != nil {
- c.params = append(c.params, condValue{cond: cond, isCond: true, isNot: true, isOr: true})
- }
- return c
-}
-
-// IsEmpty check the condition arguments are empty or not.
-func (c *Condition) IsEmpty() bool {
- return len(c.params) == 0
-}
-
-// clone clone a condition
-func (c Condition) clone() *Condition {
- return &c
-}
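
A `Condition` can be built up with `And`/`Or`/`AndCond` and friends and then attached to a query set via `SetCond`; note that the value-receiver methods return a modified copy, so the chained result must be kept. A sketch in which the field expressions (`name`, `profile__age__gt`) are purely illustrative:

```go
// Sketch only: field expressions are illustrative, not from a real model.
func countAdultsOrSlene(o orm.Ormer) (int64, error) {
	cond := orm.NewCondition().
		And("profile__age__gt", 17).
		OrCond(orm.NewCondition().And("name", "slene"))
	return o.QueryTable("user").SetCond(cond).Count()
}
```
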
diff --git a/orm/orm_log.go b/orm/orm_log.go
deleted file mode 100644
index 5bb3a24f..00000000
--- a/orm/orm_log.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "context"
- "database/sql"
- "fmt"
- "io"
- "log"
- "strings"
- "time"
-)
-
-// Log implement the log.Logger
-type Log struct {
- *log.Logger
-}
-
-// LogFunc is an optional hook that receives a map describing each logged query.
-var LogFunc func(query map[string]interface{})
-
-// NewLog set io.Writer to create a Logger.
-func NewLog(out io.Writer) *Log {
- d := new(Log)
- d.Logger = log.New(out, "[ORM]", log.LstdFlags)
- return d
-}
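
When `Debug` is enabled, every statement is reported through the query logger below; the output goes to `DebugLog` and, if set, to the `LogFunc` hook as a map. A configuration sketch (the map keys match the ones populated by `debugLogQueies`):

```go
// Sketch only: route ORM query logging to stderr and a custom hook.
func enableOrmDebug() {
	orm.Debug = true
	orm.DebugLog = orm.NewLog(os.Stderr)
	orm.LogFunc = func(query map[string]interface{}) {
		// keys set by the logger: "cost_time", "flag", "sql"
		fmt.Println(query["flag"], query["sql"])
	}
}
```
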
-
-func debugLogQueies(alias *alias, operaton, query string, t time.Time, err error, args ...interface{}) {
- var logMap = make(map[string]interface{})
- sub := time.Now().Sub(t) / 1e5
- elsp := float64(int(sub)) / 10.0
- logMap["cost_time"] = elsp
- flag := " OK"
- if err != nil {
- flag = "FAIL"
- }
- logMap["flag"] = flag
- con := fmt.Sprintf(" -[Queries/%s] - [%s / %11s / %7.1fms] - [%s]", alias.Name, flag, operaton, elsp, query)
- cons := make([]string, 0, len(args))
- for _, arg := range args {
- cons = append(cons, fmt.Sprintf("%v", arg))
- }
- if len(cons) > 0 {
- con += fmt.Sprintf(" - `%s`", strings.Join(cons, "`, `"))
- }
- if err != nil {
- con += " - " + err.Error()
- }
- logMap["sql"] = fmt.Sprintf("%s-`%s`", query, strings.Join(cons, "`, `"))
- if LogFunc != nil {
- LogFunc(logMap)
- }
- DebugLog.Println(con)
-}
-
-// statement query logger struct.
-// in debug mode stmtQueryLog is used, otherwise the plain stmtQuerier.
-type stmtQueryLog struct {
- alias *alias
- query string
- stmt stmtQuerier
-}
-
-var _ stmtQuerier = new(stmtQueryLog)
-
-func (d *stmtQueryLog) Close() error {
- a := time.Now()
- err := d.stmt.Close()
- debugLogQueies(d.alias, "st.Close", d.query, a, err)
- return err
-}
-
-func (d *stmtQueryLog) Exec(args ...interface{}) (sql.Result, error) {
- a := time.Now()
- res, err := d.stmt.Exec(args...)
- debugLogQueies(d.alias, "st.Exec", d.query, a, err, args...)
- return res, err
-}
-
-func (d *stmtQueryLog) Query(args ...interface{}) (*sql.Rows, error) {
- a := time.Now()
- res, err := d.stmt.Query(args...)
- debugLogQueies(d.alias, "st.Query", d.query, a, err, args...)
- return res, err
-}
-
-func (d *stmtQueryLog) QueryRow(args ...interface{}) *sql.Row {
- a := time.Now()
- res := d.stmt.QueryRow(args...)
- debugLogQueies(d.alias, "st.QueryRow", d.query, a, nil, args...)
- return res
-}
-
-func newStmtQueryLog(alias *alias, stmt stmtQuerier, query string) stmtQuerier {
- d := new(stmtQueryLog)
- d.stmt = stmt
- d.alias = alias
- d.query = query
- return d
-}
-
-// database query logger struct.
-// in debug mode dbQueryLog is used, otherwise the plain dbQuerier.
-type dbQueryLog struct {
- alias *alias
- db dbQuerier
- tx txer
- txe txEnder
-}
-
-var _ dbQuerier = new(dbQueryLog)
-var _ txer = new(dbQueryLog)
-var _ txEnder = new(dbQueryLog)
-
-func (d *dbQueryLog) Prepare(query string) (*sql.Stmt, error) {
- a := time.Now()
- stmt, err := d.db.Prepare(query)
- debugLogQueies(d.alias, "db.Prepare", query, a, err)
- return stmt, err
-}
-
-func (d *dbQueryLog) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
- a := time.Now()
- stmt, err := d.db.PrepareContext(ctx, query)
- debugLogQueies(d.alias, "db.Prepare", query, a, err)
- return stmt, err
-}
-
-func (d *dbQueryLog) Exec(query string, args ...interface{}) (sql.Result, error) {
- a := time.Now()
- res, err := d.db.Exec(query, args...)
- debugLogQueies(d.alias, "db.Exec", query, a, err, args...)
- return res, err
-}
-
-func (d *dbQueryLog) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
- a := time.Now()
- res, err := d.db.ExecContext(ctx, query, args...)
- debugLogQueies(d.alias, "db.Exec", query, a, err, args...)
- return res, err
-}
-
-func (d *dbQueryLog) Query(query string, args ...interface{}) (*sql.Rows, error) {
- a := time.Now()
- res, err := d.db.Query(query, args...)
- debugLogQueies(d.alias, "db.Query", query, a, err, args...)
- return res, err
-}
-
-func (d *dbQueryLog) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
- a := time.Now()
- res, err := d.db.QueryContext(ctx, query, args...)
- debugLogQueies(d.alias, "db.Query", query, a, err, args...)
- return res, err
-}
-
-func (d *dbQueryLog) QueryRow(query string, args ...interface{}) *sql.Row {
- a := time.Now()
- res := d.db.QueryRow(query, args...)
- debugLogQueies(d.alias, "db.QueryRow", query, a, nil, args...)
- return res
-}
-
-func (d *dbQueryLog) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
- a := time.Now()
- res := d.db.QueryRowContext(ctx, query, args...)
- debugLogQueies(d.alias, "db.QueryRow", query, a, nil, args...)
- return res
-}
-
-func (d *dbQueryLog) Begin() (*sql.Tx, error) {
- a := time.Now()
- tx, err := d.db.(txer).Begin()
- debugLogQueies(d.alias, "db.Begin", "START TRANSACTION", a, err)
- return tx, err
-}
-
-func (d *dbQueryLog) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
- a := time.Now()
- tx, err := d.db.(txer).BeginTx(ctx, opts)
- debugLogQueies(d.alias, "db.BeginTx", "START TRANSACTION", a, err)
- return tx, err
-}
-
-func (d *dbQueryLog) Commit() error {
- a := time.Now()
- err := d.db.(txEnder).Commit()
- debugLogQueies(d.alias, "tx.Commit", "COMMIT", a, err)
- return err
-}
-
-func (d *dbQueryLog) Rollback() error {
- a := time.Now()
- err := d.db.(txEnder).Rollback()
- debugLogQueies(d.alias, "tx.Rollback", "ROLLBACK", a, err)
- return err
-}
-
-func (d *dbQueryLog) SetDB(db dbQuerier) {
- d.db = db
-}
-
-func newDbQueryLog(alias *alias, db dbQuerier) dbQuerier {
- d := new(dbQueryLog)
- d.alias = alias
- d.db = db
- return d
-}
diff --git a/orm/orm_object.go b/orm/orm_object.go
deleted file mode 100644
index de3181ce..00000000
--- a/orm/orm_object.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "reflect"
-)
-
-// an insert queryer struct
-type insertSet struct {
- mi *modelInfo
- orm *orm
- stmt stmtQuerier
- closed bool
-}
-
-var _ Inserter = new(insertSet)
-
-// insert a model without checking whether it is registered.
-func (o *insertSet) Insert(md interface{}) (int64, error) {
- if o.closed {
- return 0, ErrStmtClosed
- }
- val := reflect.ValueOf(md)
- ind := reflect.Indirect(val)
- typ := ind.Type()
- name := getFullName(typ)
- if val.Kind() != reflect.Ptr {
- panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", name))
- }
- if name != o.mi.fullName {
- panic(fmt.Errorf(" need model `%s` but found `%s`", o.mi.fullName, name))
- }
- id, err := o.orm.alias.DbBaser.InsertStmt(o.stmt, o.mi, ind, o.orm.alias.TZ)
- if err != nil {
- return id, err
- }
- if id > 0 {
- if o.mi.fields.pk.auto {
- if o.mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
- ind.FieldByIndex(o.mi.fields.pk.fieldIndex).SetUint(uint64(id))
- } else {
- ind.FieldByIndex(o.mi.fields.pk.fieldIndex).SetInt(id)
- }
- }
- }
- return id, nil
-}
-
-// close insert queryer statement
-func (o *insertSet) Close() error {
- if o.closed {
- return ErrStmtClosed
- }
- o.closed = true
- return o.stmt.Close()
-}
-
-// create new insert queryer.
-func newInsertSet(orm *orm, mi *modelInfo) (Inserter, error) {
- bi := new(insertSet)
- bi.orm = orm
- bi.mi = mi
- st, query, err := orm.alias.DbBaser.PrepareInsert(orm.db, mi)
- if err != nil {
- return nil, err
- }
- if Debug {
- bi.stmt = newStmtQueryLog(orm.alias, st, query)
- } else {
- bi.stmt = st
- }
- return bi, nil
-}
diff --git a/orm/orm_querym2m.go b/orm/orm_querym2m.go
deleted file mode 100644
index 6a270a0d..00000000
--- a/orm/orm_querym2m.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import "reflect"
-
-// model to model struct
-type queryM2M struct {
- md interface{}
- mi *modelInfo
- fi *fieldInfo
- qs *querySet
- ind reflect.Value
-}
-
-// add models to the m2m relation of the origin model.
-// example:
-// m2m := orm.QueryM2M(post,"Tag")
-// m2m.Add(&Tag1{},&Tag2{})
-// for _,tag := range post.Tags{}
-//
-// make sure the relation is defined in post model struct tag.
-func (o *queryM2M) Add(mds ...interface{}) (int64, error) {
- fi := o.fi
- mi := fi.relThroughModelInfo
- mfi := fi.reverseFieldInfo
- rfi := fi.reverseFieldInfoTwo
-
- orm := o.qs.orm
- dbase := orm.alias.DbBaser
-
- var models []interface{}
- var otherValues []interface{}
- var otherNames []string
-
- for _, colname := range mi.fields.dbcols {
- if colname != mfi.column && colname != rfi.column && colname != fi.mi.fields.pk.column &&
- mi.fields.columns[colname] != mi.fields.pk {
- otherNames = append(otherNames, colname)
- }
- }
- for i, md := range mds {
- if reflect.Indirect(reflect.ValueOf(md)).Kind() != reflect.Struct && i > 0 {
- otherValues = append(otherValues, md)
- mds = append(mds[:i], mds[i+1:]...)
- }
- }
- for _, md := range mds {
- val := reflect.ValueOf(md)
- if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
- for i := 0; i < val.Len(); i++ {
- v := val.Index(i)
- if v.CanInterface() {
- models = append(models, v.Interface())
- }
- }
- } else {
- models = append(models, md)
- }
- }
-
- _, v1, exist := getExistPk(o.mi, o.ind)
- if !exist {
- panic(ErrMissPK)
- }
-
- names := []string{mfi.column, rfi.column}
-
- values := make([]interface{}, 0, len(models)*2)
- for _, md := range models {
-
- ind := reflect.Indirect(reflect.ValueOf(md))
- var v2 interface{}
- if ind.Kind() != reflect.Struct {
- v2 = ind.Interface()
- } else {
- _, v2, exist = getExistPk(fi.relModelInfo, ind)
- if !exist {
- panic(ErrMissPK)
- }
- }
- values = append(values, v1, v2)
-
- }
- names = append(names, otherNames...)
- values = append(values, otherValues...)
- return dbase.InsertValue(orm.db, mi, true, names, values)
-}
-
-// remove models following the origin model relationship
-func (o *queryM2M) Remove(mds ...interface{}) (int64, error) {
- fi := o.fi
- qs := o.qs.Filter(fi.reverseFieldInfo.name, o.md)
-
- return qs.Filter(fi.reverseFieldInfoTwo.name+ExprSep+"in", mds).Delete()
-}
-
-// check whether the model exists in the relationship of the origin model
-func (o *queryM2M) Exist(md interface{}) bool {
- fi := o.fi
- return o.qs.Filter(fi.reverseFieldInfo.name, o.md).
- Filter(fi.reverseFieldInfoTwo.name, md).Exist()
-}
-
-// remove all models related to the origin model
-func (o *queryM2M) Clear() (int64, error) {
- fi := o.fi
- return o.qs.Filter(fi.reverseFieldInfo.name, o.md).Delete()
-}
-
-// count all related models of origin model
-func (o *queryM2M) Count() (int64, error) {
- fi := o.fi
- return o.qs.Filter(fi.reverseFieldInfo.name, o.md).Count()
-}
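
Putting the `QueryM2Mer` operations together, assuming the `Post`/`Tag` models from the `Add` comment above (the relation field name `Tags` is illustrative):

```go
// Sketch only: manage the m2m relation of one post.
func managePostTags(o orm.Ormer, post *Post, tag *Tag) {
	m2m := o.QueryM2M(post, "Tags")
	if !m2m.Exist(tag) {
		m2m.Add(tag)
	}
	cnt, _ := m2m.Count()
	fmt.Println(cnt)
	// m2m.Remove(tag) or m2m.Clear() would drop entries again.
}
```
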
-
-var _ QueryM2Mer = new(queryM2M)
-
-// create new M2M queryer.
-func newQueryM2M(md interface{}, o *orm, mi *modelInfo, fi *fieldInfo, ind reflect.Value) QueryM2Mer {
- qm2m := new(queryM2M)
- qm2m.md = md
- qm2m.mi = mi
- qm2m.fi = fi
- qm2m.ind = ind
- qm2m.qs = newQuerySet(o, fi.relThroughModelInfo).(*querySet)
- return qm2m
-}
diff --git a/orm/orm_queryset.go b/orm/orm_queryset.go
deleted file mode 100644
index 878b836b..00000000
--- a/orm/orm_queryset.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "context"
- "fmt"
-)
-
-type colValue struct {
- value int64
- opt operator
-}
-
-type operator int
-
-// define Col operations
-const (
- ColAdd operator = iota
- ColMinus
- ColMultiply
- ColExcept
- ColBitAnd
- ColBitRShift
- ColBitLShift
- ColBitXOR
- ColBitOr
-)
-
-// ColValue performs a raw change on a field, e.g. Nums = Nums + 10. usage:
-// Params{
-// 	"Nums": ColValue(ColAdd, 10),
-// }
-func ColValue(opt operator, value interface{}) interface{} {
- switch opt {
- case ColAdd, ColMinus, ColMultiply, ColExcept, ColBitAnd, ColBitRShift,
- ColBitLShift, ColBitXOR, ColBitOr:
- default:
- panic(fmt.Errorf("orm.ColValue wrong operator"))
- }
- v, err := StrTo(ToStr(value)).Int64()
- if err != nil {
- panic(fmt.Errorf("orm.ColValue doesn't support non string/numeric type, %s", err))
- }
- var val colValue
- val.value = v
- val.opt = opt
- return val
-}
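
`ColValue` results are passed inside `Params` to a batch `Update`, producing an in-database arithmetic update rather than a plain assignment. A sketch; the `Nums` field is illustrative:

```go
// Sketch only: emits roughly "SET nums = nums + 10" for the matched rows.
func bumpNums(o orm.Ormer) (int64, error) {
	return o.QueryTable("user").Filter("id", 1).Update(orm.Params{
		"Nums": orm.ColValue(orm.ColAdd, 10),
	})
}
```
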
-
-// real query struct
-type querySet struct {
- mi *modelInfo
- cond *Condition
- related []string
- relDepth int
- limit int64
- offset int64
- groups []string
- orders []string
- distinct bool
- forupdate bool
- orm *orm
- ctx context.Context
- forContext bool
-}
-
-var _ QuerySeter = new(querySet)
-
-// add condition expression to QuerySeter.
-func (o querySet) Filter(expr string, args ...interface{}) QuerySeter {
- if o.cond == nil {
- o.cond = NewCondition()
- }
- o.cond = o.cond.And(expr, args...)
- return &o
-}
-
-// add raw sql to querySeter.
-func (o querySet) FilterRaw(expr string, sql string) QuerySeter {
- if o.cond == nil {
- o.cond = NewCondition()
- }
- o.cond = o.cond.Raw(expr, sql)
- return &o
-}
-
-// add NOT condition to querySeter.
-func (o querySet) Exclude(expr string, args ...interface{}) QuerySeter {
- if o.cond == nil {
- o.cond = NewCondition()
- }
- o.cond = o.cond.AndNot(expr, args...)
- return &o
-}
-
-// set offset number
-func (o *querySet) setOffset(num interface{}) {
- o.offset = ToInt64(num)
-}
-
-// add LIMIT value.
-// args[0] means offset, e.g. LIMIT num,offset.
-func (o querySet) Limit(limit interface{}, args ...interface{}) QuerySeter {
- o.limit = ToInt64(limit)
- if len(args) > 0 {
- o.setOffset(args[0])
- }
- return &o
-}
-
-// add OFFSET value
-func (o querySet) Offset(offset interface{}) QuerySeter {
- o.setOffset(offset)
- return &o
-}
-
-// add GROUP expression
-func (o querySet) GroupBy(exprs ...string) QuerySeter {
- o.groups = exprs
- return &o
-}
-
-// add ORDER expression.
-// "column" means ASC, "-column" means DESC.
-func (o querySet) OrderBy(exprs ...string) QuerySeter {
- o.orders = exprs
- return &o
-}
-
-// add DISTINCT to SELECT
-func (o querySet) Distinct() QuerySeter {
- o.distinct = true
- return &o
-}
-
-// add FOR UPDATE to SELECT
-func (o querySet) ForUpdate() QuerySeter {
- o.forupdate = true
- return &o
-}
-
-// set the relation models to be queried together.
-// the related models are queried and assigned to the parent model.
-func (o querySet) RelatedSel(params ...interface{}) QuerySeter {
- if len(params) == 0 {
- o.relDepth = DefaultRelsDepth
- } else {
- for _, p := range params {
- switch val := p.(type) {
- case string:
- o.related = append(o.related, val)
- case int:
- o.relDepth = val
- default:
- panic(fmt.Errorf(" wrong param kind: %v", val))
- }
- }
- }
- return &o
-}
-
-// set condition to QuerySeter.
-func (o querySet) SetCond(cond *Condition) QuerySeter {
- o.cond = cond
- return &o
-}
-
-// get condition from QuerySeter
-func (o querySet) GetCond() *Condition {
- return o.cond
-}
-
-// return QuerySeter execution result number
-func (o *querySet) Count() (int64, error) {
- return o.orm.alias.DbBaser.Count(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
-}
-
-// check result empty or not after QuerySeter executed
-func (o *querySet) Exist() bool {
- cnt, _ := o.orm.alias.DbBaser.Count(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
- return cnt > 0
-}
-
-// execute update with parameters
-func (o *querySet) Update(values Params) (int64, error) {
- return o.orm.alias.DbBaser.UpdateBatch(o.orm.db, o, o.mi, o.cond, values, o.orm.alias.TZ)
-}
-
-// execute delete
-func (o *querySet) Delete() (int64, error) {
- return o.orm.alias.DbBaser.DeleteBatch(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
-}
-
-// return an insert queryer.
-// it can be reused for multiple inserts.
-// example:
-// i,err := sq.PrepareInsert()
-// i.Add(&user1{},&user2{})
-func (o *querySet) PrepareInsert() (Inserter, error) {
- return newInsertSet(o.orm, o.mi)
-}
-
-// query all data and map to containers.
-// cols means the columns when querying.
-func (o *querySet) All(container interface{}, cols ...string) (int64, error) {
- return o.orm.alias.DbBaser.ReadBatch(o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols)
-}
-
-// query one row data and map to containers.
-// cols means the columns when querying.
-func (o *querySet) One(container interface{}, cols ...string) error {
- o.limit = 1
- num, err := o.orm.alias.DbBaser.ReadBatch(o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols)
- if err != nil {
- return err
- }
- if num == 0 {
- return ErrNoRows
- }
-
- if num > 1 {
- return ErrMultiRows
- }
- return nil
-}
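
A typical chained read, sketched with the assumed `User` model: `All` fills a slice, while `One` expects exactly one matching row and returns `ErrNoRows`/`ErrMultiRows` otherwise. The `name__icontains` operator expression is illustrative.

```go
// Sketch only: chained Filter/OrderBy/Limit before All and One.
func readUsers(o orm.Ormer) {
	var users []*User
	num, err := o.QueryTable("user").
		Filter("name__icontains", "sl").
		OrderBy("-id").
		Limit(10, 0).
		All(&users)
	fmt.Println(num, err)

	var first User
	err = o.QueryTable("user").Filter("name", "slene").One(&first)
	fmt.Println(err, first.Id)
}
```
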
-
-// query all data and map to []map[string]interface.
-// exprs means the condition expressions.
-// it converts data to []map[column]value.
-func (o *querySet) Values(results *[]Params, exprs ...string) (int64, error) {
- return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ)
-}
-
-// query all data and map to [][]interface
-// it converts data to [][column_index]value
-func (o *querySet) ValuesList(results *[]ParamsList, exprs ...string) (int64, error) {
- return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ)
-}
-
-// query all data and map to []interface.
-// it's designed for single-column result sets; the data is flattened to []value rather than [][column]value.
-func (o *querySet) ValuesFlat(result *ParamsList, expr string) (int64, error) {
- return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, []string{expr}, result, o.orm.alias.TZ)
-}
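
The three value readers return the same data in different container shapes; a sketch with the assumed `user` table:

```go
// Sketch only: same query, three container shapes.
func readUserValues(o orm.Ormer) {
	var maps []orm.Params // one map[column]value per row
	o.QueryTable("user").Values(&maps, "id", "name")

	var lists []orm.ParamsList // one []value per row, in column order
	o.QueryTable("user").ValuesList(&lists, "id", "name")

	var names orm.ParamsList // flat []value for a single column
	o.QueryTable("user").ValuesFlat(&names, "name")
}
```
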
-
-// query all rows into map[string]interface with specify key and value column name.
-// keyCol = "name", valueCol = "value"
-// table data
-// name | value
-// total | 100
-// found | 200
-// to map[string]interface{}{
-// "total": 100,
-// "found": 200,
-// }
-func (o *querySet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) {
- panic(ErrNotImplement)
-}
-
-// query all rows into struct with specify key and value column name.
-// keyCol = "name", valueCol = "value"
-// table data
-// name | value
-// total | 100
-// found | 200
-// to struct {
-// Total int
-// Found int
-// }
-func (o *querySet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) {
- panic(ErrNotImplement)
-}
-
-// set context to QuerySeter.
-func (o querySet) WithContext(ctx context.Context) QuerySeter {
- o.ctx = ctx
- o.forContext = true
- return &o
-}
-
-// create new QuerySeter.
-func newQuerySet(orm *orm, mi *modelInfo) QuerySeter {
- o := new(querySet)
- o.mi = mi
- o.orm = orm
- return o
-}
diff --git a/orm/orm_raw.go b/orm/orm_raw.go
deleted file mode 100644
index 3325a7ea..00000000
--- a/orm/orm_raw.go
+++ /dev/null
@@ -1,867 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "database/sql"
- "fmt"
- "reflect"
- "time"
-)
-
-// raw sql string prepared statement
-type rawPrepare struct {
- rs *rawSet
- stmt stmtQuerier
- closed bool
-}
-
-func (o *rawPrepare) Exec(args ...interface{}) (sql.Result, error) {
- if o.closed {
- return nil, ErrStmtClosed
- }
- return o.stmt.Exec(args...)
-}
-
-func (o *rawPrepare) Close() error {
- o.closed = true
- return o.stmt.Close()
-}
-
-func newRawPreparer(rs *rawSet) (RawPreparer, error) {
- o := new(rawPrepare)
- o.rs = rs
-
- query := rs.query
- rs.orm.alias.DbBaser.ReplaceMarks(&query)
-
- st, err := rs.orm.db.Prepare(query)
- if err != nil {
- return nil, err
- }
- if Debug {
- o.stmt = newStmtQueryLog(rs.orm.alias, st, query)
- } else {
- o.stmt = st
- }
- return o, nil
-}
-
-// raw query seter
-type rawSet struct {
- query string
- args []interface{}
- orm *orm
-}
-
-var _ RawSeter = new(rawSet)
-
-// set args for every query
-func (o rawSet) SetArgs(args ...interface{}) RawSeter {
- o.args = args
- return &o
-}
-
-// execute raw sql and return sql.Result
-func (o *rawSet) Exec() (sql.Result, error) {
- query := o.query
- o.orm.alias.DbBaser.ReplaceMarks(&query)
-
- args := getFlatParams(nil, o.args, o.orm.alias.TZ)
- return o.orm.db.Exec(query, args...)
-}
-
-// set field value to row container
-func (o *rawSet) setFieldValue(ind reflect.Value, value interface{}) {
- switch ind.Kind() {
- case reflect.Bool:
- if value == nil {
- ind.SetBool(false)
- } else if v, ok := value.(bool); ok {
- ind.SetBool(v)
- } else {
- v, _ := StrTo(ToStr(value)).Bool()
- ind.SetBool(v)
- }
-
- case reflect.String:
- if value == nil {
- ind.SetString("")
- } else {
- ind.SetString(ToStr(value))
- }
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if value == nil {
- ind.SetInt(0)
- } else {
- val := reflect.ValueOf(value)
- switch val.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- ind.SetInt(val.Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- ind.SetInt(int64(val.Uint()))
- default:
- v, _ := StrTo(ToStr(value)).Int64()
- ind.SetInt(v)
- }
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if value == nil {
- ind.SetUint(0)
- } else {
- val := reflect.ValueOf(value)
- switch val.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- ind.SetUint(uint64(val.Int()))
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- ind.SetUint(val.Uint())
- default:
- v, _ := StrTo(ToStr(value)).Uint64()
- ind.SetUint(v)
- }
- }
- case reflect.Float64, reflect.Float32:
- if value == nil {
- ind.SetFloat(0)
- } else {
- val := reflect.ValueOf(value)
- switch val.Kind() {
- case reflect.Float64:
- ind.SetFloat(val.Float())
- default:
- v, _ := StrTo(ToStr(value)).Float64()
- ind.SetFloat(v)
- }
- }
-
- case reflect.Struct:
- if value == nil {
- ind.Set(reflect.Zero(ind.Type()))
- return
- }
- switch ind.Interface().(type) {
- case time.Time:
- var str string
- switch d := value.(type) {
- case time.Time:
- o.orm.alias.DbBaser.TimeFromDB(&d, o.orm.alias.TZ)
- ind.Set(reflect.ValueOf(d))
- case []byte:
- str = string(d)
- case string:
- str = d
- }
- if str != "" {
- if len(str) >= 19 {
- str = str[:19]
- t, err := time.ParseInLocation(formatDateTime, str, o.orm.alias.TZ)
- if err == nil {
- t = t.In(DefaultTimeLoc)
- ind.Set(reflect.ValueOf(t))
- }
- } else if len(str) >= 10 {
- str = str[:10]
- t, err := time.ParseInLocation(formatDate, str, DefaultTimeLoc)
- if err == nil {
- ind.Set(reflect.ValueOf(t))
- }
- }
- }
- case sql.NullString, sql.NullInt64, sql.NullFloat64, sql.NullBool:
- indi := reflect.New(ind.Type()).Interface()
- sc, ok := indi.(sql.Scanner)
- if !ok {
- return
- }
- err := sc.Scan(value)
- if err == nil {
- ind.Set(reflect.Indirect(reflect.ValueOf(sc)))
- }
- }
-
- case reflect.Ptr:
- if value == nil {
- ind.Set(reflect.Zero(ind.Type()))
- break
- }
- ind.Set(reflect.New(ind.Type().Elem()))
- o.setFieldValue(reflect.Indirect(ind), value)
- }
-}
-
-// set field value in loop for slice container
-func (o *rawSet) loopSetRefs(refs []interface{}, sInds []reflect.Value, nIndsPtr *[]reflect.Value, eTyps []reflect.Type, init bool) {
- nInds := *nIndsPtr
-
- cur := 0
- for i := 0; i < len(sInds); i++ {
- sInd := sInds[i]
- eTyp := eTyps[i]
-
- typ := eTyp
- isPtr := false
- if typ.Kind() == reflect.Ptr {
- isPtr = true
- typ = typ.Elem()
- }
- if typ.Kind() == reflect.Ptr {
- isPtr = true
- typ = typ.Elem()
- }
-
- var nInd reflect.Value
- if init {
- nInd = reflect.New(sInd.Type()).Elem()
- } else {
- nInd = nInds[i]
- }
-
- val := reflect.New(typ)
- ind := val.Elem()
-
- tpName := ind.Type().String()
-
- if ind.Kind() == reflect.Struct {
- if tpName == "time.Time" {
- value := reflect.ValueOf(refs[cur]).Elem().Interface()
- if isPtr && value == nil {
- val = reflect.New(val.Type()).Elem()
- } else {
- o.setFieldValue(ind, value)
- }
- cur++
- }
-
- } else {
- value := reflect.ValueOf(refs[cur]).Elem().Interface()
- if isPtr && value == nil {
- val = reflect.New(val.Type()).Elem()
- } else {
- o.setFieldValue(ind, value)
- }
- cur++
- }
-
- if nInd.Kind() == reflect.Slice {
- if isPtr {
- nInd = reflect.Append(nInd, val)
- } else {
- nInd = reflect.Append(nInd, ind)
- }
- } else {
- if isPtr {
- nInd.Set(val)
- } else {
- nInd.Set(ind)
- }
- }
-
- nInds[i] = nInd
- }
-}
-
-// query data and map to container
-func (o *rawSet) QueryRow(containers ...interface{}) error {
- var (
- refs = make([]interface{}, 0, len(containers))
- sInds []reflect.Value
- eTyps []reflect.Type
- sMi *modelInfo
- )
- structMode := false
- for _, container := range containers {
- val := reflect.ValueOf(container)
- ind := reflect.Indirect(val)
-
- if val.Kind() != reflect.Ptr {
- panic(fmt.Errorf(" all args must be use ptr"))
- }
-
- etyp := ind.Type()
- typ := etyp
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
-
- sInds = append(sInds, ind)
- eTyps = append(eTyps, etyp)
-
- if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
- if len(containers) > 1 {
- panic(fmt.Errorf(" now support one struct only. see #384"))
- }
-
- structMode = true
- fn := getFullName(typ)
- if mi, ok := modelCache.getByFullName(fn); ok {
- sMi = mi
- }
- } else {
- var ref interface{}
- refs = append(refs, &ref)
- }
- }
-
- query := o.query
- o.orm.alias.DbBaser.ReplaceMarks(&query)
-
- args := getFlatParams(nil, o.args, o.orm.alias.TZ)
- rows, err := o.orm.db.Query(query, args...)
- if err != nil {
- if err == sql.ErrNoRows {
- return ErrNoRows
- }
- return err
- }
-
- defer rows.Close()
-
- if rows.Next() {
- if structMode {
- columns, err := rows.Columns()
- if err != nil {
- return err
- }
-
- columnsMp := make(map[string]interface{}, len(columns))
-
- refs = make([]interface{}, 0, len(columns))
- for _, col := range columns {
- var ref interface{}
- columnsMp[col] = &ref
- refs = append(refs, &ref)
- }
-
- if err := rows.Scan(refs...); err != nil {
- return err
- }
-
- ind := sInds[0]
-
- if ind.Kind() == reflect.Ptr {
- if ind.IsNil() || !ind.IsValid() {
- ind.Set(reflect.New(eTyps[0].Elem()))
- }
- ind = ind.Elem()
- }
-
- if sMi != nil {
- for _, col := range columns {
- if fi := sMi.fields.GetByColumn(col); fi != nil {
- value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
- field := ind.FieldByIndex(fi.fieldIndex)
- if fi.fieldType&IsRelField > 0 {
- mf := reflect.New(fi.relModelInfo.addrField.Elem().Type())
- field.Set(mf)
- field = mf.Elem().FieldByIndex(fi.relModelInfo.fields.pk.fieldIndex)
- }
- o.setFieldValue(field, value)
- }
- }
- } else {
- for i := 0; i < ind.NumField(); i++ {
- f := ind.Field(i)
- fe := ind.Type().Field(i)
- _, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
- var col string
- if col = tags["column"]; col == "" {
- col = nameStrategyMap[nameStrategy](fe.Name)
- }
- if v, ok := columnsMp[col]; ok {
- value := reflect.ValueOf(v).Elem().Interface()
- o.setFieldValue(f, value)
- }
- }
- }
-
- } else {
- if err := rows.Scan(refs...); err != nil {
- return err
- }
-
- nInds := make([]reflect.Value, len(sInds))
- o.loopSetRefs(refs, sInds, &nInds, eTyps, true)
- for i, sInd := range sInds {
- nInd := nInds[i]
- sInd.Set(nInd)
- }
- }
-
- } else {
- return ErrNoRows
- }
-
- return nil
-}
-
-// query data rows and map to container
-func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
- var (
- refs = make([]interface{}, 0, len(containers))
- sInds []reflect.Value
- eTyps []reflect.Type
- sMi *modelInfo
- )
- structMode := false
- for _, container := range containers {
- val := reflect.ValueOf(container)
- sInd := reflect.Indirect(val)
- if val.Kind() != reflect.Ptr || sInd.Kind() != reflect.Slice {
- panic(fmt.Errorf(" all args must be use ptr slice"))
- }
-
- etyp := sInd.Type().Elem()
- typ := etyp
- if typ.Kind() == reflect.Ptr {
- typ = typ.Elem()
- }
-
- sInds = append(sInds, sInd)
- eTyps = append(eTyps, etyp)
-
- if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
- if len(containers) > 1 {
- panic(fmt.Errorf(" now support one struct only. see #384"))
- }
-
- structMode = true
- fn := getFullName(typ)
- if mi, ok := modelCache.getByFullName(fn); ok {
- sMi = mi
- }
- } else {
- var ref interface{}
- refs = append(refs, &ref)
- }
- }
-
- query := o.query
- o.orm.alias.DbBaser.ReplaceMarks(&query)
-
- args := getFlatParams(nil, o.args, o.orm.alias.TZ)
- rows, err := o.orm.db.Query(query, args...)
- if err != nil {
- return 0, err
- }
-
- defer rows.Close()
-
- var cnt int64
- nInds := make([]reflect.Value, len(sInds))
- sInd := sInds[0]
-
- for rows.Next() {
-
- if structMode {
- columns, err := rows.Columns()
- if err != nil {
- return 0, err
- }
-
- columnsMp := make(map[string]interface{}, len(columns))
-
- refs = make([]interface{}, 0, len(columns))
- for _, col := range columns {
- var ref interface{}
- columnsMp[col] = &ref
- refs = append(refs, &ref)
- }
-
- if err := rows.Scan(refs...); err != nil {
- return 0, err
- }
-
- if cnt == 0 && !sInd.IsNil() {
- sInd.Set(reflect.New(sInd.Type()).Elem())
- }
-
- var ind reflect.Value
- if eTyps[0].Kind() == reflect.Ptr {
- ind = reflect.New(eTyps[0].Elem())
- } else {
- ind = reflect.New(eTyps[0])
- }
-
- if ind.Kind() == reflect.Ptr {
- ind = ind.Elem()
- }
-
- if sMi != nil {
- for _, col := range columns {
- if fi := sMi.fields.GetByColumn(col); fi != nil {
- value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
- field := ind.FieldByIndex(fi.fieldIndex)
- if fi.fieldType&IsRelField > 0 {
- mf := reflect.New(fi.relModelInfo.addrField.Elem().Type())
- field.Set(mf)
- field = mf.Elem().FieldByIndex(fi.relModelInfo.fields.pk.fieldIndex)
- }
- o.setFieldValue(field, value)
- }
- }
- } else {
- // define recursive function
- var recursiveSetField func(rv reflect.Value)
- recursiveSetField = func(rv reflect.Value) {
- for i := 0; i < rv.NumField(); i++ {
- f := rv.Field(i)
- fe := rv.Type().Field(i)
-
- // check if the field is a Struct
- // recursive the Struct type
- if fe.Type.Kind() == reflect.Struct {
- recursiveSetField(f)
- }
-
- _, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
- var col string
- if col = tags["column"]; col == "" {
- col = nameStrategyMap[nameStrategy](fe.Name)
- }
- if v, ok := columnsMp[col]; ok {
- value := reflect.ValueOf(v).Elem().Interface()
- o.setFieldValue(f, value)
- }
- }
- }
-
- // init call the recursive function
- recursiveSetField(ind)
- }
-
- if eTyps[0].Kind() == reflect.Ptr {
- ind = ind.Addr()
- }
-
- sInd = reflect.Append(sInd, ind)
-
- } else {
- if err := rows.Scan(refs...); err != nil {
- return 0, err
- }
-
- o.loopSetRefs(refs, sInds, &nInds, eTyps, cnt == 0)
- }
-
- cnt++
- }
-
- if cnt > 0 {
-
- if structMode {
- sInds[0].Set(sInd)
- } else {
- for i, sInd := range sInds {
- nInd := nInds[i]
- sInd.Set(nInd)
- }
- }
- }
-
- return cnt, nil
-}
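
`QueryRow` and `QueryRows` either hydrate a single registered struct type per row, or fill one flat container per selected column. A sketch; the table and column names are illustrative:

```go
// Sketch only: raw SQL mapped to structs and to flat slices.
func rawReads(o orm.Ormer) {
	var users []User
	num, err := o.Raw("SELECT id, name FROM user WHERE id < ?", 100).QueryRows(&users)
	fmt.Println(num, err)

	var ids []int
	var names []string
	// one container per column, filled row by row
	o.Raw("SELECT id, name FROM user").QueryRows(&ids, &names)
}
```
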
-
-func (o *rawSet) readValues(container interface{}, needCols []string) (int64, error) {
- var (
- maps []Params
- lists []ParamsList
- list ParamsList
- )
-
- typ := 0
- switch container.(type) {
- case *[]Params:
- typ = 1
- case *[]ParamsList:
- typ = 2
- case *ParamsList:
- typ = 3
- default:
- panic(fmt.Errorf(" unsupport read values type `%T`", container))
- }
-
- query := o.query
- o.orm.alias.DbBaser.ReplaceMarks(&query)
-
- args := getFlatParams(nil, o.args, o.orm.alias.TZ)
-
- var rs *sql.Rows
- rs, err := o.orm.db.Query(query, args...)
- if err != nil {
- return 0, err
- }
-
- defer rs.Close()
-
- var (
- refs []interface{}
- cnt int64
- cols []string
- indexs []int
- )
-
- for rs.Next() {
- if cnt == 0 {
- columns, err := rs.Columns()
- if err != nil {
- return 0, err
- }
- if len(needCols) > 0 {
- indexs = make([]int, 0, len(needCols))
- } else {
- indexs = make([]int, 0, len(columns))
- }
-
- cols = columns
- refs = make([]interface{}, len(cols))
- for i := range refs {
- var ref sql.NullString
- refs[i] = &ref
-
- if len(needCols) > 0 {
- for _, c := range needCols {
- if c == cols[i] {
- indexs = append(indexs, i)
- }
- }
- } else {
- indexs = append(indexs, i)
- }
- }
- }
-
- if err := rs.Scan(refs...); err != nil {
- return 0, err
- }
-
- switch typ {
- case 1:
- params := make(Params, len(cols))
- for _, i := range indexs {
- ref := refs[i]
- value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
- if value.Valid {
- params[cols[i]] = value.String
- } else {
- params[cols[i]] = nil
- }
- }
- maps = append(maps, params)
- case 2:
- params := make(ParamsList, 0, len(cols))
- for _, i := range indexs {
- ref := refs[i]
- value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
- if value.Valid {
- params = append(params, value.String)
- } else {
- params = append(params, nil)
- }
- }
- lists = append(lists, params)
- case 3:
- for _, i := range indexs {
- ref := refs[i]
- value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
- if value.Valid {
- list = append(list, value.String)
- } else {
- list = append(list, nil)
- }
- }
- }
-
- cnt++
- }
-
- switch v := container.(type) {
- case *[]Params:
- *v = maps
- case *[]ParamsList:
- *v = lists
- case *ParamsList:
- *v = list
- }
-
- return cnt, nil
-}
-
-func (o *rawSet) queryRowsTo(container interface{}, keyCol, valueCol string) (int64, error) {
- var (
- maps Params
- ind *reflect.Value
- )
-
- var typ int
- switch container.(type) {
- case *Params:
- typ = 1
- default:
- typ = 2
- vl := reflect.ValueOf(container)
- id := reflect.Indirect(vl)
- if vl.Kind() != reflect.Ptr || id.Kind() != reflect.Struct {
- panic(fmt.Errorf(" RowsTo unsupport type `%T` need ptr struct", container))
- }
-
- ind = &id
- }
-
- query := o.query
- o.orm.alias.DbBaser.ReplaceMarks(&query)
-
- args := getFlatParams(nil, o.args, o.orm.alias.TZ)
-
- rs, err := o.orm.db.Query(query, args...)
- if err != nil {
- return 0, err
- }
-
- defer rs.Close()
-
- var (
- refs []interface{}
- cnt int64
- cols []string
- )
-
- var (
- keyIndex = -1
- valueIndex = -1
- )
-
- for rs.Next() {
- if cnt == 0 {
- columns, err := rs.Columns()
- if err != nil {
- return 0, err
- }
- cols = columns
- refs = make([]interface{}, len(cols))
- for i := range refs {
- if keyCol == cols[i] {
- keyIndex = i
- }
- if typ == 1 || keyIndex == i {
- var ref sql.NullString
- refs[i] = &ref
- } else {
- var ref interface{}
- refs[i] = &ref
- }
- if valueCol == cols[i] {
- valueIndex = i
- }
- }
- if keyIndex == -1 || valueIndex == -1 {
- panic(fmt.Errorf(" RowsTo unknown key, value column name `%s: %s`", keyCol, valueCol))
- }
- }
-
- if err := rs.Scan(refs...); err != nil {
- return 0, err
- }
-
- if cnt == 0 {
- switch typ {
- case 1:
- maps = make(Params)
- }
- }
-
- key := reflect.Indirect(reflect.ValueOf(refs[keyIndex])).Interface().(sql.NullString).String
-
- switch typ {
- case 1:
- value := reflect.Indirect(reflect.ValueOf(refs[valueIndex])).Interface().(sql.NullString)
- if value.Valid {
- maps[key] = value.String
- } else {
- maps[key] = nil
- }
-
- default:
- if id := ind.FieldByName(camelString(key)); id.IsValid() {
- o.setFieldValue(id, reflect.ValueOf(refs[valueIndex]).Elem().Interface())
- }
- }
-
- cnt++
- }
-
- if typ == 1 {
- v, _ := container.(*Params)
- *v = maps
- }
-
- return cnt, nil
-}
-
-// query data to []map[string]interface
-func (o *rawSet) Values(container *[]Params, cols ...string) (int64, error) {
- return o.readValues(container, cols)
-}
-
-// query data to [][]interface
-func (o *rawSet) ValuesList(container *[]ParamsList, cols ...string) (int64, error) {
- return o.readValues(container, cols)
-}
-
-// query data to []interface
-func (o *rawSet) ValuesFlat(container *ParamsList, cols ...string) (int64, error) {
- return o.readValues(container, cols)
-}
-
-// query all rows into map[string]interface with specify key and value column name.
-// keyCol = "name", valueCol = "value"
-// table data
-// name | value
-// total | 100
-// found | 200
-// to map[string]interface{}{
-// "total": 100,
-// "found": 200,
-// }
-func (o *rawSet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) {
- return o.queryRowsTo(result, keyCol, valueCol)
-}
-
-// query all rows into struct with specify key and value column name.
-// keyCol = "name", valueCol = "value"
-// table data
-// name | value
-// total | 100
-// found | 200
-// to struct {
-// Total int
-// Found int
-// }
-func (o *rawSet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) {
- return o.queryRowsTo(ptrStruct, keyCol, valueCol)
-}
-
-// return a prepared raw statement for repeated use.
-func (o *rawSet) Prepare() (RawPreparer, error) {
- return newRawPreparer(o)
-}
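
`Prepare` returns a `RawPreparer`, so the same statement can be executed repeatedly with different arguments; close it when done. A sketch with an illustrative `user` table:

```go
// Sketch only: one prepared statement, several executions.
func renameUsers(o orm.Ormer) error {
	p, err := o.Raw("UPDATE user SET name = ? WHERE name = ?").Prepare()
	if err != nil {
		return err
	}
	defer p.Close()
	if _, err := p.Exec("testing1", "slene"); err != nil {
		return err
	}
	_, err = p.Exec("testing2", "astaxie")
	return err
}
```
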
-
-func newRawSet(orm *orm, query string, args []interface{}) RawSeter {
- o := new(rawSet)
- o.query = query
- o.args = args
- o.orm = orm
- return o
-}
diff --git a/orm/orm_test.go b/orm/orm_test.go
deleted file mode 100644
index eac7b33a..00000000
--- a/orm/orm_test.go
+++ /dev/null
@@ -1,2500 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build go1.8
-
-package orm
-
-import (
- "bytes"
- "context"
- "database/sql"
- "fmt"
- "io/ioutil"
- "math"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strings"
- "testing"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-var _ = os.PathSeparator
-
-var (
- testDate = formatDate + " -0700"
- testDateTime = formatDateTime + " -0700"
- testTime = formatTime + " -0700"
-)
-
-type argAny []interface{}
-
-// get interface by index from interface slice
-func (a argAny) Get(i int, args ...interface{}) (r interface{}) {
- if i >= 0 && i < len(a) {
- r = a[i]
- }
- if len(args) > 0 {
- r = args[0]
- }
- return
-}
-
-func ValuesCompare(is bool, a interface{}, args ...interface{}) (ok bool, err error) {
- if len(args) == 0 {
- return false, fmt.Errorf("miss args")
- }
- b := args[0]
- arg := argAny(args)
-
- switch v := a.(type) {
- case reflect.Kind:
- ok = reflect.ValueOf(b).Kind() == v
- case time.Time:
- if v2, vo := b.(time.Time); vo {
- if arg.Get(1) != nil {
- format := ToStr(arg.Get(1))
- a = v.Format(format)
- b = v2.Format(format)
- ok = a == b
- } else {
- err = fmt.Errorf("compare datetime miss format")
- goto wrongArg
- }
- }
- default:
- ok = ToStr(a) == ToStr(b)
- }
- ok = is && ok || !is && !ok
- if !ok {
- if is {
- err = fmt.Errorf("expected: `%v`, get `%v`", b, a)
- } else {
- err = fmt.Errorf("not expected: `%v`, get `%v`", b, a)
- }
- }
-
-wrongArg:
- if err != nil {
- return false, err
- }
-
- return true, nil
-}
-
-func AssertIs(a interface{}, args ...interface{}) error {
- if ok, err := ValuesCompare(true, a, args...); !ok {
- return err
- }
- return nil
-}
-
-func AssertNot(a interface{}, args ...interface{}) error {
- if ok, err := ValuesCompare(false, a, args...); !ok {
- return err
- }
- return nil
-}
-
-func getCaller(skip int) string {
- pc, file, line, _ := runtime.Caller(skip)
- fun := runtime.FuncForPC(pc)
- _, fn := filepath.Split(file)
- data, err := ioutil.ReadFile(file)
- var codes []string
- if err == nil {
- lines := bytes.Split(data, []byte{'\n'})
- n := 10
- for i := 0; i < n; i++ {
- o := line - n
- if o < 0 {
- continue
- }
- cur := o + i + 1
- flag := " "
- if cur == line {
- flag = ">>"
- }
- code := fmt.Sprintf(" %s %5d: %s", flag, cur, strings.Replace(string(lines[o+i]), "\t", " ", -1))
- if code != "" {
- codes = append(codes, code)
- }
- }
- }
- funName := fun.Name()
- if i := strings.LastIndex(funName, "."); i > -1 {
- funName = funName[i+1:]
- }
- return fmt.Sprintf("%s:%s:%d: \n%s", fn, funName, line, strings.Join(codes, "\n"))
-}
-
-// Deprecated: Using stretchr/testify/assert
-func throwFail(t *testing.T, err error, args ...interface{}) {
- if err != nil {
- con := fmt.Sprintf("\t\nError: %s\n%s\n", err.Error(), getCaller(2))
- if len(args) > 0 {
- parts := make([]string, 0, len(args))
- for _, arg := range args {
- parts = append(parts, fmt.Sprintf("%v", arg))
- }
- con += " " + strings.Join(parts, ", ")
- }
- t.Error(con)
- t.Fail()
- }
-}
-
-func throwFailNow(t *testing.T, err error, args ...interface{}) {
- if err != nil {
- con := fmt.Sprintf("\t\nError: %s\n%s\n", err.Error(), getCaller(2))
- if len(args) > 0 {
- parts := make([]string, 0, len(args))
- for _, arg := range args {
- parts = append(parts, fmt.Sprintf("%v", arg))
- }
- con += " " + strings.Join(parts, ", ")
- }
- t.Error(con)
- t.FailNow()
- }
-}
-
-func TestGetDB(t *testing.T) {
- if db, err := GetDB(); err != nil {
- throwFailNow(t, err)
- } else {
- err = db.Ping()
- throwFailNow(t, err)
- }
-}
-
-func TestSyncDb(t *testing.T) {
- RegisterModel(new(Data), new(DataNull), new(DataCustom))
- RegisterModel(new(User))
- RegisterModel(new(Profile))
- RegisterModel(new(Post))
- RegisterModel(new(Tag))
- RegisterModel(new(Comment))
- RegisterModel(new(UserBig))
- RegisterModel(new(PostTags))
- RegisterModel(new(Group))
- RegisterModel(new(Permission))
- RegisterModel(new(GroupPermissions))
- RegisterModel(new(InLine))
- RegisterModel(new(InLineOneToOne))
- RegisterModel(new(IntegerPk))
- RegisterModel(new(UintPk))
- RegisterModel(new(PtrPk))
-
- err := RunSyncdb("default", true, Debug)
- throwFail(t, err)
-
- modelCache.clean()
-}
-
-func TestRegisterModels(t *testing.T) {
- RegisterModel(new(Data), new(DataNull), new(DataCustom))
- RegisterModel(new(User))
- RegisterModel(new(Profile))
- RegisterModel(new(Post))
- RegisterModel(new(Tag))
- RegisterModel(new(Comment))
- RegisterModel(new(UserBig))
- RegisterModel(new(PostTags))
- RegisterModel(new(Group))
- RegisterModel(new(Permission))
- RegisterModel(new(GroupPermissions))
- RegisterModel(new(InLine))
- RegisterModel(new(InLineOneToOne))
- RegisterModel(new(IntegerPk))
- RegisterModel(new(UintPk))
- RegisterModel(new(PtrPk))
-
- BootStrap()
-
- dORM = NewOrm()
- dDbBaser = getDbAlias("default").DbBaser
-}
-
-func TestModelSyntax(t *testing.T) {
- user := &User{}
- ind := reflect.ValueOf(user).Elem()
- fn := getFullName(ind.Type())
- mi, ok := modelCache.getByFullName(fn)
- throwFail(t, AssertIs(ok, true))
-
- mi, ok = modelCache.get("user")
- throwFail(t, AssertIs(ok, true))
- if ok {
- throwFail(t, AssertIs(mi.fields.GetByName("ShouldSkip") == nil, true))
- }
-}
-
-var DataValues = map[string]interface{}{
- "Boolean": true,
- "Char": "char",
- "Text": "text",
- "JSON": `{"name":"json"}`,
- "Jsonb": `{"name": "jsonb"}`,
- "Time": time.Now(),
- "Date": time.Now(),
- "DateTime": time.Now(),
- "Byte": byte(1<<8 - 1),
- "Rune": rune(1<<31 - 1),
- "Int": int(1<<31 - 1),
- "Int8": int8(1<<7 - 1),
- "Int16": int16(1<<15 - 1),
- "Int32": int32(1<<31 - 1),
- "Int64": int64(1<<63 - 1),
- "Uint": uint(1<<32 - 1),
- "Uint8": uint8(1<<8 - 1),
- "Uint16": uint16(1<<16 - 1),
- "Uint32": uint32(1<<32 - 1),
- "Uint64": uint64(1<<63 - 1), // uint64 values with high bit set are not supported
- "Float32": float32(100.1234),
- "Float64": float64(100.1234),
- "Decimal": float64(100.1234),
-}
-
-func TestDataTypes(t *testing.T) {
- d := Data{}
- ind := reflect.Indirect(reflect.ValueOf(&d))
-
- for name, value := range DataValues {
- if name == "JSON" {
- continue
- }
- e := ind.FieldByName(name)
- e.Set(reflect.ValueOf(value))
- }
- id, err := dORM.Insert(&d)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- d = Data{ID: 1}
- err = dORM.Read(&d)
- throwFail(t, err)
-
- ind = reflect.Indirect(reflect.ValueOf(&d))
-
- for name, value := range DataValues {
- e := ind.FieldByName(name)
- vu := e.Interface()
- switch name {
- case "Date":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDate)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDate)
- case "DateTime":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- case "Time":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testTime)
- }
- throwFail(t, AssertIs(vu == value, true), value, vu)
- }
-}
-
-func TestNullDataTypes(t *testing.T) {
- d := DataNull{}
-
- if IsPostgres {
- // can be removed when this is fixed
- // https://github.com/lib/pq/pull/125
- d.DateTime = time.Now()
- }
-
- id, err := dORM.Insert(&d)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- data := `{"ok":1,"data":{"arr":[1,2],"msg":"gopher"}}`
- d = DataNull{ID: 1, JSON: data}
- num, err := dORM.Update(&d)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- d = DataNull{ID: 1}
- err = dORM.Read(&d)
- throwFail(t, err)
-
- throwFail(t, AssertIs(d.JSON, data))
-
- throwFail(t, AssertIs(d.NullBool.Valid, false))
- throwFail(t, AssertIs(d.NullString.Valid, false))
- throwFail(t, AssertIs(d.NullInt64.Valid, false))
- throwFail(t, AssertIs(d.NullFloat64.Valid, false))
-
- throwFail(t, AssertIs(d.BooleanPtr, nil))
- throwFail(t, AssertIs(d.CharPtr, nil))
- throwFail(t, AssertIs(d.TextPtr, nil))
- throwFail(t, AssertIs(d.BytePtr, nil))
- throwFail(t, AssertIs(d.RunePtr, nil))
- throwFail(t, AssertIs(d.IntPtr, nil))
- throwFail(t, AssertIs(d.Int8Ptr, nil))
- throwFail(t, AssertIs(d.Int16Ptr, nil))
- throwFail(t, AssertIs(d.Int32Ptr, nil))
- throwFail(t, AssertIs(d.Int64Ptr, nil))
- throwFail(t, AssertIs(d.UintPtr, nil))
- throwFail(t, AssertIs(d.Uint8Ptr, nil))
- throwFail(t, AssertIs(d.Uint16Ptr, nil))
- throwFail(t, AssertIs(d.Uint32Ptr, nil))
- throwFail(t, AssertIs(d.Uint64Ptr, nil))
- throwFail(t, AssertIs(d.Float32Ptr, nil))
- throwFail(t, AssertIs(d.Float64Ptr, nil))
- throwFail(t, AssertIs(d.DecimalPtr, nil))
- throwFail(t, AssertIs(d.TimePtr, nil))
- throwFail(t, AssertIs(d.DatePtr, nil))
- throwFail(t, AssertIs(d.DateTimePtr, nil))
-
- _, err = dORM.Raw(`INSERT INTO data_null (boolean) VALUES (?)`, nil).Exec()
- throwFail(t, err)
-
- d = DataNull{ID: 2}
- err = dORM.Read(&d)
- throwFail(t, err)
-
- booleanPtr := true
- charPtr := string("test")
- textPtr := string("test")
- bytePtr := byte('t')
- runePtr := rune('t')
- intPtr := int(42)
- int8Ptr := int8(42)
- int16Ptr := int16(42)
- int32Ptr := int32(42)
- int64Ptr := int64(42)
- uintPtr := uint(42)
- uint8Ptr := uint8(42)
- uint16Ptr := uint16(42)
- uint32Ptr := uint32(42)
- uint64Ptr := uint64(42)
- float32Ptr := float32(42.0)
- float64Ptr := float64(42.0)
- decimalPtr := float64(42.0)
- timePtr := time.Now()
- datePtr := time.Now()
- dateTimePtr := time.Now()
-
- d = DataNull{
- DateTime: time.Now(),
- NullString: sql.NullString{String: "test", Valid: true},
- NullBool: sql.NullBool{Bool: true, Valid: true},
- NullInt64: sql.NullInt64{Int64: 42, Valid: true},
- NullFloat64: sql.NullFloat64{Float64: 42.42, Valid: true},
- BooleanPtr: &booleanPtr,
- CharPtr: &charPtr,
- TextPtr: &textPtr,
- BytePtr: &bytePtr,
- RunePtr: &runePtr,
- IntPtr: &intPtr,
- Int8Ptr: &int8Ptr,
- Int16Ptr: &int16Ptr,
- Int32Ptr: &int32Ptr,
- Int64Ptr: &int64Ptr,
- UintPtr: &uintPtr,
- Uint8Ptr: &uint8Ptr,
- Uint16Ptr: &uint16Ptr,
- Uint32Ptr: &uint32Ptr,
- Uint64Ptr: &uint64Ptr,
- Float32Ptr: &float32Ptr,
- Float64Ptr: &float64Ptr,
- DecimalPtr: &decimalPtr,
- TimePtr: &timePtr,
- DatePtr: &datePtr,
- DateTimePtr: &dateTimePtr,
- }
-
- id, err = dORM.Insert(&d)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 3))
-
- d = DataNull{ID: 3}
- err = dORM.Read(&d)
- throwFail(t, err)
-
- throwFail(t, AssertIs(d.NullBool.Valid, true))
- throwFail(t, AssertIs(d.NullBool.Bool, true))
-
- throwFail(t, AssertIs(d.NullString.Valid, true))
- throwFail(t, AssertIs(d.NullString.String, "test"))
-
- throwFail(t, AssertIs(d.NullInt64.Valid, true))
- throwFail(t, AssertIs(d.NullInt64.Int64, 42))
-
- throwFail(t, AssertIs(d.NullFloat64.Valid, true))
- throwFail(t, AssertIs(d.NullFloat64.Float64, 42.42))
-
- throwFail(t, AssertIs(*d.BooleanPtr, booleanPtr))
- throwFail(t, AssertIs(*d.CharPtr, charPtr))
- throwFail(t, AssertIs(*d.TextPtr, textPtr))
- throwFail(t, AssertIs(*d.BytePtr, bytePtr))
- throwFail(t, AssertIs(*d.RunePtr, runePtr))
- throwFail(t, AssertIs(*d.IntPtr, intPtr))
- throwFail(t, AssertIs(*d.Int8Ptr, int8Ptr))
- throwFail(t, AssertIs(*d.Int16Ptr, int16Ptr))
- throwFail(t, AssertIs(*d.Int32Ptr, int32Ptr))
- throwFail(t, AssertIs(*d.Int64Ptr, int64Ptr))
- throwFail(t, AssertIs(*d.UintPtr, uintPtr))
- throwFail(t, AssertIs(*d.Uint8Ptr, uint8Ptr))
- throwFail(t, AssertIs(*d.Uint16Ptr, uint16Ptr))
- throwFail(t, AssertIs(*d.Uint32Ptr, uint32Ptr))
- throwFail(t, AssertIs(*d.Uint64Ptr, uint64Ptr))
- throwFail(t, AssertIs(*d.Float32Ptr, float32Ptr))
- throwFail(t, AssertIs(*d.Float64Ptr, float64Ptr))
- throwFail(t, AssertIs(*d.DecimalPtr, decimalPtr))
-
- // in mysql there is a precision problem, so (*d.TimePtr).UTC() may not exactly equal timePtr.UTC()
- assert.True(t, (*d.TimePtr).UTC().Sub(timePtr.UTC()) <= time.Second)
- assert.True(t, (*d.DatePtr).UTC().Sub(datePtr.UTC()) <= time.Second)
- assert.True(t, (*d.DateTimePtr).UTC().Sub(dateTimePtr.UTC()) <= time.Second)
-
- // test support for pointer fields using RawSeter.QueryRows()
- var dnList []*DataNull
- Q := dDbBaser.TableQuote()
- num, err = dORM.Raw(fmt.Sprintf("SELECT * FROM %sdata_null%s where id=?", Q, Q), 3).QueryRows(&dnList)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- equal := reflect.DeepEqual(*dnList[0], d)
- throwFailNow(t, AssertIs(equal, true))
-}
-
-func TestDataCustomTypes(t *testing.T) {
- d := DataCustom{}
- ind := reflect.Indirect(reflect.ValueOf(&d))
-
- for name, value := range DataValues {
- e := ind.FieldByName(name)
- if !e.IsValid() {
- continue
- }
- e.Set(reflect.ValueOf(value).Convert(e.Type()))
- }
-
- id, err := dORM.Insert(&d)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- d = DataCustom{ID: 1}
- err = dORM.Read(&d)
- throwFail(t, err)
-
- ind = reflect.Indirect(reflect.ValueOf(&d))
-
- for name, value := range DataValues {
- e := ind.FieldByName(name)
- if !e.IsValid() {
- continue
- }
- vu := e.Interface()
- value = reflect.ValueOf(value).Convert(e.Type()).Interface()
- throwFail(t, AssertIs(vu == value, true), value, vu)
- }
-}
-
-func TestCRUD(t *testing.T) {
- profile := NewProfile()
- profile.Age = 30
- profile.Money = 1234.12
- id, err := dORM.Insert(profile)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- user := NewUser()
- user.UserName = "slene"
- user.Email = "vslene@gmail.com"
- user.Password = "pass"
- user.Status = 3
- user.IsStaff = true
- user.IsActive = true
-
- id, err = dORM.Insert(user)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- u := &User{ID: user.ID}
- err = dORM.Read(u)
- throwFail(t, err)
-
- throwFail(t, AssertIs(u.UserName, "slene"))
- throwFail(t, AssertIs(u.Email, "vslene@gmail.com"))
- throwFail(t, AssertIs(u.Password, "pass"))
- throwFail(t, AssertIs(u.Status, 3))
- throwFail(t, AssertIs(u.IsStaff, true))
- throwFail(t, AssertIs(u.IsActive, true))
-
- assert.True(t, u.Created.In(DefaultTimeLoc).Sub(user.Created.In(DefaultTimeLoc)) <= time.Second)
- assert.True(t, u.Updated.In(DefaultTimeLoc).Sub(user.Updated.In(DefaultTimeLoc)) <= time.Second)
-
- user.UserName = "astaxie"
- user.Profile = profile
- num, err := dORM.Update(user)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- u = &User{ID: user.ID}
- err = dORM.Read(u)
- throwFailNow(t, err)
- throwFail(t, AssertIs(u.UserName, "astaxie"))
- throwFail(t, AssertIs(u.Profile.ID, profile.ID))
-
- u = &User{UserName: "astaxie", Password: "pass"}
- err = dORM.Read(u, "UserName")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(id, 1))
-
- u.UserName = "QQ"
- u.Password = "111"
- num, err = dORM.Update(u, "UserName")
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- u = &User{ID: user.ID}
- err = dORM.Read(u)
- throwFailNow(t, err)
- throwFail(t, AssertIs(u.UserName, "QQ"))
- throwFail(t, AssertIs(u.Password, "pass"))
-
- num, err = dORM.Delete(profile)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- u = &User{ID: user.ID}
- err = dORM.Read(u)
- throwFail(t, err)
- throwFail(t, AssertIs(true, u.Profile == nil))
-
- num, err = dORM.Delete(user)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- u = &User{ID: 100}
- err = dORM.Read(u)
- throwFail(t, AssertIs(err, ErrNoRows))
-
- ub := UserBig{}
- ub.Name = "name"
- id, err = dORM.Insert(&ub)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- ub = UserBig{ID: 1}
- err = dORM.Read(&ub)
- throwFail(t, err)
- throwFail(t, AssertIs(ub.Name, "name"))
-
- num, err = dORM.Delete(&ub, "name")
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestInsertTestData(t *testing.T) {
- var users []*User
-
- profile := NewProfile()
- profile.Age = 28
- profile.Money = 1234.12
-
- id, err := dORM.Insert(profile)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 2))
-
- user := NewUser()
- user.UserName = "slene"
- user.Email = "vslene@gmail.com"
- user.Password = "pass"
- user.Status = 1
- user.IsStaff = false
- user.IsActive = true
- user.Profile = profile
-
- users = append(users, user)
-
- id, err = dORM.Insert(user)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 2))
-
- profile = NewProfile()
- profile.Age = 30
- profile.Money = 4321.09
-
- id, err = dORM.Insert(profile)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 3))
-
- user = NewUser()
- user.UserName = "astaxie"
- user.Email = "astaxie@gmail.com"
- user.Password = "password"
- user.Status = 2
- user.IsStaff = true
- user.IsActive = false
- user.Profile = profile
-
- users = append(users, user)
-
- id, err = dORM.Insert(user)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 3))
-
- user = NewUser()
- user.UserName = "nobody"
- user.Email = "nobody@gmail.com"
- user.Password = "nobody"
- user.Status = 3
- user.IsStaff = false
- user.IsActive = false
-
- users = append(users, user)
-
- id, err = dORM.Insert(user)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 4))
-
- tags := []*Tag{
- {Name: "golang", BestPost: &Post{ID: 2}},
- {Name: "example"},
- {Name: "format"},
- {Name: "c++"},
- }
-
- posts := []*Post{
- {User: users[0], Tags: []*Tag{tags[0]}, Title: "Introduction", Content: `Go is a new language. Although it borrows ideas from existing languages, it has unusual properties that make effective Go programs different in character from programs written in its relatives. A straightforward translation of a C++ or Java program into Go is unlikely to produce a satisfactory result—Java programs are written in Java, not Go. On the other hand, thinking about the problem from a Go perspective could produce a successful but quite different program. In other words, to write Go well, it's important to understand its properties and idioms. It's also important to know the established conventions for programming in Go, such as naming, formatting, program construction, and so on, so that programs you write will be easy for other Go programmers to understand.
-This document gives tips for writing clear, idiomatic Go code. It augments the language specification, the Tour of Go, and How to Write Go Code, all of which you should read first.`},
- {User: users[1], Tags: []*Tag{tags[0], tags[1]}, Title: "Examples", Content: `The Go package sources are intended to serve not only as the core library but also as examples of how to use the language. Moreover, many of the packages contain working, self-contained executable examples you can run directly from the golang.org web site, such as this one (click on the word "Example" to open it up). If you have a question about how to approach a problem or how something might be implemented, the documentation, code and examples in the library can provide answers, ideas and background.`},
- {User: users[1], Tags: []*Tag{tags[0], tags[2]}, Title: "Formatting", Content: `Formatting issues are the most contentious but the least consequential. People can adapt to different formatting styles but it's better if they don't have to, and less time is devoted to the topic if everyone adheres to the same style. The problem is how to approach this Utopia without a long prescriptive style guide.
-With Go we take an unusual approach and let the machine take care of most formatting issues. The gofmt program (also available as go fmt, which operates at the package level rather than source file level) reads a Go program and emits the source in a standard style of indentation and vertical alignment, retaining and if necessary reformatting comments. If you want to know how to handle some new layout situation, run gofmt; if the answer doesn't seem right, rearrange your program (or file a bug about gofmt), don't work around it.`},
- {User: users[2], Tags: []*Tag{tags[3]}, Title: "Commentary", Content: `Go provides C-style /* */ block comments and C++-style // line comments. Line comments are the norm; block comments appear mostly as package comments, but are useful within an expression or to disable large swaths of code.
-The program—and web server—godoc processes Go source files to extract documentation about the contents of the package. Comments that appear before top-level declarations, with no intervening newlines, are extracted along with the declaration to serve as explanatory text for the item. The nature and style of these comments determines the quality of the documentation godoc produces.`},
- }
-
- comments := []*Comment{
- {Post: posts[0], Content: "a comment"},
- {Post: posts[1], Content: "yes"},
- {Post: posts[1]},
- {Post: posts[1]},
- {Post: posts[2]},
- {Post: posts[2]},
- }
-
- for _, tag := range tags {
- id, err := dORM.Insert(tag)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
- }
-
- for _, post := range posts {
- id, err := dORM.Insert(post)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
- num := len(post.Tags)
- if num > 0 {
- nums, err := dORM.QueryM2M(post, "tags").Add(post.Tags)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(nums, num))
- }
- }
-
- for _, comment := range comments {
- id, err := dORM.Insert(comment)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
- }
-
- permissions := []*Permission{
- {Name: "writePosts"},
- {Name: "readComments"},
- {Name: "readPosts"},
- }
-
- groups := []*Group{
- {
- Name: "admins",
- Permissions: []*Permission{permissions[0], permissions[1], permissions[2]},
- },
- {
- Name: "users",
- Permissions: []*Permission{permissions[1], permissions[2]},
- },
- }
-
- for _, permission := range permissions {
- id, err := dORM.Insert(permission)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
- }
-
- for _, group := range groups {
- id, err := dORM.Insert(group)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
- num := len(group.Permissions)
- if num > 0 {
- nums, err := dORM.QueryM2M(group, "permissions").Add(group.Permissions)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(nums, num))
- }
- }
-
-}
-
-func TestCustomField(t *testing.T) {
- user := User{ID: 2}
- err := dORM.Read(&user)
- throwFailNow(t, err)
-
- user.Langs = append(user.Langs, "zh-CN", "en-US")
- user.Extra.Name = "beego"
- user.Extra.Data = "orm"
- _, err = dORM.Update(&user, "Langs", "Extra")
- throwFailNow(t, err)
-
- user = User{ID: 2}
- err = dORM.Read(&user)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(len(user.Langs), 2))
- throwFailNow(t, AssertIs(user.Langs[0], "zh-CN"))
- throwFailNow(t, AssertIs(user.Langs[1], "en-US"))
-
- throwFailNow(t, AssertIs(user.Extra.Name, "beego"))
- throwFailNow(t, AssertIs(user.Extra.Data, "orm"))
-}
-
-func TestExpr(t *testing.T) {
- user := &User{}
- qs := dORM.QueryTable(user)
- qs = dORM.QueryTable((*User)(nil))
- qs = dORM.QueryTable("User")
- qs = dORM.QueryTable("user")
- num, err := qs.Filter("UserName", "slene").Filter("user_name", "slene").Filter("profile__Age", 28).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("created", time.Now()).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- // num, err = qs.Filter("created", time.Now().Format(format_Date)).Count()
- // throwFail(t, err)
- // throwFail(t, AssertIs(num, 3))
-}
-
-func TestOperators(t *testing.T) {
- qs := dORM.QueryTable("user")
- num, err := qs.Filter("user_name", "slene").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name__exact", String("slene")).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name__exact", "slene").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name__iexact", "Slene").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name__contains", "e").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- var shouldNum int
-
- if IsSqlite || IsTidb {
- shouldNum = 2
- } else {
- shouldNum = 0
- }
-
- num, err = qs.Filter("user_name__contains", "E").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, shouldNum))
-
- num, err = qs.Filter("user_name__icontains", "E").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("user_name__icontains", "E").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("status__gt", 1).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("status__gte", 1).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- num, err = qs.Filter("status__lt", Uint(3)).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("status__lte", Int(3)).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- num, err = qs.Filter("user_name__startswith", "s").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- if IsSqlite || IsTidb {
- shouldNum = 1
- } else {
- shouldNum = 0
- }
-
- num, err = qs.Filter("user_name__startswith", "S").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, shouldNum))
-
- num, err = qs.Filter("user_name__istartswith", "S").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name__endswith", "e").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- if IsSqlite || IsTidb {
- shouldNum = 2
- } else {
- shouldNum = 0
- }
-
- num, err = qs.Filter("user_name__endswith", "E").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, shouldNum))
-
- num, err = qs.Filter("user_name__iendswith", "E").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("profile__isnull", true).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("status__in", 1, 2).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("status__in", []int{1, 2}).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- n1, n2 := 1, 2
- num, err = qs.Filter("status__in", []*int{&n1}, &n2).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("id__between", 2, 3).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Filter("id__between", []int{2, 3}).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.FilterRaw("user_name", "= 'slene'").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.FilterRaw("status", "IN (1, 2)").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.FilterRaw("profile_id", "IN (SELECT id FROM user_profile WHERE age=30)").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestSetCond(t *testing.T) {
- cond := NewCondition()
- cond1 := cond.And("profile__isnull", false).AndNot("status__in", 1).Or("profile__age__gt", 2000)
-
- qs := dORM.QueryTable("user")
- num, err := qs.SetCond(cond1).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- cond2 := cond.AndCond(cond1).OrCond(cond.And("user_name", "slene"))
- num, err = qs.SetCond(cond2).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- cond3 := cond.AndNotCond(cond.And("status__in", 1))
- num, err = qs.SetCond(cond3).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- cond4 := cond.And("user_name", "slene").OrNotCond(cond.And("user_name", "slene"))
- num, err = qs.SetCond(cond4).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- cond5 := cond.Raw("user_name", "= 'slene'").OrNotCond(cond.And("user_name", "slene"))
- num, err = qs.SetCond(cond5).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-}
-
-func TestLimit(t *testing.T) {
- var posts []*Post
- qs := dORM.QueryTable("post")
- num, err := qs.Limit(1).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Limit(-1).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 4))
-
- num, err = qs.Limit(-1, 2).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- num, err = qs.Limit(0, 2).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-}
-
-func TestOffset(t *testing.T) {
- var posts []*Post
- qs := dORM.QueryTable("post")
- num, err := qs.Limit(1).Offset(2).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Offset(2).All(&posts)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-}
-
-func TestOrderBy(t *testing.T) {
- qs := dORM.QueryTable("user")
- num, err := qs.OrderBy("-status").Filter("user_name", "nobody").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.OrderBy("status").Filter("user_name", "slene").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.OrderBy("-profile__age").Filter("user_name", "astaxie").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestAll(t *testing.T) {
- var users []*User
- qs := dORM.QueryTable("user")
- num, err := qs.OrderBy("Id").All(&users)
- throwFail(t, err)
- throwFailNow(t, AssertIs(num, 3))
-
- throwFail(t, AssertIs(users[0].UserName, "slene"))
- throwFail(t, AssertIs(users[1].UserName, "astaxie"))
- throwFail(t, AssertIs(users[2].UserName, "nobody"))
-
- var users2 []User
- qs = dORM.QueryTable("user")
- num, err = qs.OrderBy("Id").All(&users2)
- throwFail(t, err)
- throwFailNow(t, AssertIs(num, 3))
-
- throwFailNow(t, AssertIs(users2[0].UserName, "slene"))
- throwFailNow(t, AssertIs(users2[1].UserName, "astaxie"))
- throwFailNow(t, AssertIs(users2[2].UserName, "nobody"))
-
- qs = dORM.QueryTable("user")
- num, err = qs.OrderBy("Id").RelatedSel().All(&users2, "UserName")
- throwFail(t, err)
- throwFailNow(t, AssertIs(num, 3))
- throwFailNow(t, AssertIs(len(users2), 3))
- throwFailNow(t, AssertIs(users2[0].UserName, "slene"))
- throwFailNow(t, AssertIs(users2[1].UserName, "astaxie"))
- throwFailNow(t, AssertIs(users2[2].UserName, "nobody"))
- throwFailNow(t, AssertIs(users2[0].ID, 0))
- throwFailNow(t, AssertIs(users2[1].ID, 0))
- throwFailNow(t, AssertIs(users2[2].ID, 0))
- throwFailNow(t, AssertIs(users2[0].Profile == nil, false))
- throwFailNow(t, AssertIs(users2[1].Profile == nil, false))
- throwFailNow(t, AssertIs(users2[2].Profile == nil, true))
-
- qs = dORM.QueryTable("user")
- num, err = qs.Filter("user_name", "nothing").All(&users)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 0))
-
- var users3 []*User
- qs = dORM.QueryTable("user")
- num, err = qs.Filter("user_name", "nothing").All(&users3)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 0))
- throwFailNow(t, AssertIs(users3 == nil, false))
-}
-
-func TestOne(t *testing.T) {
- var user User
- qs := dORM.QueryTable("user")
- err := qs.One(&user)
- throwFail(t, err)
-
- user = User{}
- err = qs.OrderBy("Id").Limit(1).One(&user)
- throwFailNow(t, err)
- throwFail(t, AssertIs(user.UserName, "slene"))
- throwFail(t, AssertNot(err, ErrMultiRows))
-
- user = User{}
- err = qs.OrderBy("-Id").Limit(100).One(&user)
- throwFailNow(t, err)
- throwFail(t, AssertIs(user.UserName, "nobody"))
- throwFail(t, AssertNot(err, ErrMultiRows))
-
- err = qs.Filter("user_name", "nothing").One(&user)
- throwFail(t, AssertIs(err, ErrNoRows))
-
-}
-
-func TestValues(t *testing.T) {
- var maps []Params
- qs := dORM.QueryTable("user")
-
- num, err := qs.OrderBy("Id").Values(&maps)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(maps[0]["UserName"], "slene"))
- throwFail(t, AssertIs(maps[2]["Profile"], nil))
- }
-
- num, err = qs.OrderBy("Id").Values(&maps, "UserName", "Profile__Age")
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(maps[0]["UserName"], "slene"))
- throwFail(t, AssertIs(maps[0]["Profile__Age"], 28))
- throwFail(t, AssertIs(maps[2]["Profile__Age"], nil))
- }
-
- num, err = qs.Filter("UserName", "slene").Values(&maps)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestValuesList(t *testing.T) {
- var list []ParamsList
- qs := dORM.QueryTable("user")
-
- num, err := qs.OrderBy("Id").ValuesList(&list)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(list[0][1], "slene"))
- throwFail(t, AssertIs(list[2][9], nil))
- }
-
- num, err = qs.OrderBy("Id").ValuesList(&list, "UserName", "Profile__Age")
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(list[0][0], "slene"))
- throwFail(t, AssertIs(list[0][1], 28))
- throwFail(t, AssertIs(list[2][1], nil))
- }
-}
-
-func TestValuesFlat(t *testing.T) {
- var list ParamsList
- qs := dORM.QueryTable("user")
-
- num, err := qs.OrderBy("id").ValuesFlat(&list, "UserName")
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(list[0], "slene"))
- throwFail(t, AssertIs(list[1], "astaxie"))
- throwFail(t, AssertIs(list[2], "nobody"))
- }
-}
-
-func TestRelatedSel(t *testing.T) {
- if IsTidb {
- // Skip it. TiDB does not support relations yet.
- return
- }
- qs := dORM.QueryTable("user")
- num, err := qs.Filter("profile__age", 28).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("profile__age__gt", 28).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("profile__user__profile__age__gt", 28).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- var user User
- err = qs.Filter("user_name", "slene").RelatedSel("profile").One(&user)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- throwFail(t, AssertNot(user.Profile, nil))
- if user.Profile != nil {
- throwFail(t, AssertIs(user.Profile.Age, 28))
- }
-
- err = qs.Filter("user_name", "slene").RelatedSel().One(&user)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- throwFail(t, AssertNot(user.Profile, nil))
- if user.Profile != nil {
- throwFail(t, AssertIs(user.Profile.Age, 28))
- }
-
- err = qs.Filter("user_name", "nobody").RelatedSel("profile").One(&user)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- throwFail(t, AssertIs(user.Profile, nil))
-
- qs = dORM.QueryTable("user_profile")
- num, err = qs.Filter("user__username", "slene").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- var posts []*Post
- qs = dORM.QueryTable("post")
- num, err = qs.RelatedSel().All(&posts)
- throwFail(t, err)
- throwFailNow(t, AssertIs(num, 4))
-
- throwFailNow(t, AssertIs(posts[0].User.UserName, "slene"))
- throwFailNow(t, AssertIs(posts[1].User.UserName, "astaxie"))
- throwFailNow(t, AssertIs(posts[2].User.UserName, "astaxie"))
- throwFailNow(t, AssertIs(posts[3].User.UserName, "nobody"))
-}
-
-func TestReverseQuery(t *testing.T) {
- var profile Profile
- err := dORM.QueryTable("user_profile").Filter("User", 3).One(&profile)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(profile.Age, 30))
-
- profile = Profile{}
- err = dORM.QueryTable("user_profile").Filter("User__UserName", "astaxie").One(&profile)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(profile.Age, 30))
-
- var user User
- err = dORM.QueryTable("user").Filter("Posts__Title", "Examples").One(&user)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(user.UserName, "astaxie"))
-
- user = User{}
- err = dORM.QueryTable("user").Filter("Posts__User__UserName", "astaxie").Limit(1).One(&user)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(user.UserName, "astaxie"))
-
- user = User{}
- err = dORM.QueryTable("user").Filter("Posts__User__UserName", "astaxie").RelatedSel().Limit(1).One(&user)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(user.UserName, "astaxie"))
- throwFailNow(t, AssertIs(user.Profile == nil, false))
- throwFailNow(t, AssertIs(user.Profile.Age, 30))
-
- var posts []*Post
- num, err := dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").All(&posts)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
- throwFailNow(t, AssertIs(posts[0].Title, "Introduction"))
-
- posts = []*Post{}
- num, err = dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").Filter("User__UserName", "slene").All(&posts)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(posts[0].Title, "Introduction"))
-
- posts = []*Post{}
- num, err = dORM.QueryTable("post").Filter("Tags__Tag__Name", "golang").
- Filter("User__UserName", "slene").RelatedSel().All(&posts)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(posts[0].User == nil, false))
- throwFailNow(t, AssertIs(posts[0].User.UserName, "slene"))
-
- var tags []*Tag
- num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").All(&tags)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(tags[0].Name, "golang"))
-
- tags = []*Tag{}
- num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").
- Filter("BestPost__User__UserName", "astaxie").All(&tags)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(tags[0].Name, "golang"))
-
- tags = []*Tag{}
- num, err = dORM.QueryTable("tag").Filter("Posts__Post__Title", "Introduction").
- Filter("BestPost__User__UserName", "astaxie").RelatedSel().All(&tags)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(tags[0].Name, "golang"))
- throwFailNow(t, AssertIs(tags[0].BestPost == nil, false))
- throwFailNow(t, AssertIs(tags[0].BestPost.Title, "Examples"))
- throwFailNow(t, AssertIs(tags[0].BestPost.User == nil, false))
- throwFailNow(t, AssertIs(tags[0].BestPost.User.UserName, "astaxie"))
-}
-
-func TestLoadRelated(t *testing.T) {
- // load reverse foreign key
- user := User{ID: 3}
-
- err := dORM.Read(&user)
- throwFailNow(t, err)
-
- num, err := dORM.LoadRelated(&user, "Posts")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(user.Posts), 2))
- throwFailNow(t, AssertIs(user.Posts[0].User.ID, 3))
-
- num, err = dORM.LoadRelated(&user, "Posts", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(user.Posts), 2))
- throwFailNow(t, AssertIs(user.Posts[0].User.UserName, "astaxie"))
-
- num, err = dORM.LoadRelated(&user, "Posts", true, 1)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(len(user.Posts), 1))
-
- num, err = dORM.LoadRelated(&user, "Posts", true, 0, 0, "-Id")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(user.Posts), 2))
- throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting"))
-
- num, err = dORM.LoadRelated(&user, "Posts", true, 1, 1, "Id")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(len(user.Posts), 1))
- throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting"))
-
- // load reverse one to one
- profile := Profile{ID: 3}
- profile.BestPost = &Post{ID: 2}
- num, err = dORM.Update(&profile, "BestPost")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- err = dORM.Read(&profile)
- throwFailNow(t, err)
-
- num, err = dORM.LoadRelated(&profile, "User")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(profile.User == nil, false))
- throwFailNow(t, AssertIs(profile.User.UserName, "astaxie"))
-
- num, err = dORM.LoadRelated(&profile, "User", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(profile.User == nil, false))
- throwFailNow(t, AssertIs(profile.User.UserName, "astaxie"))
- throwFailNow(t, AssertIs(profile.User.Profile.Age, profile.Age))
-
- // load rel one to one
- err = dORM.Read(&user)
- throwFailNow(t, err)
-
- num, err = dORM.LoadRelated(&user, "Profile")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(user.Profile == nil, false))
- throwFailNow(t, AssertIs(user.Profile.Age, 30))
-
- num, err = dORM.LoadRelated(&user, "Profile", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(user.Profile == nil, false))
- throwFailNow(t, AssertIs(user.Profile.Age, 30))
- throwFailNow(t, AssertIs(user.Profile.BestPost == nil, false))
- throwFailNow(t, AssertIs(user.Profile.BestPost.Title, "Examples"))
-
- post := Post{ID: 2}
-
- // load rel foreign key
- err = dORM.Read(&post)
- throwFailNow(t, err)
-
- num, err = dORM.LoadRelated(&post, "User")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(post.User == nil, false))
- throwFailNow(t, AssertIs(post.User.UserName, "astaxie"))
-
- num, err = dORM.LoadRelated(&post, "User", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(post.User == nil, false))
- throwFailNow(t, AssertIs(post.User.UserName, "astaxie"))
- throwFailNow(t, AssertIs(post.User.Profile == nil, false))
- throwFailNow(t, AssertIs(post.User.Profile.Age, 30))
-
- // load rel m2m
- post = Post{ID: 2}
-
- err = dORM.Read(&post)
- throwFailNow(t, err)
-
- num, err = dORM.LoadRelated(&post, "Tags")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(post.Tags), 2))
- throwFailNow(t, AssertIs(post.Tags[0].Name, "golang"))
-
- num, err = dORM.LoadRelated(&post, "Tags", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(post.Tags), 2))
- throwFailNow(t, AssertIs(post.Tags[0].Name, "golang"))
- throwFailNow(t, AssertIs(post.Tags[0].BestPost == nil, false))
- throwFailNow(t, AssertIs(post.Tags[0].BestPost.User.UserName, "astaxie"))
-
- // load reverse m2m
- tag := Tag{ID: 1}
-
- err = dORM.Read(&tag)
- throwFailNow(t, err)
-
- num, err = dORM.LoadRelated(&tag, "Posts")
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
- throwFailNow(t, AssertIs(tag.Posts[0].Title, "Introduction"))
- throwFailNow(t, AssertIs(tag.Posts[0].User.ID, 2))
- throwFailNow(t, AssertIs(tag.Posts[0].User.Profile == nil, true))
-
- num, err = dORM.LoadRelated(&tag, "Posts", true)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
- throwFailNow(t, AssertIs(tag.Posts[0].Title, "Introduction"))
- throwFailNow(t, AssertIs(tag.Posts[0].User.ID, 2))
- throwFailNow(t, AssertIs(tag.Posts[0].User.UserName, "slene"))
-}
-
-func TestQueryM2M(t *testing.T) {
- post := Post{ID: 4}
- m2m := dORM.QueryM2M(&post, "Tags")
-
- tag1 := []*Tag{{Name: "TestTag1"}, {Name: "TestTag2"}}
- tag2 := &Tag{Name: "TestTag3"}
- tag3 := []interface{}{&Tag{Name: "TestTag4"}}
-
- tags := []interface{}{tag1[0], tag1[1], tag2, tag3[0]}
-
- for _, tag := range tags {
- _, err := dORM.Insert(tag)
- throwFailNow(t, err)
- }
-
- num, err := m2m.Add(tag1)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
-
- num, err = m2m.Add(tag2)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Add(tag3)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 5))
-
- num, err = m2m.Remove(tag3)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 4))
-
- exist := m2m.Exist(tag2)
- throwFailNow(t, AssertIs(exist, true))
-
- num, err = m2m.Remove(tag2)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- exist = m2m.Exist(tag2)
- throwFailNow(t, AssertIs(exist, false))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
-
- num, err = m2m.Clear()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 0))
-
- tag := Tag{Name: "test"}
- _, err = dORM.Insert(&tag)
- throwFailNow(t, err)
-
- m2m = dORM.QueryM2M(&tag, "Posts")
-
- post1 := []*Post{{Title: "TestPost1"}, {Title: "TestPost2"}}
- post2 := &Post{Title: "TestPost3"}
- post3 := []interface{}{&Post{Title: "TestPost4"}}
-
- posts := []interface{}{post1[0], post1[1], post2, post3[0]}
-
- for _, post := range posts {
- p := post.(*Post)
- p.User = &User{ID: 1}
- _, err := dORM.Insert(post)
- throwFailNow(t, err)
- }
-
- num, err = m2m.Add(post1)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
-
- num, err = m2m.Add(post2)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Add(post3)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 4))
-
- num, err = m2m.Remove(post3)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
-
- exist = m2m.Exist(post2)
- throwFailNow(t, AssertIs(exist, true))
-
- num, err = m2m.Remove(post2)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- exist = m2m.Exist(post2)
- throwFailNow(t, AssertIs(exist, false))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
-
- num, err = m2m.Clear()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
-
- num, err = m2m.Count()
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 0))
-
- num, err = dORM.Delete(&tag)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-}
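
The TestQueryM2M function above walks through the whole QueryM2Mer surface (Add, Exist, Remove, Count, Clear). A condensed sketch of the same pattern, written in the same package as these tests against the Post and Tag test models, assuming a registered default database; the function name and tag name are illustrative only.

    // tagPost is a sketch only; Post and Tag are the models used by these tests.
    func tagPost(o Ormer) error {
        post := Post{ID: 4}
        m2m := o.QueryM2M(&post, "Tags") // operates on the post's Tags m2m field

        tag := &Tag{Name: "example-tag"}
        if _, err := o.Insert(tag); err != nil { // the tag row must exist first
            return err
        }
        if _, err := m2m.Add(tag); err != nil { // writes one row into the join table
            return err
        }
        if !m2m.Exist(tag) { // membership check against the join table
            return fmt.Errorf("tag was not attached")
        }
        if _, err := m2m.Remove(tag); err != nil { // removes just this relation
            return err
        }
        _, err := m2m.Clear() // removes every tag relation for this post
        return err
    }
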
-
-func TestQueryRelate(t *testing.T) {
- // post := &Post{Id: 2}
-
- // qs := dORM.QueryRelate(post, "Tags")
- // num, err := qs.Count()
- // throwFailNow(t, err)
- // throwFailNow(t, AssertIs(num, 2))
-
- // var tags []*Tag
- // num, err = qs.All(&tags)
- // throwFailNow(t, err)
- // throwFailNow(t, AssertIs(num, 2))
- // throwFailNow(t, AssertIs(tags[0].Name, "golang"))
-
- // num, err = dORM.QueryTable("Tag").Filter("Posts__Post", 2).Count()
- // throwFailNow(t, err)
- // throwFailNow(t, AssertIs(num, 2))
-}
-
-func TestPkManyRelated(t *testing.T) {
- permission := &Permission{Name: "readPosts"}
- err := dORM.Read(permission, "Name")
- throwFailNow(t, err)
-
- var groups []*Group
- qs := dORM.QueryTable("Group")
- num, err := qs.Filter("Permissions__Permission", permission.ID).All(&groups)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
-}
-
-func TestPrepareInsert(t *testing.T) {
- qs := dORM.QueryTable("user")
- i, err := qs.PrepareInsert()
- throwFailNow(t, err)
-
- var user User
- user.UserName = "testing1"
- num, err := i.Insert(&user)
- throwFail(t, err)
- throwFail(t, AssertIs(num > 0, true))
-
- user.UserName = "testing2"
- num, err = i.Insert(&user)
- throwFail(t, err)
- throwFail(t, AssertIs(num > 0, true))
-
- num, err = qs.Filter("user_name__in", "testing1", "testing2").Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 2))
-
- err = i.Close()
- throwFail(t, err)
- err = i.Close()
- throwFail(t, AssertIs(err, ErrStmtClosed))
-}
-
-func TestRawExec(t *testing.T) {
- Q := dDbBaser.TableQuote()
-
- query := fmt.Sprintf("UPDATE %suser%s SET %suser_name%s = ? WHERE %suser_name%s = ?", Q, Q, Q, Q, Q, Q)
- res, err := dORM.Raw(query, "testing", "slene").Exec()
- throwFail(t, err)
- num, err := res.RowsAffected()
- throwFail(t, AssertIs(num, 1), err)
-
- res, err = dORM.Raw(query, "slene", "testing").Exec()
- throwFail(t, err)
- num, err = res.RowsAffected()
- throwFail(t, AssertIs(num, 1), err)
-}
-
-func TestRawQueryRow(t *testing.T) {
- var (
- Boolean bool
- Char string
- Text string
- Time time.Time
- Date time.Time
- DateTime time.Time
- Byte byte
- Rune rune
- Int int
- Int8 int
- Int16 int16
- Int32 int32
- Int64 int64
- Uint uint
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- Float32 float32
- Float64 float64
- Decimal float64
- )
-
- dataValues := make(map[string]interface{}, len(DataValues))
-
- for k, v := range DataValues {
- dataValues[strings.ToLower(k)] = v
- }
-
- Q := dDbBaser.TableQuote()
-
- cols := []string{
- "id", "boolean", "char", "text", "time", "date", "datetime", "byte", "rune", "int", "int8", "int16", "int32",
- "int64", "uint", "uint8", "uint16", "uint32", "uint64", "float32", "float64", "decimal",
- }
- sep := fmt.Sprintf("%s, %s", Q, Q)
- query := fmt.Sprintf("SELECT %s%s%s FROM data WHERE id = ?", Q, strings.Join(cols, sep), Q)
- var id int
- values := []interface{}{
- &id, &Boolean, &Char, &Text, &Time, &Date, &DateTime, &Byte, &Rune, &Int, &Int8, &Int16, &Int32,
- &Int64, &Uint, &Uint8, &Uint16, &Uint32, &Uint64, &Float32, &Float64, &Decimal,
- }
- err := dORM.Raw(query, 1).QueryRow(values...)
- throwFailNow(t, err)
- for i, col := range cols {
- vu := values[i]
- v := reflect.ValueOf(vu).Elem().Interface()
- switch col {
- case "id":
- throwFail(t, AssertIs(id, 1))
- case "time":
- v = v.(time.Time).In(DefaultTimeLoc)
- value := dataValues[col].(time.Time).In(DefaultTimeLoc)
- throwFail(t, AssertIs(v, value, testTime))
- case "date":
- v = v.(time.Time).In(DefaultTimeLoc)
- value := dataValues[col].(time.Time).In(DefaultTimeLoc)
- throwFail(t, AssertIs(v, value, testDate))
- case "datetime":
- v = v.(time.Time).In(DefaultTimeLoc)
- value := dataValues[col].(time.Time).In(DefaultTimeLoc)
- throwFail(t, AssertIs(v, value, testDateTime))
- default:
- throwFail(t, AssertIs(v, dataValues[col]))
- }
- }
-
- var (
- uid int
- status *int
- pid *int
- )
-
- cols = []string{
- "id", "Status", "profile_id",
- }
- query = fmt.Sprintf("SELECT %s%s%s FROM %suser%s WHERE id = ?", Q, strings.Join(cols, sep), Q, Q, Q)
- err = dORM.Raw(query, 4).QueryRow(&uid, &status, &pid)
- throwFail(t, err)
- throwFail(t, AssertIs(uid, 4))
- throwFail(t, AssertIs(*status, 3))
- throwFail(t, AssertIs(pid, nil))
-
- // test for sql.Null* fields
- nData := &DataNull{
- NullString: sql.NullString{String: "test sql.null", Valid: true},
- NullBool: sql.NullBool{Bool: true, Valid: true},
- NullInt64: sql.NullInt64{Int64: 42, Valid: true},
- NullFloat64: sql.NullFloat64{Float64: 42.42, Valid: true},
- }
- newId, err := dORM.Insert(nData)
- throwFailNow(t, err)
-
- var nd *DataNull
- query = fmt.Sprintf("SELECT * FROM %sdata_null%s where id=?", Q, Q)
- err = dORM.Raw(query, newId).QueryRow(&nd)
- throwFailNow(t, err)
-
- throwFailNow(t, AssertNot(nd, nil))
- throwFail(t, AssertIs(nd.NullBool.Valid, true))
- throwFail(t, AssertIs(nd.NullBool.Bool, true))
- throwFail(t, AssertIs(nd.NullString.Valid, true))
- throwFail(t, AssertIs(nd.NullString.String, "test sql.null"))
- throwFail(t, AssertIs(nd.NullInt64.Valid, true))
- throwFail(t, AssertIs(nd.NullInt64.Int64, 42))
- throwFail(t, AssertIs(nd.NullFloat64.Valid, true))
- throwFail(t, AssertIs(nd.NullFloat64.Float64, 42.42))
-}
-
-// user_profile table
-type userProfile struct {
- User
- Age int
- Money float64
-}
-
-func TestQueryRows(t *testing.T) {
- Q := dDbBaser.TableQuote()
-
- var datas []*Data
-
- query := fmt.Sprintf("SELECT * FROM %sdata%s", Q, Q)
- num, err := dORM.Raw(query).QueryRows(&datas)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(len(datas), 1))
-
- ind := reflect.Indirect(reflect.ValueOf(datas[0]))
-
- for name, value := range DataValues {
- e := ind.FieldByName(name)
- vu := e.Interface()
- switch name {
- case "Time":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testTime)
- case "Date":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDate)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDate)
- case "DateTime":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- }
- throwFail(t, AssertIs(vu == value, true), value, vu)
- }
-
- var datas2 []Data
-
- query = fmt.Sprintf("SELECT * FROM %sdata%s", Q, Q)
- num, err = dORM.Raw(query).QueryRows(&datas2)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
- throwFailNow(t, AssertIs(len(datas2), 1))
-
- ind = reflect.Indirect(reflect.ValueOf(datas2[0]))
-
- for name, value := range DataValues {
- e := ind.FieldByName(name)
- vu := e.Interface()
- switch name {
- case "Time":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testTime)
- case "Date":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDate)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDate)
- case "DateTime":
- vu = vu.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- value = value.(time.Time).In(DefaultTimeLoc).Format(testDateTime)
- }
- throwFail(t, AssertIs(vu == value, true), value, vu)
- }
-
- var ids []int
- var usernames []string
- query = fmt.Sprintf("SELECT %sid%s, %suser_name%s FROM %suser%s ORDER BY %sid%s ASC", Q, Q, Q, Q, Q, Q, Q, Q)
- num, err = dORM.Raw(query).QueryRows(&ids, &usernames)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 3))
- throwFailNow(t, AssertIs(len(ids), 3))
- throwFailNow(t, AssertIs(ids[0], 2))
- throwFailNow(t, AssertIs(usernames[0], "slene"))
- throwFailNow(t, AssertIs(ids[1], 3))
- throwFailNow(t, AssertIs(usernames[1], "astaxie"))
- throwFailNow(t, AssertIs(ids[2], 4))
- throwFailNow(t, AssertIs(usernames[2], "nobody"))
-
- // test query rows by nested struct
- var l []userProfile
- query = fmt.Sprintf("SELECT * FROM %suser_profile%s LEFT JOIN %suser%s ON %suser_profile%s.%sid%s = %suser%s.%sid%s", Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q)
- num, err = dORM.Raw(query).QueryRows(&l)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 2))
- throwFailNow(t, AssertIs(len(l), 2))
- throwFailNow(t, AssertIs(l[0].UserName, "slene"))
- throwFailNow(t, AssertIs(l[0].Age, 28))
- throwFailNow(t, AssertIs(l[1].UserName, "astaxie"))
- throwFailNow(t, AssertIs(l[1].Age, 30))
-
- // test for sql.Null* fields
- nData := &DataNull{
- NullString: sql.NullString{String: "test sql.null", Valid: true},
- NullBool: sql.NullBool{Bool: true, Valid: true},
- NullInt64: sql.NullInt64{Int64: 42, Valid: true},
- NullFloat64: sql.NullFloat64{Float64: 42.42, Valid: true},
- }
- newId, err := dORM.Insert(nData)
- throwFailNow(t, err)
-
- var nDataList []*DataNull
- query = fmt.Sprintf("SELECT * FROM %sdata_null%s where id=?", Q, Q)
- num, err = dORM.Raw(query, newId).QueryRows(&nDataList)
- throwFailNow(t, err)
- throwFailNow(t, AssertIs(num, 1))
-
- nd := nDataList[0]
- throwFailNow(t, AssertNot(nd, nil))
- throwFail(t, AssertIs(nd.NullBool.Valid, true))
- throwFail(t, AssertIs(nd.NullBool.Bool, true))
- throwFail(t, AssertIs(nd.NullString.Valid, true))
- throwFail(t, AssertIs(nd.NullString.String, "test sql.null"))
- throwFail(t, AssertIs(nd.NullInt64.Valid, true))
- throwFail(t, AssertIs(nd.NullInt64.Int64, 42))
- throwFail(t, AssertIs(nd.NullFloat64.Valid, true))
- throwFail(t, AssertIs(nd.NullFloat64.Float64, 42.42))
-}
-
-func TestRawValues(t *testing.T) {
- Q := dDbBaser.TableQuote()
-
- var maps []Params
- query := fmt.Sprintf("SELECT %suser_name%s FROM %suser%s WHERE %sStatus%s = ?", Q, Q, Q, Q, Q, Q)
- num, err := dORM.Raw(query, 1).Values(&maps)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- if num == 1 {
- throwFail(t, AssertIs(maps[0]["user_name"], "slene"))
- }
-
- var lists []ParamsList
- num, err = dORM.Raw(query, 1).ValuesList(&lists)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- if num == 1 {
- throwFail(t, AssertIs(lists[0][0], "slene"))
- }
-
- query = fmt.Sprintf("SELECT %sprofile_id%s FROM %suser%s ORDER BY %sid%s ASC", Q, Q, Q, Q, Q, Q)
- var list ParamsList
- num, err = dORM.Raw(query).ValuesFlat(&list)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- if num == 3 {
- throwFail(t, AssertIs(list[0], "2"))
- throwFail(t, AssertIs(list[1], "3"))
- throwFail(t, AssertIs(list[2], nil))
- }
-}
-
-func TestRawPrepare(t *testing.T) {
- switch {
- case IsMysql || IsSqlite:
-
- pre, err := dORM.Raw("INSERT INTO tag (name) VALUES (?)").Prepare()
- throwFail(t, err)
- if pre != nil {
- r, err := pre.Exec("name1")
- throwFail(t, err)
-
- tid, err := r.LastInsertId()
- throwFail(t, err)
- throwFail(t, AssertIs(tid > 0, true))
-
- r, err = pre.Exec("name2")
- throwFail(t, err)
-
- id, err := r.LastInsertId()
- throwFail(t, err)
- throwFail(t, AssertIs(id, tid+1))
-
- r, err = pre.Exec("name3")
- throwFail(t, err)
-
- id, err = r.LastInsertId()
- throwFail(t, err)
- throwFail(t, AssertIs(id, tid+2))
-
- err = pre.Close()
- throwFail(t, err)
-
- res, err := dORM.Raw("DELETE FROM tag WHERE name IN (?, ?, ?)", []string{"name1", "name2", "name3"}).Exec()
- throwFail(t, err)
-
- num, err := res.RowsAffected()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- }
-
- case IsPostgres:
-
- pre, err := dORM.Raw(`INSERT INTO "tag" ("name") VALUES (?) RETURNING "id"`).Prepare()
- throwFail(t, err)
- if pre != nil {
- _, err := pre.Exec("name1")
- throwFail(t, err)
-
- _, err = pre.Exec("name2")
- throwFail(t, err)
-
- _, err = pre.Exec("name3")
- throwFail(t, err)
-
- err = pre.Close()
- throwFail(t, err)
-
- res, err := dORM.Raw(`DELETE FROM "tag" WHERE "name" IN (?, ?, ?)`, []string{"name1", "name2", "name3"}).Exec()
- throwFail(t, err)
-
- if err == nil {
- num, err := res.RowsAffected()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
- }
- }
- }
-}
-
-func TestUpdate(t *testing.T) {
- qs := dORM.QueryTable("user")
- num, err := qs.Filter("user_name", "slene").Filter("is_staff", false).Update(Params{
- "is_staff": true,
- "is_active": true,
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- // with join
- num, err = qs.Filter("user_name", "slene").Filter("profile__age", 28).Filter("is_staff", true).Update(Params{
- "is_staff": false,
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name", "slene").Update(Params{
- "Nums": ColValue(ColAdd, 100),
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name", "slene").Update(Params{
- "Nums": ColValue(ColMinus, 50),
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name", "slene").Update(Params{
- "Nums": ColValue(ColMultiply, 3),
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = qs.Filter("user_name", "slene").Update(Params{
- "Nums": ColValue(ColExcept, 5),
- })
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- user := User{UserName: "slene"}
- err = dORM.Read(&user, "UserName")
- throwFail(t, err)
- throwFail(t, AssertIs(user.Nums, 30))
-}
-
-func TestDelete(t *testing.T) {
- qs := dORM.QueryTable("user_profile")
- num, err := qs.Filter("user__user_name", "slene").Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- qs = dORM.QueryTable("user")
- num, err = qs.Filter("user_name", "slene").Filter("profile__isnull", true).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- qs = dORM.QueryTable("comment")
- num, err = qs.Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 6))
-
- qs = dORM.QueryTable("post")
- num, err = qs.Filter("Id", 3).Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- qs = dORM.QueryTable("comment")
- num, err = qs.Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 4))
-
- qs = dORM.QueryTable("comment")
- num, err = qs.Filter("Post__User", 3).Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- qs = dORM.QueryTable("comment")
- num, err = qs.Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestTransaction(t *testing.T) {
-	// this test only works when the database supports transactions
-
- o := NewOrm()
- err := o.Begin()
- throwFail(t, err)
-
- var names = []string{"1", "2", "3"}
-
- var tag Tag
- tag.Name = names[0]
- id, err := o.Insert(&tag)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
- num, err := o.QueryTable("tag").Filter("name", "golang").Update(Params{"name": names[1]})
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- switch {
- case IsMysql || IsSqlite:
- res, err := o.Raw("INSERT INTO tag (name) VALUES (?)", names[2]).Exec()
- throwFail(t, err)
- if err == nil {
- id, err = res.LastInsertId()
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
- }
- }
-
- err = o.Rollback()
- throwFail(t, err)
-
- num, err = o.QueryTable("tag").Filter("name__in", names).Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 0))
-
- err = o.Begin()
- throwFail(t, err)
-
- tag.Name = "commit"
- id, err = o.Insert(&tag)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
-	err = o.Commit()
- throwFail(t, err)
-
- num, err = o.QueryTable("tag").Filter("name", "commit").Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
-}
-
-func TestTransactionIsolationLevel(t *testing.T) {
-	// this test only works when the database supports transaction isolation levels
- if IsSqlite {
- return
- }
-
- o1 := NewOrm()
- o2 := NewOrm()
-
-	// start two transactions with the repeatable-read isolation level
- err := o1.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
- throwFail(t, err)
- err = o2.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
- throwFail(t, err)
-
- // o1 insert tag
- var tag Tag
- tag.Name = "test-transaction"
- id, err := o1.Insert(&tag)
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
- // o2 query tag table, no result
- num, err := o2.QueryTable("tag").Filter("name", "test-transaction").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 0))
-
- // o1 commit
- o1.Commit()
-
- // o2 query tag table, still no result
- num, err = o2.QueryTable("tag").Filter("name", "test-transaction").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 0))
-
- // o2 commit and query tag table, get the result
- o2.Commit()
- num, err = o2.QueryTable("tag").Filter("name", "test-transaction").Count()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-
- num, err = o1.QueryTable("tag").Filter("name", "test-transaction").Delete()
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestBeginTxWithContextCanceled(t *testing.T) {
- o := NewOrm()
- ctx, cancel := context.WithCancel(context.Background())
- o.BeginTx(ctx, nil)
- id, err := o.Insert(&Tag{Name: "test-context"})
- throwFail(t, err)
- throwFail(t, AssertIs(id > 0, true))
-
- // cancel the context before commit to make it error
- cancel()
- err = o.Commit()
- throwFail(t, AssertIs(err, context.Canceled))
-}
-
-func TestReadOrCreate(t *testing.T) {
- u := &User{
- UserName: "Kyle",
- Email: "kylemcc@gmail.com",
- Password: "other_pass",
- Status: 7,
- IsStaff: false,
- IsActive: true,
- }
-
- created, pk, err := dORM.ReadOrCreate(u, "UserName")
- throwFail(t, err)
- throwFail(t, AssertIs(created, true))
- throwFail(t, AssertIs(u.ID, pk))
- throwFail(t, AssertIs(u.UserName, "Kyle"))
- throwFail(t, AssertIs(u.Email, "kylemcc@gmail.com"))
- throwFail(t, AssertIs(u.Password, "other_pass"))
- throwFail(t, AssertIs(u.Status, 7))
- throwFail(t, AssertIs(u.IsStaff, false))
- throwFail(t, AssertIs(u.IsActive, true))
- throwFail(t, AssertIs(u.Created.In(DefaultTimeLoc), u.Created.In(DefaultTimeLoc), testDate))
- throwFail(t, AssertIs(u.Updated.In(DefaultTimeLoc), u.Updated.In(DefaultTimeLoc), testDateTime))
-
- nu := &User{UserName: u.UserName, Email: "someotheremail@gmail.com"}
- created, pk, err = dORM.ReadOrCreate(nu, "UserName")
- throwFail(t, err)
- throwFail(t, AssertIs(created, false))
- throwFail(t, AssertIs(nu.ID, u.ID))
- throwFail(t, AssertIs(pk, u.ID))
- throwFail(t, AssertIs(nu.UserName, u.UserName))
- throwFail(t, AssertIs(nu.Email, u.Email)) // should contain the value in the table, not the one specified above
- throwFail(t, AssertIs(nu.Password, u.Password))
- throwFail(t, AssertIs(nu.Status, u.Status))
- throwFail(t, AssertIs(nu.IsStaff, u.IsStaff))
- throwFail(t, AssertIs(nu.IsActive, u.IsActive))
-
- dORM.Delete(u)
-}
-
-func TestInLine(t *testing.T) {
- name := "inline"
- email := "hello@go.com"
- inline := NewInLine()
- inline.Name = name
- inline.Email = email
-
- id, err := dORM.Insert(inline)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 1))
-
- il := NewInLine()
- il.ID = 1
- err = dORM.Read(il)
- throwFail(t, err)
-
- throwFail(t, AssertIs(il.Name, name))
- throwFail(t, AssertIs(il.Email, email))
- throwFail(t, AssertIs(il.Created.In(DefaultTimeLoc), inline.Created.In(DefaultTimeLoc), testDate))
- throwFail(t, AssertIs(il.Updated.In(DefaultTimeLoc), inline.Updated.In(DefaultTimeLoc), testDateTime))
-}
-
-func TestInLineOneToOne(t *testing.T) {
- name := "121"
- email := "121@go.com"
- inline := NewInLine()
- inline.Name = name
- inline.Email = email
-
- id, err := dORM.Insert(inline)
- throwFail(t, err)
- throwFail(t, AssertIs(id, 2))
-
- note := "one2one"
- il121 := NewInLineOneToOne()
- il121.Note = note
- il121.InLine = inline
- _, err = dORM.Insert(il121)
- throwFail(t, err)
- throwFail(t, AssertIs(il121.ID, 1))
-
- il := NewInLineOneToOne()
- err = dORM.QueryTable(il).Filter("Id", 1).RelatedSel().One(il)
-
- throwFail(t, err)
- throwFail(t, AssertIs(il.Note, note))
- throwFail(t, AssertIs(il.InLine.ID, id))
- throwFail(t, AssertIs(il.InLine.Name, name))
- throwFail(t, AssertIs(il.InLine.Email, email))
-
- rinline := NewInLine()
- err = dORM.QueryTable(rinline).Filter("InLineOneToOne__Id", 1).One(rinline)
-
- throwFail(t, err)
- throwFail(t, AssertIs(rinline.ID, id))
- throwFail(t, AssertIs(rinline.Name, name))
- throwFail(t, AssertIs(rinline.Email, email))
-}
-
-func TestIntegerPk(t *testing.T) {
- its := []IntegerPk{
- {ID: math.MinInt64, Value: "-"},
- {ID: 0, Value: "0"},
- {ID: math.MaxInt64, Value: "+"},
- }
-
- num, err := dORM.InsertMulti(len(its), its)
- throwFail(t, err)
- throwFail(t, AssertIs(num, len(its)))
-
- for _, intPk := range its {
- out := IntegerPk{ID: intPk.ID}
- err = dORM.Read(&out)
- throwFail(t, err)
- throwFail(t, AssertIs(out.Value, intPk.Value))
- }
-
- num, err = dORM.InsertMulti(1, []*IntegerPk{{
- ID: 1, Value: "ok",
- }})
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestInsertAuto(t *testing.T) {
- u := &User{
- UserName: "autoPre",
- Email: "autoPre@gmail.com",
- }
-
- id, err := dORM.Insert(u)
- throwFail(t, err)
-
- id += 100
- su := &User{
- ID: int(id),
- UserName: "auto",
- Email: "auto@gmail.com",
- }
-
- nid, err := dORM.Insert(su)
- throwFail(t, err)
- throwFail(t, AssertIs(nid, id))
-
- users := []User{
- {ID: int(id + 100), UserName: "auto_100"},
- {ID: int(id + 110), UserName: "auto_110"},
- {ID: int(id + 120), UserName: "auto_120"},
- }
- num, err := dORM.InsertMulti(100, users)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 3))
-
- u = &User{
- UserName: "auto_121",
- }
-
- nid, err = dORM.Insert(u)
- throwFail(t, err)
- throwFail(t, AssertIs(nid, id+120+1))
-}
-
-func TestUintPk(t *testing.T) {
- name := "go"
- u := &UintPk{
- ID: 8,
- Name: name,
- }
-
- created, _, err := dORM.ReadOrCreate(u, "ID")
- throwFail(t, err)
- throwFail(t, AssertIs(created, true))
- throwFail(t, AssertIs(u.Name, name))
-
- nu := &UintPk{ID: 8}
- created, pk, err := dORM.ReadOrCreate(nu, "ID")
- throwFail(t, err)
- throwFail(t, AssertIs(created, false))
- throwFail(t, AssertIs(nu.ID, u.ID))
- throwFail(t, AssertIs(pk, u.ID))
- throwFail(t, AssertIs(nu.Name, name))
-
- dORM.Delete(u)
-}
-
-func TestPtrPk(t *testing.T) {
- parent := &IntegerPk{ID: 10, Value: "10"}
-
- id, _ := dORM.Insert(parent)
- if !IsMysql {
- // MySql does not support last_insert_id in this case: see #2382
- throwFail(t, AssertIs(id, 10))
- }
-
- ptr := PtrPk{ID: parent, Positive: true}
- num, err := dORM.InsertMulti(2, []PtrPk{ptr})
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- throwFail(t, AssertIs(ptr.ID, parent))
-
- nptr := &PtrPk{ID: parent}
- created, pk, err := dORM.ReadOrCreate(nptr, "ID")
- throwFail(t, err)
- throwFail(t, AssertIs(created, false))
- throwFail(t, AssertIs(pk, 10))
- throwFail(t, AssertIs(nptr.ID, parent))
- throwFail(t, AssertIs(nptr.Positive, true))
-
- nptr = &PtrPk{Positive: true}
- created, pk, err = dORM.ReadOrCreate(nptr, "Positive")
- throwFail(t, err)
- throwFail(t, AssertIs(created, false))
- throwFail(t, AssertIs(pk, 10))
- throwFail(t, AssertIs(nptr.ID, parent))
-
- nptr.Positive = false
- num, err = dORM.Update(nptr)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
- throwFail(t, AssertIs(nptr.ID, parent))
- throwFail(t, AssertIs(nptr.Positive, false))
-
- num, err = dORM.Delete(nptr)
- throwFail(t, err)
- throwFail(t, AssertIs(num, 1))
-}
-
-func TestSnake(t *testing.T) {
- cases := map[string]string{
- "i": "i",
- "I": "i",
- "iD": "i_d",
- "ID": "i_d",
- "NO": "n_o",
- "NOO": "n_o_o",
- "NOOooOOoo": "n_o_ooo_o_ooo",
- "OrderNO": "order_n_o",
- "tagName": "tag_name",
- "tag_Name": "tag__name",
- "tag_name": "tag_name",
- "_tag_name": "_tag_name",
- "tag_666name": "tag_666name",
- "tag_666Name": "tag_666_name",
- }
- for name, want := range cases {
- got := snakeString(name)
- throwFail(t, AssertIs(got, want))
- }
-}
-
-func TestIgnoreCaseTag(t *testing.T) {
- type testTagModel struct {
- ID int `orm:"pk"`
- NOO string `orm:"column(n)"`
- Name01 string `orm:"NULL"`
- Name02 string `orm:"COLUMN(Name)"`
- Name03 string `orm:"Column(name)"`
- }
- modelCache.clean()
- RegisterModel(&testTagModel{})
- info, ok := modelCache.get("test_tag_model")
- throwFail(t, AssertIs(ok, true))
- throwFail(t, AssertNot(info, nil))
-	if info == nil {
- return
- }
- throwFail(t, AssertIs(info.fields.GetByName("NOO").column, "n"))
- throwFail(t, AssertIs(info.fields.GetByName("Name01").null, true))
- throwFail(t, AssertIs(info.fields.GetByName("Name02").column, "Name"))
- throwFail(t, AssertIs(info.fields.GetByName("Name03").column, "name"))
-}
-
-func TestInsertOrUpdate(t *testing.T) {
- RegisterModel(new(User))
- user := User{UserName: "unique_username133", Status: 1, Password: "o"}
- user1 := User{UserName: "unique_username133", Status: 2, Password: "o"}
- user2 := User{UserName: "unique_username133", Status: 3, Password: "oo"}
- dORM.Insert(&user)
- test := User{UserName: "unique_username133"}
- fmt.Println(dORM.Driver().Name())
- if dORM.Driver().Name() == "sqlite3" {
-		fmt.Println("sqlite3 does not support InsertOrUpdate")
- return
- }
- // test1
- _, err := dORM.InsertOrUpdate(&user1, "user_name")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs(user1.Status, test.Status))
- }
- // test2
- _, err = dORM.InsertOrUpdate(&user2, "user_name")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs(user2.Status, test.Status))
- throwFailNow(t, AssertIs(user2.Password, strings.TrimSpace(test.Password)))
- }
-
-	// postgres: ON CONFLICT DO UPDATE SET cannot use expressions like colu=colu+value
- if IsPostgres {
- return
- }
- // test3 +
- _, err = dORM.InsertOrUpdate(&user2, "user_name", "status=status+1")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs(user2.Status+1, test.Status))
- }
- // test4 -
- _, err = dORM.InsertOrUpdate(&user2, "user_name", "status=status-1")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs((user2.Status+1)-1, test.Status))
- }
- // test5 *
- _, err = dORM.InsertOrUpdate(&user2, "user_name", "status=status*3")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs(((user2.Status+1)-1)*3, test.Status))
- }
- // test6 /
- _, err = dORM.InsertOrUpdate(&user2, "user_name", "Status=Status/3")
- if err != nil {
- fmt.Println(err)
- if err.Error() == "postgres version must 9.5 or higher" || err.Error() == "`sqlite3` nonsupport InsertOrUpdate in beego" {
- } else {
- throwFailNow(t, err)
- }
- } else {
- dORM.Read(&test, "user_name")
- throwFailNow(t, AssertIs((((user2.Status+1)-1)*3)/3, test.Status))
- }
-}
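The deleted tests above exercise raw queries, transactions and column-expression updates against the legacy ORM. Below is a minimal, standalone sketch of that same surface; the Tag model, MySQL driver and DSN are illustrative assumptions, not part of the original fixtures.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
	_ "github.com/go-sql-driver/mysql" // driver choice is an assumption
)

// Tag mirrors the shape used by the tests; field names are illustrative.
type Tag struct {
	Id   int
	Name string
}

func init() {
	orm.RegisterModel(new(Tag))
	// Placeholder DSN; adjust for a real environment.
	orm.RegisterDataBase("default", "mysql", "user:pass@/orm_test?charset=utf8")
}

func main() {
	o := orm.NewOrm()

	// Transaction: an insert followed by Rollback leaves no row behind,
	// which is what TestTransaction asserts with a Count of 0.
	if err := o.Begin(); err != nil {
		panic(err)
	}
	if _, err := o.Insert(&Tag{Name: "tmp"}); err != nil {
		o.Rollback()
		panic(err)
	}
	if err := o.Rollback(); err != nil {
		panic(err)
	}

	// Raw query mapped into []orm.Params, as in TestRawValues.
	var rows []orm.Params
	num, err := o.Raw("SELECT name FROM tag WHERE id > ?", 0).Values(&rows)
	fmt.Println(num, err, rows)
}
```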
diff --git a/orm/qb.go b/orm/qb.go
deleted file mode 100644
index e0655a17..00000000
--- a/orm/qb.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import "errors"
-
-// QueryBuilder is the Query builder interface
-type QueryBuilder interface {
- Select(fields ...string) QueryBuilder
- ForUpdate() QueryBuilder
- From(tables ...string) QueryBuilder
- InnerJoin(table string) QueryBuilder
- LeftJoin(table string) QueryBuilder
- RightJoin(table string) QueryBuilder
- On(cond string) QueryBuilder
- Where(cond string) QueryBuilder
- And(cond string) QueryBuilder
- Or(cond string) QueryBuilder
- In(vals ...string) QueryBuilder
- OrderBy(fields ...string) QueryBuilder
- Asc() QueryBuilder
- Desc() QueryBuilder
- Limit(limit int) QueryBuilder
- Offset(offset int) QueryBuilder
- GroupBy(fields ...string) QueryBuilder
- Having(cond string) QueryBuilder
- Update(tables ...string) QueryBuilder
- Set(kv ...string) QueryBuilder
- Delete(tables ...string) QueryBuilder
- InsertInto(table string, fields ...string) QueryBuilder
- Values(vals ...string) QueryBuilder
- Subquery(sub string, alias string) string
- String() string
-}
-
-// NewQueryBuilder returns a QueryBuilder for the given driver
-func NewQueryBuilder(driver string) (qb QueryBuilder, err error) {
- if driver == "mysql" {
- qb = new(MySQLQueryBuilder)
- } else if driver == "tidb" {
- qb = new(TiDBQueryBuilder)
- } else if driver == "postgres" {
- err = errors.New("postgres query builder is not supported yet")
- } else if driver == "sqlite" {
- err = errors.New("sqlite query builder is not supported yet")
- } else {
- err = errors.New("unknown driver for query builder")
- }
- return
-}
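A short sketch of how this interface is normally obtained and used; per NewQueryBuilder above, only "mysql" and "tidb" return a builder, while "postgres", "sqlite" and unknown names return an error.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
)

func main() {
	qb, err := orm.NewQueryBuilder("mysql")
	if err != nil {
		panic(err) // "postgres", "sqlite" and unknown drivers land here
	}

	// Each call appends tokens; String() joins them into the final SQL.
	qb.Select("id", "name").
		From("user").
		Where("age > ?"). // placeholders are bound later, e.g. via Ormer.Raw
		OrderBy("name").
		Desc().
		Limit(10)

	// Prints: SELECT id, name FROM user WHERE age > ? ORDER BY name DESC LIMIT 10
	fmt.Println(qb.String())
}
```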
diff --git a/orm/qb_mysql.go b/orm/qb_mysql.go
deleted file mode 100644
index 23bdc9ee..00000000
--- a/orm/qb_mysql.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// CommaSpace is the separator used when joining fields, tables and values
-const CommaSpace = ", "
-
-// MySQLQueryBuilder builds SQL statements for MySQL by accumulating tokens
-type MySQLQueryBuilder struct {
- Tokens []string
-}
-
-// Select will join the fields
-func (qb *MySQLQueryBuilder) Select(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "SELECT", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// ForUpdate add the FOR UPDATE clause
-func (qb *MySQLQueryBuilder) ForUpdate() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "FOR UPDATE")
- return qb
-}
-
-// From join the tables
-func (qb *MySQLQueryBuilder) From(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "FROM", strings.Join(tables, CommaSpace))
- return qb
-}
-
-// InnerJoin INNER JOIN the table
-func (qb *MySQLQueryBuilder) InnerJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "INNER JOIN", table)
- return qb
-}
-
-// LeftJoin LEFT JOIN the table
-func (qb *MySQLQueryBuilder) LeftJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "LEFT JOIN", table)
- return qb
-}
-
-// RightJoin RIGHT JOIN the table
-func (qb *MySQLQueryBuilder) RightJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "RIGHT JOIN", table)
- return qb
-}
-
-// On join with on cond
-func (qb *MySQLQueryBuilder) On(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ON", cond)
- return qb
-}
-
-// Where join the Where cond
-func (qb *MySQLQueryBuilder) Where(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "WHERE", cond)
- return qb
-}
-
-// And join the and cond
-func (qb *MySQLQueryBuilder) And(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "AND", cond)
- return qb
-}
-
-// Or join the or cond
-func (qb *MySQLQueryBuilder) Or(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "OR", cond)
- return qb
-}
-
-// In join the IN (vals)
-func (qb *MySQLQueryBuilder) In(vals ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
- return qb
-}
-
-// OrderBy join the Order by fields
-func (qb *MySQLQueryBuilder) OrderBy(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ORDER BY", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// Asc join the asc
-func (qb *MySQLQueryBuilder) Asc() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ASC")
- return qb
-}
-
-// Desc join the desc
-func (qb *MySQLQueryBuilder) Desc() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "DESC")
- return qb
-}
-
-// Limit join the limit num
-func (qb *MySQLQueryBuilder) Limit(limit int) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "LIMIT", strconv.Itoa(limit))
- return qb
-}
-
-// Offset join the offset num
-func (qb *MySQLQueryBuilder) Offset(offset int) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "OFFSET", strconv.Itoa(offset))
- return qb
-}
-
-// GroupBy join the Group by fields
-func (qb *MySQLQueryBuilder) GroupBy(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "GROUP BY", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// Having join the Having cond
-func (qb *MySQLQueryBuilder) Having(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "HAVING", cond)
- return qb
-}
-
-// Update join the update table
-func (qb *MySQLQueryBuilder) Update(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "UPDATE", strings.Join(tables, CommaSpace))
- return qb
-}
-
-// Set join the set kv
-func (qb *MySQLQueryBuilder) Set(kv ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "SET", strings.Join(kv, CommaSpace))
- return qb
-}
-
-// Delete join the Delete tables
-func (qb *MySQLQueryBuilder) Delete(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "DELETE")
- if len(tables) != 0 {
- qb.Tokens = append(qb.Tokens, strings.Join(tables, CommaSpace))
- }
- return qb
-}
-
-// InsertInto join the insert SQL
-func (qb *MySQLQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "INSERT INTO", table)
- if len(fields) != 0 {
- fieldsStr := strings.Join(fields, CommaSpace)
- qb.Tokens = append(qb.Tokens, "(", fieldsStr, ")")
- }
- return qb
-}
-
-// Values join the Values(vals)
-func (qb *MySQLQueryBuilder) Values(vals ...string) QueryBuilder {
- valsStr := strings.Join(vals, CommaSpace)
- qb.Tokens = append(qb.Tokens, "VALUES", "(", valsStr, ")")
- return qb
-}
-
-// Subquery join the sub as alias
-func (qb *MySQLQueryBuilder) Subquery(sub string, alias string) string {
- return fmt.Sprintf("(%s) AS %s", sub, alias)
-}
-
-// String join all Tokens
-func (qb *MySQLQueryBuilder) String() string {
- return strings.Join(qb.Tokens, " ")
-}
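Because the builder simply accumulates tokens and String() joins them with single spaces, the output of a chain is easy to predict. A small sketch using the exported struct directly; the table and column names are made up, and the resulting string is typically handed to Ormer.Raw together with the bound arguments.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
)

func main() {
	qb := &orm.MySQLQueryBuilder{}
	qb.Select("u.id", "u.name").
		From("user u").
		InnerJoin("profile p").On("p.user_id = u.id").
		Where("u.age > ?").
		OrderBy("u.id").Asc().
		Limit(5).Offset(10)

	// Prints:
	// SELECT u.id, u.name FROM user u INNER JOIN profile p ON p.user_id = u.id WHERE u.age > ? ORDER BY u.id ASC LIMIT 5 OFFSET 10
	fmt.Println(qb.String())
}
```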
diff --git a/orm/qb_tidb.go b/orm/qb_tidb.go
deleted file mode 100644
index 87b3ae84..00000000
--- a/orm/qb_tidb.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2015 TiDB Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// TiDBQueryBuilder builds SQL statements for TiDB by accumulating tokens
-type TiDBQueryBuilder struct {
- Tokens []string
-}
-
-// Select will join the fields
-func (qb *TiDBQueryBuilder) Select(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "SELECT", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// ForUpdate add the FOR UPDATE clause
-func (qb *TiDBQueryBuilder) ForUpdate() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "FOR UPDATE")
- return qb
-}
-
-// From join the tables
-func (qb *TiDBQueryBuilder) From(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "FROM", strings.Join(tables, CommaSpace))
- return qb
-}
-
-// InnerJoin INNER JOIN the table
-func (qb *TiDBQueryBuilder) InnerJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "INNER JOIN", table)
- return qb
-}
-
-// LeftJoin LEFT JOIN the table
-func (qb *TiDBQueryBuilder) LeftJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "LEFT JOIN", table)
- return qb
-}
-
-// RightJoin RIGHT JOIN the table
-func (qb *TiDBQueryBuilder) RightJoin(table string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "RIGHT JOIN", table)
- return qb
-}
-
-// On join with on cond
-func (qb *TiDBQueryBuilder) On(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ON", cond)
- return qb
-}
-
-// Where join the Where cond
-func (qb *TiDBQueryBuilder) Where(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "WHERE", cond)
- return qb
-}
-
-// And join the and cond
-func (qb *TiDBQueryBuilder) And(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "AND", cond)
- return qb
-}
-
-// Or join the or cond
-func (qb *TiDBQueryBuilder) Or(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "OR", cond)
- return qb
-}
-
-// In join the IN (vals)
-func (qb *TiDBQueryBuilder) In(vals ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
- return qb
-}
-
-// OrderBy join the Order by fields
-func (qb *TiDBQueryBuilder) OrderBy(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ORDER BY", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// Asc join the asc
-func (qb *TiDBQueryBuilder) Asc() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "ASC")
- return qb
-}
-
-// Desc join the desc
-func (qb *TiDBQueryBuilder) Desc() QueryBuilder {
- qb.Tokens = append(qb.Tokens, "DESC")
- return qb
-}
-
-// Limit join the limit num
-func (qb *TiDBQueryBuilder) Limit(limit int) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "LIMIT", strconv.Itoa(limit))
- return qb
-}
-
-// Offset join the offset num
-func (qb *TiDBQueryBuilder) Offset(offset int) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "OFFSET", strconv.Itoa(offset))
- return qb
-}
-
-// GroupBy join the Group by fields
-func (qb *TiDBQueryBuilder) GroupBy(fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "GROUP BY", strings.Join(fields, CommaSpace))
- return qb
-}
-
-// Having join the Having cond
-func (qb *TiDBQueryBuilder) Having(cond string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "HAVING", cond)
- return qb
-}
-
-// Update join the update table
-func (qb *TiDBQueryBuilder) Update(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "UPDATE", strings.Join(tables, CommaSpace))
- return qb
-}
-
-// Set join the set kv
-func (qb *TiDBQueryBuilder) Set(kv ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "SET", strings.Join(kv, CommaSpace))
- return qb
-}
-
-// Delete join the Delete tables
-func (qb *TiDBQueryBuilder) Delete(tables ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "DELETE")
- if len(tables) != 0 {
- qb.Tokens = append(qb.Tokens, strings.Join(tables, CommaSpace))
- }
- return qb
-}
-
-// InsertInto join the insert SQL
-func (qb *TiDBQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
- qb.Tokens = append(qb.Tokens, "INSERT INTO", table)
- if len(fields) != 0 {
- fieldsStr := strings.Join(fields, CommaSpace)
- qb.Tokens = append(qb.Tokens, "(", fieldsStr, ")")
- }
- return qb
-}
-
-// Values join the Values(vals)
-func (qb *TiDBQueryBuilder) Values(vals ...string) QueryBuilder {
- valsStr := strings.Join(vals, CommaSpace)
- qb.Tokens = append(qb.Tokens, "VALUES", "(", valsStr, ")")
- return qb
-}
-
-// Subquery join the sub as alias
-func (qb *TiDBQueryBuilder) Subquery(sub string, alias string) string {
- return fmt.Sprintf("(%s) AS %s", sub, alias)
-}
-
-// String join all Tokens
-func (qb *TiDBQueryBuilder) String() string {
- return strings.Join(qb.Tokens, " ")
-}
diff --git a/orm/types.go b/orm/types.go
deleted file mode 100644
index 75af7149..00000000
--- a/orm/types.go
+++ /dev/null
@@ -1,474 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "context"
- "database/sql"
- "reflect"
- "time"
-)
-
-// Driver defines the database driver
-type Driver interface {
- Name() string
- Type() DriverType
-}
-
-// Fielder define field info
-type Fielder interface {
- String() string
- FieldType() int
- SetRaw(interface{}) error
- RawValue() interface{}
-}
-
-// Ormer define the orm interface
-type Ormer interface {
- // read data to model
- // for example:
- // this will find User by Id field
- // u = &User{Id: user.Id}
- // err = Ormer.Read(u)
- // this will find User by UserName field
- // u = &User{UserName: "astaxie", Password: "pass"}
- // err = Ormer.Read(u, "UserName")
- Read(md interface{}, cols ...string) error
- // Like Read(), but with "FOR UPDATE" clause, useful in transaction.
-	// Some databases do not support this feature.
- ReadForUpdate(md interface{}, cols ...string) error
- // Try to read a row from the database, or insert one if it doesn't exist
- ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error)
- // insert model data to database
- // for example:
- // user := new(User)
- // id, err = Ormer.Insert(user)
- // user must be a pointer and Insert will set user's pk field
- Insert(interface{}) (int64, error)
-	// mysql: InsertOrUpdate(model) or InsertOrUpdate(model, "colu=colu+value")
-	// if the column type is integer the expression may use + - * /; for strings use convert(colu, "value")
-	// postgres: InsertOrUpdate(model, "conflictColumnName") or InsertOrUpdate(model, "conflictColumnName", "colu=colu+value")
-	// if the column type is integer the expression may use + - * /; for strings use colu || "value"
- InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64, error)
- // insert some models to database
- InsertMulti(bulk int, mds interface{}) (int64, error)
-	// update model to database.
-	// cols sets the columns to update.
-	// find the model by its pk field and update the columns given in cols; if cols is empty, update all columns
- // for example:
- // user := User{Id: 2}
- // user.Langs = append(user.Langs, "zh-CN", "en-US")
- // user.Extra.Name = "beego"
- // user.Extra.Data = "orm"
- // num, err = Ormer.Update(&user, "Langs", "Extra")
- Update(md interface{}, cols ...string) (int64, error)
- // delete model in database
- Delete(md interface{}, cols ...string) (int64, error)
- // load related models to md model.
- // args are limit, offset int and order string.
- //
- // example:
- // Ormer.LoadRelated(post,"Tags")
- // for _,tag := range post.Tags{...}
- //args[0] bool true useDefaultRelsDepth ; false depth 0
- //args[0] int loadRelationDepth
- //args[1] int limit default limit 1000
- //args[2] int offset default offset 0
- //args[3] string order for example : "-Id"
- // make sure the relation is defined in model struct tags.
- LoadRelated(md interface{}, name string, args ...interface{}) (int64, error)
- // create a models to models queryer
- // for example:
- // post := Post{Id: 4}
- // m2m := Ormer.QueryM2M(&post, "Tags")
- QueryM2M(md interface{}, name string) QueryM2Mer
- // return a QuerySeter for table operations.
- // table name can be string or struct.
- // e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
- QueryTable(ptrStructOrTableName interface{}) QuerySeter
- // switch to another registered database driver by given name.
- Using(name string) error
- // begin transaction
- // for example:
- // o := NewOrm()
- // err := o.Begin()
- // ...
- // err = o.Rollback()
- Begin() error
- // begin transaction with provided context and option
- // the provided context is used until the transaction is committed or rolled back.
- // if the context is canceled, the transaction will be rolled back.
- // the provided TxOptions is optional and may be nil if defaults should be used.
- // if a non-default isolation level is used that the driver doesn't support, an error will be returned.
- // for example:
- // o := NewOrm()
- // err := o.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
- // ...
- // err = o.Rollback()
- BeginTx(ctx context.Context, opts *sql.TxOptions) error
- // commit transaction
- Commit() error
- // rollback transaction
- Rollback() error
- // return a raw query seter for raw sql string.
- // for example:
- // ormer.Raw("UPDATE `user` SET `user_name` = ? WHERE `user_name` = ?", "slene", "testing").Exec()
- // // update user testing's name to slene
- Raw(query string, args ...interface{}) RawSeter
- Driver() Driver
- DBStats() *sql.DBStats
-}
-
-// Inserter insert prepared statement
-type Inserter interface {
- Insert(interface{}) (int64, error)
- Close() error
-}
-
-// QuerySeter query seter
-type QuerySeter interface {
- // add condition expression to QuerySeter.
- // for example:
- // filter by UserName == 'slene'
- // qs.Filter("UserName", "slene")
- // sql : left outer join profile on t0.id1==t1.id2 where t1.age == 28
- // Filter("profile__Age", 28)
- // // time compare
- // qs.Filter("created", time.Now())
- Filter(string, ...interface{}) QuerySeter
- // add raw sql to querySeter.
- // for example:
- // qs.FilterRaw("user_id IN (SELECT id FROM profile WHERE age>=18)")
- // //sql-> WHERE user_id IN (SELECT id FROM profile WHERE age>=18)
- FilterRaw(string, string) QuerySeter
- // add NOT condition to querySeter.
- // have the same usage as Filter
- Exclude(string, ...interface{}) QuerySeter
- // set condition to QuerySeter.
- // sql's where condition
- // cond := orm.NewCondition()
- // cond1 := cond.And("profile__isnull", false).AndNot("status__in", 1).Or("profile__age__gt", 2000)
- // //sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000
- // num, err := qs.SetCond(cond1).Count()
- SetCond(*Condition) QuerySeter
- // get condition from QuerySeter.
- // sql's where condition
- // cond := orm.NewCondition()
- // cond = cond.And("profile__isnull", false).AndNot("status__in", 1)
- // qs = qs.SetCond(cond)
- // cond = qs.GetCond()
- // cond := cond.Or("profile__age__gt", 2000)
- // //sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000
- // num, err := qs.SetCond(cond).Count()
- GetCond() *Condition
- // add LIMIT value.
- // args[0] means offset, e.g. LIMIT num,offset.
-	// if Limit <= 0 then Limit will be set to the default limit, e.g. 1000
-	// if the QuerySeter doesn't call Limit, the SQL LIMIT will be set to the default limit, e.g. 1000
- // for example:
- // qs.Limit(10, 2)
- // // sql-> limit 10 offset 2
- Limit(limit interface{}, args ...interface{}) QuerySeter
- // add OFFSET value
- // same as Limit function's args[0]
- Offset(offset interface{}) QuerySeter
- // add GROUP BY expression
- // for example:
- // qs.GroupBy("id")
- GroupBy(exprs ...string) QuerySeter
- // add ORDER expression.
- // "column" means ASC, "-column" means DESC.
- // for example:
- // qs.OrderBy("-status")
- OrderBy(exprs ...string) QuerySeter
- // set relation model to query together.
- // it will query relation models and assign to parent model.
- // for example:
- // // will load all related fields use left join .
- // qs.RelatedSel().One(&user)
- // // will load related field only profile
- // qs.RelatedSel("profile").One(&user)
- // user.Profile.Age = 32
- RelatedSel(params ...interface{}) QuerySeter
- // Set Distinct
- // for example:
- // o.QueryTable("policy").Filter("Groups__Group__Users__User", user).
- // Distinct().
- // All(&permissions)
- Distinct() QuerySeter
- // set FOR UPDATE to query.
- // for example:
- // o.QueryTable("user").Filter("uid", uid).ForUpdate().All(&users)
- ForUpdate() QuerySeter
- // return QuerySeter execution result number
- // for example:
- // num, err = qs.Filter("profile__age__gt", 28).Count()
- Count() (int64, error)
- // check result empty or not after QuerySeter executed
- // the same as QuerySeter.Count > 0
- Exist() bool
- // execute update with parameters
- // for example:
- // num, err = qs.Filter("user_name", "slene").Update(Params{
- // "Nums": ColValue(Col_Minus, 50),
- // }) // user slene's Nums will minus 50
- // num, err = qs.Filter("UserName", "slene").Update(Params{
- // "user_name": "slene2"
- // }) // user slene's name will change to slene2
- Update(values Params) (int64, error)
- // delete from table
- //for example:
- // num ,err = qs.Filter("user_name__in", "testing1", "testing2").Delete()
- // //delete two user who's name is testing1 or testing2
- Delete() (int64, error)
-	// return an insert queryer.
-	// it can be reused multiple times.
- // example:
- // i,err := sq.PrepareInsert()
- // num, err = i.Insert(&user1) // user table will add one record user1 at once
- // num, err = i.Insert(&user2) // user table will add one record user2 at once
- // err = i.Close() //don't forget call Close
- PrepareInsert() (Inserter, error)
- // query all data and map to containers.
- // cols means the columns when querying.
- // for example:
- // var users []*User
- // qs.All(&users) // users[0],users[1],users[2] ...
- All(container interface{}, cols ...string) (int64, error)
- // query one row data and map to containers.
- // cols means the columns when querying.
- // for example:
- // var user User
- // qs.One(&user) //user.UserName == "slene"
- One(container interface{}, cols ...string) error
- // query all data and map to []map[string]interface.
-	// exprs specify the columns to query.
- // it converts data to []map[column]value.
- // for example:
- // var maps []Params
- // qs.Values(&maps) //maps[0]["UserName"]=="slene"
- Values(results *[]Params, exprs ...string) (int64, error)
- // query all data and map to [][]interface
- // it converts data to [][column_index]value
- // for example:
- // var list []ParamsList
- // qs.ValuesList(&list) // list[0][1] == "slene"
- ValuesList(results *[]ParamsList, exprs ...string) (int64, error)
- // query all data and map to []interface.
-	// it's designed for a single-column record set; results become []value rather than [][column]value.
- // for example:
- // var list ParamsList
- // qs.ValuesFlat(&list, "UserName") // list[0] == "slene"
- ValuesFlat(result *ParamsList, expr string) (int64, error)
- // query all rows into map[string]interface with specify key and value column name.
- // keyCol = "name", valueCol = "value"
- // table data
- // name | value
- // total | 100
- // found | 200
- // to map[string]interface{}{
- // "total": 100,
- // "found": 200,
- // }
- RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
- // query all rows into struct with specify key and value column name.
- // keyCol = "name", valueCol = "value"
- // table data
- // name | value
- // total | 100
- // found | 200
- // to struct {
- // Total int
- // Found int
- // }
- RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)
-}
-
-// QueryM2Mer model to model query struct
-// all operations are on the m2m table only, will not affect the origin model table
-type QueryM2Mer interface {
- // add models to origin models when creating queryM2M.
- // example:
- // m2m := orm.QueryM2M(post,"Tag")
- // m2m.Add(&Tag1{},&Tag2{})
- // for _,tag := range post.Tags{}{ ... }
- // param could also be any of the follow
- // []*Tag{{Id:3,Name: "TestTag1"}, {Id:4,Name: "TestTag2"}}
- // &Tag{Id:5,Name: "TestTag3"}
- // []interface{}{&Tag{Id:6,Name: "TestTag4"}}
- // insert one or more rows to m2m table
- // make sure the relation is defined in post model struct tag.
- Add(...interface{}) (int64, error)
- // remove models following the origin model relationship
- // only delete rows from m2m table
- // for example:
- //tag3 := &Tag{Id:5,Name: "TestTag3"}
- //num, err = m2m.Remove(tag3)
- Remove(...interface{}) (int64, error)
-	// check whether the given model exists in the relationship of the origin model
-	Exist(interface{}) bool
-	// remove all related models of the origin model
-	Clear() (int64, error)
-	// count all related models of the origin model
-	Count() (int64, error)
-}
-
-// RawPreparer raw query statement
-type RawPreparer interface {
- Exec(...interface{}) (sql.Result, error)
- Close() error
-}
-
-// RawSeter raw query seter
-// create From Ormer.Raw
-// for example:
-// sql := fmt.Sprintf("SELECT %sid%s,%sname%s FROM %suser%s WHERE id = ?",Q,Q,Q,Q,Q,Q)
-// rs := Ormer.Raw(sql, 1)
-type RawSeter interface {
- //execute sql and get result
- Exec() (sql.Result, error)
- //query data and map to container
- //for example:
- // var name string
- // var id int
- // rs.QueryRow(&id,&name) // id==2 name=="slene"
- QueryRow(containers ...interface{}) error
-
- // query data rows and map to container
- // var ids []int
-	// var names []string
- // query = fmt.Sprintf("SELECT 'id','name' FROM %suser%s", Q, Q)
- // num, err = dORM.Raw(query).QueryRows(&ids,&names) // ids=>{1,2},names=>{"nobody","slene"}
- QueryRows(containers ...interface{}) (int64, error)
- SetArgs(...interface{}) RawSeter
- // query data to []map[string]interface
- // see QuerySeter's Values
- Values(container *[]Params, cols ...string) (int64, error)
- // query data to [][]interface
- // see QuerySeter's ValuesList
- ValuesList(container *[]ParamsList, cols ...string) (int64, error)
- // query data to []interface
- // see QuerySeter's ValuesFlat
- ValuesFlat(container *ParamsList, cols ...string) (int64, error)
- // query all rows into map[string]interface with specify key and value column name.
- // keyCol = "name", valueCol = "value"
- // table data
- // name | value
- // total | 100
- // found | 200
- // to map[string]interface{}{
- // "total": 100,
- // "found": 200,
- // }
- RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
- // query all rows into struct with specify key and value column name.
- // keyCol = "name", valueCol = "value"
- // table data
- // name | value
- // total | 100
- // found | 200
- // to struct {
- // Total int
- // Found int
- // }
- RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)
-
-	// return a prepared raw statement for repeated use.
- // for example:
- // pre, err := dORM.Raw("INSERT INTO tag (name) VALUES (?)").Prepare()
- // r, err := pre.Exec("name1") // INSERT INTO tag (name) VALUES (`name1`)
- Prepare() (RawPreparer, error)
-}
-
-// stmtQuerier statement querier
-type stmtQuerier interface {
- Close() error
- Exec(args ...interface{}) (sql.Result, error)
- //ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error)
- Query(args ...interface{}) (*sql.Rows, error)
- //QueryContext(args ...interface{}) (*sql.Rows, error)
- QueryRow(args ...interface{}) *sql.Row
- //QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row
-}
-
-// db querier
-type dbQuerier interface {
- Prepare(query string) (*sql.Stmt, error)
- PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
- Exec(query string, args ...interface{}) (sql.Result, error)
- ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
- Query(query string, args ...interface{}) (*sql.Rows, error)
- QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
- QueryRow(query string, args ...interface{}) *sql.Row
- QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
-}
-
-// type DB interface {
-// Begin() (*sql.Tx, error)
-// Prepare(query string) (stmtQuerier, error)
-// Exec(query string, args ...interface{}) (sql.Result, error)
-// Query(query string, args ...interface{}) (*sql.Rows, error)
-// QueryRow(query string, args ...interface{}) *sql.Row
-// }
-
-// transaction beginner
-type txer interface {
- Begin() (*sql.Tx, error)
- BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
-}
-
-// transaction ending
-type txEnder interface {
- Commit() error
- Rollback() error
-}
-
-// base database struct
-type dbBaser interface {
- Read(dbQuerier, *modelInfo, reflect.Value, *time.Location, []string, bool) error
- Insert(dbQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error)
- InsertOrUpdate(dbQuerier, *modelInfo, reflect.Value, *alias, ...string) (int64, error)
- InsertMulti(dbQuerier, *modelInfo, reflect.Value, int, *time.Location) (int64, error)
- InsertValue(dbQuerier, *modelInfo, bool, []string, []interface{}) (int64, error)
- InsertStmt(stmtQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error)
- Update(dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) (int64, error)
- Delete(dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) (int64, error)
- ReadBatch(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, *time.Location, []string) (int64, error)
- SupportUpdateJoin() bool
- UpdateBatch(dbQuerier, *querySet, *modelInfo, *Condition, Params, *time.Location) (int64, error)
- DeleteBatch(dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error)
- Count(dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error)
- OperatorSQL(string) string
- GenerateOperatorSQL(*modelInfo, *fieldInfo, string, []interface{}, *time.Location) (string, []interface{})
- GenerateOperatorLeftCol(*fieldInfo, string, *string)
- PrepareInsert(dbQuerier, *modelInfo) (stmtQuerier, string, error)
- ReadValues(dbQuerier, *querySet, *modelInfo, *Condition, []string, interface{}, *time.Location) (int64, error)
- RowsTo(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, string, string, *time.Location) (int64, error)
- MaxLimit() uint64
- TableQuote() string
- ReplaceMarks(*string)
- HasReturningID(*modelInfo, *string) bool
- TimeFromDB(*time.Time, *time.Location)
- TimeToDB(*time.Time, *time.Location)
- DbTypes() map[string]string
- GetTables(dbQuerier) (map[string]bool, error)
- GetColumns(dbQuerier, string) (map[string][3]string, error)
- ShowTablesQuery() string
- ShowColumnsQuery(string) string
- IndexExists(dbQuerier, string, string) bool
- collectFieldValue(*modelInfo, *fieldInfo, reflect.Value, bool, *time.Location) (interface{}, error)
- setval(dbQuerier, *modelInfo, []string) error
-}
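The interface comments above already include inline snippets; the sketch below simply strings the main contracts (Ormer, QuerySeter, RawSeter) together into one runnable shape, assuming the legacy import path. The User model, column names and DSN are illustrative.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
	_ "github.com/go-sql-driver/mysql" // driver choice is an assumption
)

type User struct {
	Id       int
	UserName string `orm:"size(30)"`
	Nums     int
}

func init() {
	orm.RegisterModel(new(User))
	orm.RegisterDataBase("default", "mysql", "user:pass@/orm_test?charset=utf8")
}

func main() {
	o := orm.NewOrm()

	// Ormer.Read: look a row up by a non-pk column.
	u := User{UserName: "slene"}
	if err := o.Read(&u, "UserName"); err == nil {
		fmt.Println(u.Id, u.Nums)
	}

	// QuerySeter: filter, then update with a column expression.
	num, err := o.QueryTable("user").
		Filter("user_name", "slene").
		Update(orm.Params{"Nums": orm.ColValue(orm.ColAdd, 100)})
	fmt.Println(num, err)

	// RawSeter: flatten a single column into a ParamsList.
	var names orm.ParamsList
	num, err = o.Raw("SELECT user_name FROM user").ValuesFlat(&names)
	fmt.Println(num, err, names)
}
```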
diff --git a/orm/utils.go b/orm/utils.go
deleted file mode 100644
index 3ff76772..00000000
--- a/orm/utils.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "fmt"
- "math/big"
- "reflect"
- "strconv"
- "strings"
- "time"
-)
-
-type fn func(string) string
-
-var (
- nameStrategyMap = map[string]fn{
- defaultNameStrategy: snakeString,
- SnakeAcronymNameStrategy: snakeStringWithAcronym,
- }
- defaultNameStrategy = "snakeString"
- SnakeAcronymNameStrategy = "snakeStringWithAcronym"
- nameStrategy = defaultNameStrategy
-)
-
-// StrTo is a string type with helpers that convert it to other basic types
-type StrTo string
-
-// Set string
-func (f *StrTo) Set(v string) {
- if v != "" {
- *f = StrTo(v)
- } else {
- f.Clear()
- }
-}
-
-// Clear string
-func (f *StrTo) Clear() {
- *f = StrTo(0x1E)
-}
-
-// Exist check string exist
-func (f StrTo) Exist() bool {
- return string(f) != string(0x1E)
-}
-
-// Bool string to bool
-func (f StrTo) Bool() (bool, error) {
- return strconv.ParseBool(f.String())
-}
-
-// Float32 string to float32
-func (f StrTo) Float32() (float32, error) {
- v, err := strconv.ParseFloat(f.String(), 32)
- return float32(v), err
-}
-
-// Float64 string to float64
-func (f StrTo) Float64() (float64, error) {
- return strconv.ParseFloat(f.String(), 64)
-}
-
-// Int string to int
-func (f StrTo) Int() (int, error) {
- v, err := strconv.ParseInt(f.String(), 10, 32)
- return int(v), err
-}
-
-// Int8 string to int8
-func (f StrTo) Int8() (int8, error) {
- v, err := strconv.ParseInt(f.String(), 10, 8)
- return int8(v), err
-}
-
-// Int16 string to int16
-func (f StrTo) Int16() (int16, error) {
- v, err := strconv.ParseInt(f.String(), 10, 16)
- return int16(v), err
-}
-
-// Int32 string to int32
-func (f StrTo) Int32() (int32, error) {
- v, err := strconv.ParseInt(f.String(), 10, 32)
- return int32(v), err
-}
-
-// Int64 string to int64
-func (f StrTo) Int64() (int64, error) {
- v, err := strconv.ParseInt(f.String(), 10, 64)
- if err != nil {
- i := new(big.Int)
-		ni, ok := i.SetString(f.String(), 10) // fall back to big.Int (base 10) for values that overflow int64
- if !ok {
- return v, err
- }
- return ni.Int64(), nil
- }
- return v, err
-}
-
-// Uint string to uint
-func (f StrTo) Uint() (uint, error) {
- v, err := strconv.ParseUint(f.String(), 10, 32)
- return uint(v), err
-}
-
-// Uint8 string to uint8
-func (f StrTo) Uint8() (uint8, error) {
- v, err := strconv.ParseUint(f.String(), 10, 8)
- return uint8(v), err
-}
-
-// Uint16 string to uint16
-func (f StrTo) Uint16() (uint16, error) {
- v, err := strconv.ParseUint(f.String(), 10, 16)
- return uint16(v), err
-}
-
-// Uint32 string to uint32
-func (f StrTo) Uint32() (uint32, error) {
- v, err := strconv.ParseUint(f.String(), 10, 32)
- return uint32(v), err
-}
-
-// Uint64 string to uint64
-func (f StrTo) Uint64() (uint64, error) {
- v, err := strconv.ParseUint(f.String(), 10, 64)
- if err != nil {
- i := new(big.Int)
- ni, ok := i.SetString(f.String(), 10)
- if !ok {
- return v, err
- }
- return ni.Uint64(), nil
- }
- return v, err
-}
-
-// String string to string
-func (f StrTo) String() string {
- if f.Exist() {
- return string(f)
- }
- return ""
-}
-
-// ToStr interface to string
-func ToStr(value interface{}, args ...int) (s string) {
- switch v := value.(type) {
- case bool:
- s = strconv.FormatBool(v)
- case float32:
- s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32))
- case float64:
- s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64))
- case int:
- s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
- case int8:
- s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
- case int16:
- s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
- case int32:
- s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
- case int64:
- s = strconv.FormatInt(v, argInt(args).Get(0, 10))
- case uint:
- s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
- case uint8:
- s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
- case uint16:
- s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
- case uint32:
- s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
- case uint64:
- s = strconv.FormatUint(v, argInt(args).Get(0, 10))
- case string:
- s = v
- case []byte:
- s = string(v)
- default:
- s = fmt.Sprintf("%v", v)
- }
- return s
-}
-
-// ToInt64 interface to int64
-func ToInt64(value interface{}) (d int64) {
- val := reflect.ValueOf(value)
- switch value.(type) {
- case int, int8, int16, int32, int64:
- d = val.Int()
- case uint, uint8, uint16, uint32, uint64:
- d = int64(val.Uint())
- default:
- panic(fmt.Errorf("ToInt64 need numeric not `%T`", value))
- }
- return
-}
-
-func snakeStringWithAcronym(s string) string {
- data := make([]byte, 0, len(s)*2)
- num := len(s)
- for i := 0; i < num; i++ {
- d := s[i]
- before := false
- after := false
- if i > 0 {
- before = s[i-1] >= 'a' && s[i-1] <= 'z'
- }
- if i+1 < num {
- after = s[i+1] >= 'a' && s[i+1] <= 'z'
- }
- if i > 0 && d >= 'A' && d <= 'Z' && (before || after) {
- data = append(data, '_')
- }
- data = append(data, d)
- }
- return strings.ToLower(string(data[:]))
-}
-
-// snakeString converts camel case to snake case: XxYy to xx_yy, XxYY to xx_y_y
-func snakeString(s string) string {
- data := make([]byte, 0, len(s)*2)
- j := false
- num := len(s)
- for i := 0; i < num; i++ {
- d := s[i]
- if i > 0 && d >= 'A' && d <= 'Z' && j {
- data = append(data, '_')
- }
- if d != '_' {
- j = true
- }
- data = append(data, d)
- }
- return strings.ToLower(string(data[:]))
-}
-
-// SetNameStrategy sets the column name strategy; unknown values fall back to the default
-func SetNameStrategy(s string) {
-	if s != SnakeAcronymNameStrategy {
-		s = defaultNameStrategy
-	}
-	nameStrategy = s
-}
-
-// camel string, xx_yy to XxYy
-func camelString(s string) string {
- data := make([]byte, 0, len(s))
- flag, num := true, len(s)-1
- for i := 0; i <= num; i++ {
- d := s[i]
- if d == '_' {
- flag = true
- continue
- } else if flag {
- if d >= 'a' && d <= 'z' {
- d = d - 32
- }
- flag = false
- }
- data = append(data, d)
- }
- return string(data[:])
-}
-
-type argString []string
-
-// get string by index from string slice
-func (a argString) Get(i int, args ...string) (r string) {
- if i >= 0 && i < len(a) {
- r = a[i]
- } else if len(args) > 0 {
- r = args[0]
- }
- return
-}
-
-type argInt []int
-
-// get int by index from int slice
-func (a argInt) Get(i int, args ...int) (r int) {
-	if i >= 0 && i < len(a) {
-		r = a[i]
-	} else if len(args) > 0 {
-		r = args[0]
-	}
- return
-}
-
-// parse time to string with location
-func timeParse(dateString, format string) (time.Time, error) {
- tp, err := time.ParseInLocation(format, dateString, DefaultTimeLoc)
- return tp, err
-}
-
-// get pointer indirect type
-func indirectType(v reflect.Type) reflect.Type {
- switch v.Kind() {
- case reflect.Ptr:
- return indirectType(v.Elem())
- default:
- return v
- }
-}
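A short sketch of the exported helpers above in isolation; StrTo and ToStr need no database, and the PicURL mapping is the one verified by TestSnakeStringWithAcronym below.

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
)

func main() {
	// StrTo wraps a string and converts it on demand.
	var s orm.StrTo
	s.Set("42")
	n, err := s.Int64()
	fmt.Println(n, err, s.Exist()) // 42 <nil> true

	// ToStr / ToInt64 convert common scalar types.
	fmt.Println(orm.ToStr(true), orm.ToStr([]byte("bytes")))
	fmt.Println(orm.ToInt64(uint16(7)))

	// Switch column naming to the acronym-aware strategy, so a field
	// such as PicURL maps to pic_url instead of pic_u_r_l.
	orm.SetNameStrategy(orm.SnakeAcronymNameStrategy)
}
```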
diff --git a/orm/utils_test.go b/orm/utils_test.go
deleted file mode 100644
index 7d94cada..00000000
--- a/orm/utils_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package orm
-
-import (
- "testing"
-)
-
-func TestCamelString(t *testing.T) {
- snake := []string{"pic_url", "hello_world_", "hello__World", "_HelLO_Word", "pic_url_1", "pic_url__1"}
- camel := []string{"PicUrl", "HelloWorld", "HelloWorld", "HelLOWord", "PicUrl1", "PicUrl1"}
-
- answer := make(map[string]string)
- for i, v := range snake {
- answer[v] = camel[i]
- }
-
- for _, v := range snake {
- res := camelString(v)
- if res != answer[v] {
- t.Error("Unit Test Fail:", v, res, answer[v])
- }
- }
-}
-
-func TestSnakeString(t *testing.T) {
- camel := []string{"PicUrl", "HelloWorld", "HelloWorld", "HelLOWord", "PicUrl1", "XyXX"}
- snake := []string{"pic_url", "hello_world", "hello_world", "hel_l_o_word", "pic_url1", "xy_x_x"}
-
- answer := make(map[string]string)
- for i, v := range camel {
- answer[v] = snake[i]
- }
-
- for _, v := range camel {
- res := snakeString(v)
- if res != answer[v] {
- t.Error("Unit Test Fail:", v, res, answer[v])
- }
- }
-}
-
-func TestSnakeStringWithAcronym(t *testing.T) {
- camel := []string{"ID", "PicURL", "HelloWorld", "HelloWorld", "HelLOWord", "PicUrl1", "XyXX"}
- snake := []string{"id", "pic_url", "hello_world", "hello_world", "hel_lo_word", "pic_url1", "xy_xx"}
-
- answer := make(map[string]string)
- for i, v := range camel {
- answer[v] = snake[i]
- }
-
- for _, v := range camel {
- res := snakeStringWithAcronym(v)
- if res != answer[v] {
- t.Error("Unit Test Fail:", v, res, answer[v])
- }
- }
-}
diff --git a/parser.go b/parser.go
deleted file mode 100644
index 3a311894..00000000
--- a/parser.go
+++ /dev/null
@@ -1,591 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "go/ast"
- "go/parser"
- "go/token"
- "io/ioutil"
- "os"
- "path/filepath"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "unicode"
-
- "github.com/astaxie/beego/context/param"
- "github.com/astaxie/beego/logs"
- "github.com/astaxie/beego/utils"
-)
-
-var globalRouterTemplate = `package {{.routersDir}}
-
-import (
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context/param"{{.globalimport}}
-)
-
-func init() {
-{{.globalinfo}}
-}
-`
-
-var (
- lastupdateFilename = "lastupdate.tmp"
- commentFilename string
- pkgLastupdate map[string]int64
- genInfoList map[string][]ControllerComments
-
- routerHooks = map[string]int{
- "beego.BeforeStatic": BeforeStatic,
- "beego.BeforeRouter": BeforeRouter,
- "beego.BeforeExec": BeforeExec,
- "beego.AfterExec": AfterExec,
- "beego.FinishRouter": FinishRouter,
- }
-
- routerHooksMapping = map[int]string{
- BeforeStatic: "beego.BeforeStatic",
- BeforeRouter: "beego.BeforeRouter",
- BeforeExec: "beego.BeforeExec",
- AfterExec: "beego.AfterExec",
- FinishRouter: "beego.FinishRouter",
- }
-)
-
-const commentPrefix = "commentsRouter_"
-
-func init() {
- pkgLastupdate = make(map[string]int64)
-}
-
-func parserPkg(pkgRealpath, pkgpath string) error {
- rep := strings.NewReplacer("\\", "_", "/", "_", ".", "_")
- commentFilename, _ = filepath.Rel(AppPath, pkgRealpath)
- commentFilename = commentPrefix + rep.Replace(commentFilename) + ".go"
- if !compareFile(pkgRealpath) {
- logs.Info(pkgRealpath + " no changed")
- return nil
- }
- genInfoList = make(map[string][]ControllerComments)
- fileSet := token.NewFileSet()
- astPkgs, err := parser.ParseDir(fileSet, pkgRealpath, func(info os.FileInfo) bool {
- name := info.Name()
- return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go")
- }, parser.ParseComments)
-
- if err != nil {
- return err
- }
- for _, pkg := range astPkgs {
- for _, fl := range pkg.Files {
- for _, d := range fl.Decls {
- switch specDecl := d.(type) {
- case *ast.FuncDecl:
- if specDecl.Recv != nil {
-						exp, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr) // Check that the type is correct before passing it to the parser
- if ok {
- parserComments(specDecl, fmt.Sprint(exp.X), pkgpath)
- }
- }
- }
- }
- }
- }
- genRouterCode(pkgRealpath)
- savetoFile(pkgRealpath)
- return nil
-}
-
-type parsedComment struct {
- routerPath string
- methods []string
- params map[string]parsedParam
- filters []parsedFilter
- imports []parsedImport
-}
-
-type parsedImport struct {
- importPath string
- importAlias string
-}
-
-type parsedFilter struct {
- pattern string
- pos int
- filter string
- params []bool
-}
-
-type parsedParam struct {
- name string
- datatype string
- location string
- defValue string
- required bool
-}
-
-func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error {
- if f.Doc != nil {
- parsedComments, err := parseComment(f.Doc.List)
- if err != nil {
- return err
- }
- for _, parsedComment := range parsedComments {
- if parsedComment.routerPath != "" {
- key := pkgpath + ":" + controllerName
- cc := ControllerComments{}
- cc.Method = f.Name.String()
- cc.Router = parsedComment.routerPath
- cc.AllowHTTPMethods = parsedComment.methods
- cc.MethodParams = buildMethodParams(f.Type.Params.List, parsedComment)
- cc.FilterComments = buildFilters(parsedComment.filters)
- cc.ImportComments = buildImports(parsedComment.imports)
- genInfoList[key] = append(genInfoList[key], cc)
- }
- }
- }
- return nil
-}
-
-func buildImports(pis []parsedImport) []*ControllerImportComments {
- var importComments []*ControllerImportComments
-
- for _, pi := range pis {
- importComments = append(importComments, &ControllerImportComments{
- ImportPath: pi.importPath,
- ImportAlias: pi.importAlias,
- })
- }
-
- return importComments
-}
-
-func buildFilters(pfs []parsedFilter) []*ControllerFilterComments {
- var filterComments []*ControllerFilterComments
-
- for _, pf := range pfs {
- var (
- returnOnOutput bool
- resetParams bool
- )
-
- if len(pf.params) >= 1 {
- returnOnOutput = pf.params[0]
- }
-
- if len(pf.params) >= 2 {
- resetParams = pf.params[1]
- }
-
- filterComments = append(filterComments, &ControllerFilterComments{
- Filter: pf.filter,
- Pattern: pf.pattern,
- Pos: pf.pos,
- ReturnOnOutput: returnOnOutput,
- ResetParams: resetParams,
- })
- }
-
- return filterComments
-}
-
-func buildMethodParams(funcParams []*ast.Field, pc *parsedComment) []*param.MethodParam {
- result := make([]*param.MethodParam, 0, len(funcParams))
- for _, fparam := range funcParams {
- for _, pName := range fparam.Names {
- methodParam := buildMethodParam(fparam, pName.Name, pc)
- result = append(result, methodParam)
- }
- }
- return result
-}
-
-func buildMethodParam(fparam *ast.Field, name string, pc *parsedComment) *param.MethodParam {
- options := []param.MethodParamOption{}
- if cparam, ok := pc.params[name]; ok {
- //Build param from comment info
- name = cparam.name
- if cparam.required {
- options = append(options, param.IsRequired)
- }
- switch cparam.location {
- case "body":
- options = append(options, param.InBody)
- case "header":
- options = append(options, param.InHeader)
- case "path":
- options = append(options, param.InPath)
- }
- if cparam.defValue != "" {
- options = append(options, param.Default(cparam.defValue))
- }
- } else {
- if paramInPath(name, pc.routerPath) {
- options = append(options, param.InPath)
- }
- }
- return param.New(name, options...)
-}
-
-func paramInPath(name, route string) bool {
- return strings.HasSuffix(route, ":"+name) ||
- strings.Contains(route, ":"+name+"/")
-}
-
-var routeRegex = regexp.MustCompile(`@router\s+(\S+)(?:\s+\[(\S+)\])?`)
-
-func parseComment(lines []*ast.Comment) (pcs []*parsedComment, err error) {
- pcs = []*parsedComment{}
- params := map[string]parsedParam{}
- filters := []parsedFilter{}
- imports := []parsedImport{}
-
- for _, c := range lines {
- t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
- if strings.HasPrefix(t, "@Param") {
- pv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Param")))
- if len(pv) < 4 {
- logs.Error("Invalid @Param format. Needs at least 4 parameters")
- }
- p := parsedParam{}
- names := strings.SplitN(pv[0], "=>", 2)
- p.name = names[0]
- funcParamName := p.name
- if len(names) > 1 {
- funcParamName = names[1]
- }
- p.location = pv[1]
- p.datatype = pv[2]
- switch len(pv) {
- case 5:
- p.required, _ = strconv.ParseBool(pv[3])
- case 6:
- p.defValue = pv[3]
- p.required, _ = strconv.ParseBool(pv[4])
- }
- params[funcParamName] = p
- }
- }
-
- for _, c := range lines {
- t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
- if strings.HasPrefix(t, "@Import") {
- iv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Import")))
- if len(iv) == 0 || len(iv) > 2 {
- logs.Error("Invalid @Import format. Only accepts 1 or 2 parameters")
- continue
- }
-
- p := parsedImport{}
- p.importPath = iv[0]
-
- if len(iv) == 2 {
- p.importAlias = iv[1]
- }
-
- imports = append(imports, p)
- }
- }
-
-filterLoop:
- for _, c := range lines {
- t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
- if strings.HasPrefix(t, "@Filter") {
- fv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Filter")))
- if len(fv) < 3 {
- logs.Error("Invalid @Filter format. Needs at least 3 parameters")
- continue filterLoop
- }
-
- p := parsedFilter{}
- p.pattern = fv[0]
- posName := fv[1]
- if pos, exists := routerHooks[posName]; exists {
- p.pos = pos
- } else {
- logs.Error("Invalid @Filter pos: ", posName)
- continue filterLoop
- }
-
- p.filter = fv[2]
- fvParams := fv[3:]
- for _, fvParam := range fvParams {
- switch fvParam {
- case "true":
- p.params = append(p.params, true)
- case "false":
- p.params = append(p.params, false)
- default:
- logs.Error("Invalid @Filter param: ", fvParam)
- continue filterLoop
- }
- }
-
- filters = append(filters, p)
- }
- }
-
- for _, c := range lines {
- var pc = &parsedComment{}
- pc.params = params
- pc.filters = filters
- pc.imports = imports
-
- t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
- if strings.HasPrefix(t, "@router") {
- t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
- matches := routeRegex.FindStringSubmatch(t)
- if len(matches) == 3 {
- pc.routerPath = matches[1]
- methods := matches[2]
- if methods == "" {
- pc.methods = []string{"get"}
- //pc.hasGet = true
- } else {
- pc.methods = strings.Split(methods, ",")
- //pc.hasGet = strings.Contains(methods, "get")
- }
- pcs = append(pcs, pc)
- } else {
- return nil, errors.New("Router information is missing")
- }
- }
- }
- return
-}
-
-// direct copy from bee\g_docs.go
-// analyze the params and return []string
-// @Param query form string true "The email for login"
-// [query form string true "The email for login"]
-func getparams(str string) []string {
- var s []rune
- var j int
- var start bool
- var r []string
- var quoted int8
- for _, c := range str {
- if unicode.IsSpace(c) && quoted == 0 {
- if !start {
- continue
- } else {
- start = false
- j++
- r = append(r, string(s))
- s = make([]rune, 0)
- continue
- }
- }
-
- start = true
- if c == '"' {
- quoted ^= 1
- continue
- }
- s = append(s, c)
- }
- if len(s) > 0 {
- r = append(r, string(s))
- }
- return r
-}
-
-func genRouterCode(pkgRealpath string) {
- os.Mkdir(getRouterDir(pkgRealpath), 0755)
- logs.Info("generate router from comments")
- var (
- globalinfo string
- globalimport string
- sortKey []string
- )
- for k := range genInfoList {
- sortKey = append(sortKey, k)
- }
- sort.Strings(sortKey)
- for _, k := range sortKey {
- cList := genInfoList[k]
- sort.Sort(ControllerCommentsSlice(cList))
- for _, c := range cList {
- allmethod := "nil"
- if len(c.AllowHTTPMethods) > 0 {
- allmethod = "[]string{"
- for _, m := range c.AllowHTTPMethods {
- allmethod += `"` + m + `",`
- }
- allmethod = strings.TrimRight(allmethod, ",") + "}"
- }
-
- params := "nil"
- if len(c.Params) > 0 {
- params = "[]map[string]string{"
- for _, p := range c.Params {
- for k, v := range p {
- params = params + `map[string]string{` + k + `:"` + v + `"},`
- }
- }
- params = strings.TrimRight(params, ",") + "}"
- }
-
- methodParams := "param.Make("
- if len(c.MethodParams) > 0 {
- lines := make([]string, 0, len(c.MethodParams))
- for _, m := range c.MethodParams {
- lines = append(lines, fmt.Sprint(m))
- }
- methodParams += "\n " +
- strings.Join(lines, ",\n ") +
- ",\n "
- }
- methodParams += ")"
-
- imports := ""
- if len(c.ImportComments) > 0 {
- for _, i := range c.ImportComments {
- var s string
- if i.ImportAlias != "" {
- s = fmt.Sprintf(`
- %s "%s"`, i.ImportAlias, i.ImportPath)
- } else {
- s = fmt.Sprintf(`
- "%s"`, i.ImportPath)
- }
- if !strings.Contains(globalimport, s) {
- imports += s
- }
- }
- }
-
- filters := ""
- if len(c.FilterComments) > 0 {
- for _, f := range c.FilterComments {
- filters += fmt.Sprintf(` &beego.ControllerFilter{
- Pattern: "%s",
- Pos: %s,
- Filter: %s,
- ReturnOnOutput: %v,
- ResetParams: %v,
- },`, f.Pattern, routerHooksMapping[f.Pos], f.Filter, f.ReturnOnOutput, f.ResetParams)
- }
- }
-
- if filters == "" {
- filters = "nil"
- } else {
- filters = fmt.Sprintf(`[]*beego.ControllerFilter{
-%s
- }`, filters)
- }
-
- globalimport += imports
-
- globalinfo = globalinfo + `
- beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"],
- beego.ControllerComments{
- Method: "` + strings.TrimSpace(c.Method) + `",
- ` + `Router: "` + c.Router + `"` + `,
- AllowHTTPMethods: ` + allmethod + `,
- MethodParams: ` + methodParams + `,
- Filters: ` + filters + `,
- Params: ` + params + `})
-`
- }
- }
-
- if globalinfo != "" {
- f, err := os.Create(filepath.Join(getRouterDir(pkgRealpath), commentFilename))
- if err != nil {
- panic(err)
- }
- defer f.Close()
-
- routersDir := AppConfig.DefaultString("routersdir", "routers")
- content := strings.Replace(globalRouterTemplate, "{{.globalinfo}}", globalinfo, -1)
- content = strings.Replace(content, "{{.routersDir}}", routersDir, -1)
- content = strings.Replace(content, "{{.globalimport}}", globalimport, -1)
- f.WriteString(content)
- }
-}
-
-func compareFile(pkgRealpath string) bool {
- if !utils.FileExists(filepath.Join(getRouterDir(pkgRealpath), commentFilename)) {
- return true
- }
- if utils.FileExists(lastupdateFilename) {
- content, err := ioutil.ReadFile(lastupdateFilename)
- if err != nil {
- return true
- }
- json.Unmarshal(content, &pkgLastupdate)
- lastupdate, err := getpathTime(pkgRealpath)
- if err != nil {
- return true
- }
- if v, ok := pkgLastupdate[pkgRealpath]; ok {
- if lastupdate <= v {
- return false
- }
- }
- }
- return true
-}
-
-func savetoFile(pkgRealpath string) {
- lastupdate, err := getpathTime(pkgRealpath)
- if err != nil {
- return
- }
- pkgLastupdate[pkgRealpath] = lastupdate
- d, err := json.Marshal(pkgLastupdate)
- if err != nil {
- return
- }
- ioutil.WriteFile(lastupdateFilename, d, os.ModePerm)
-}
-
-func getpathTime(pkgRealpath string) (lastupdate int64, err error) {
- fl, err := ioutil.ReadDir(pkgRealpath)
- if err != nil {
- return lastupdate, err
- }
- for _, f := range fl {
- if lastupdate < f.ModTime().UnixNano() {
- lastupdate = f.ModTime().UnixNano()
- }
- }
- return lastupdate, nil
-}
-
-func getRouterDir(pkgRealpath string) string {
- dir := filepath.Dir(pkgRealpath)
- for {
- routersDir := AppConfig.DefaultString("routersdir", "routers")
- d := filepath.Join(dir, routersDir)
- if utils.FileExists(d) {
- return d
- }
-
- if r, _ := filepath.Rel(dir, AppPath); r == "." {
- return d
- }
- // Parent dir.
- dir = filepath.Dir(dir)
- }
-}
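
The parser above turns doc-comment annotations (@router, @Param, @Filter, @Import) on controller methods into the generated routers/commentsRouter_*.go file. A minimal sketch of a controller method carrying the annotations parseComment recognizes; the controller, route and parameter names here are hypothetical:

    package controllers

    import "github.com/astaxie/beego"

    // ObjectController is a hypothetical controller used only to illustrate
    // the annotations consumed by parserPkg.
    type ObjectController struct {
        beego.Controller
    }

    // @router /object/:id [get]
    // @Param id path string true "object id"
    func (c *ObjectController) GetObject(id string) {
        c.Data["json"] = map[string]string{"id": id}
        c.ServeJSON()
    }

Registering such a controller with beego.Include while RunMode is dev is what triggers parserPkg and the code generation shown above.
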
diff --git a/plugins/apiauth/apiauth.go b/plugins/apiauth/apiauth.go
deleted file mode 100644
index 10e25f3f..00000000
--- a/plugins/apiauth/apiauth.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package apiauth provides handlers to enable apiauth support.
-//
-// Simple Usage:
-// import(
-// "github.com/astaxie/beego"
-// "github.com/astaxie/beego/plugins/apiauth"
-// )
-//
-// func main(){
-// // apiauth every request
-// beego.InsertFilter("*", beego.BeforeRouter,apiauth.APIBaiscAuth("appid","appkey"))
-// beego.Run()
-// }
-//
-// Advanced Usage:
-//
-// func getAppSecret(appid string) string {
-// // get appsecret by appid
-// // maybe store in configure, maybe in database
-// }
-//
-// beego.InsertFilter("*", beego.BeforeRouter,apiauth.APISecretAuth(getAppSecret, 360))
-//
-// Information:
-//
-// In the request the user should include these params in the query:
-//
-// 1. appid
-//
-// appid is assigned to the application
-//
-// 2. signature
-//
-//	generate the signature using apiauth.Signature()
-//
-//	when you send it to the server remember to use url.QueryEscape()
-//
-// 3. timestamp:
-//
-// send the request time, the format is yyyy-mm-dd HH:ii:ss
-//
-package apiauth
-
-import (
- "bytes"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/base64"
- "fmt"
- "net/url"
- "sort"
- "time"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
-)
-
-// AppIDToAppSecret is used to get the appsecret through the appid
-type AppIDToAppSecret func(string) string
-
-// APIBasicAuth use the basic appid/appkey as the AppIdToAppSecret
-func APIBasicAuth(appid, appkey string) beego.FilterFunc {
- ft := func(aid string) string {
- if aid == appid {
- return appkey
- }
- return ""
- }
- return APISecretAuth(ft, 300)
-}
-
-// APIBaiscAuth calls APIBasicAuth; the old name is kept for previous callers
-func APIBaiscAuth(appid, appkey string) beego.FilterFunc {
- return APIBasicAuth(appid, appkey)
-}
-
-// APISecretAuth uses the AppIDToAppSecret function to verify the signature of incoming requests within the given timeout (in seconds)
-func APISecretAuth(f AppIDToAppSecret, timeout int) beego.FilterFunc {
- return func(ctx *context.Context) {
- if ctx.Input.Query("appid") == "" {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("miss query param: appid")
- return
- }
- appsecret := f(ctx.Input.Query("appid"))
- if appsecret == "" {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("not exist this appid")
- return
- }
- if ctx.Input.Query("signature") == "" {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("miss query param: signature")
- return
- }
- if ctx.Input.Query("timestamp") == "" {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("miss query param: timestamp")
- return
- }
- u, err := time.Parse("2006-01-02 15:04:05", ctx.Input.Query("timestamp"))
- if err != nil {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("timestamp format is error, should 2006-01-02 15:04:05")
- return
- }
- t := time.Now()
- if t.Sub(u).Seconds() > float64(timeout) {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("timeout! the request time is long ago, please try again")
- return
- }
- if ctx.Input.Query("signature") !=
- Signature(appsecret, ctx.Input.Method(), ctx.Request.Form, ctx.Input.URL()) {
- ctx.ResponseWriter.WriteHeader(403)
- ctx.WriteString("auth failed")
- }
- }
-}
-
-// Signature generates the signature from the appsecret, method, sorted params and request URL
-func Signature(appsecret, method string, params url.Values, RequestURL string) (result string) {
- var b bytes.Buffer
- keys := make([]string, len(params))
- pa := make(map[string]string)
- for k, v := range params {
- pa[k] = v[0]
- keys = append(keys, k)
- }
-
- sort.Strings(keys)
-
- for _, key := range keys {
- if key == "signature" {
- continue
- }
-
- val := pa[key]
- if key != "" && val != "" {
- b.WriteString(key)
- b.WriteString(val)
- }
- }
-
- stringToSign := fmt.Sprintf("%v\n%v\n%v\n", method, b.String(), RequestURL)
-
- sha256 := sha256.New
- hash := hmac.New(sha256, []byte(appsecret))
- hash.Write([]byte(stringToSign))
- return base64.StdEncoding.EncodeToString(hash.Sum(nil))
-}
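
The Signature check above expects clients to send appid, timestamp and a signature computed over the sorted query parameters, the HTTP method and the request path. A minimal client-side sketch, assuming hypothetical appid/appsecret values:

    package main

    import (
        "fmt"
        "net/url"
        "time"

        "github.com/astaxie/beego/plugins/apiauth"
    )

    func main() {
        appid, appsecret := "demo-app", "demo-secret" // hypothetical credentials
        requestPath := "/api/list"

        params := url.Values{}
        params.Set("appid", appid)
        params.Set("timestamp", time.Now().Format("2006-01-02 15:04:05"))

        sig := apiauth.Signature(appsecret, "GET", params, requestPath)
        params.Set("signature", sig) // params.Encode() query-escapes the value

        fmt.Println(requestPath + "?" + params.Encode())
    }
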
diff --git a/plugins/apiauth/apiauth_test.go b/plugins/apiauth/apiauth_test.go
deleted file mode 100644
index 1f56cb0f..00000000
--- a/plugins/apiauth/apiauth_test.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package apiauth
-
-import (
- "net/url"
- "testing"
-)
-
-func TestSignature(t *testing.T) {
- appsecret := "beego secret"
- method := "GET"
- RequestURL := "http://localhost/test/url"
- params := make(url.Values)
- params.Add("arg1", "hello")
- params.Add("arg2", "beego")
-
- signature := "mFdpvLh48ca4mDVEItE9++AKKQ/IVca7O/ZyyB8hR58="
- if Signature(appsecret, method, params, RequestURL) != signature {
- t.Error("Signature error")
- }
-}
diff --git a/plugins/auth/basic.go b/plugins/auth/basic.go
deleted file mode 100644
index c478044a..00000000
--- a/plugins/auth/basic.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package auth provides handlers to enable basic auth support.
-// Simple Usage:
-// import(
-// "github.com/astaxie/beego"
-// "github.com/astaxie/beego/plugins/auth"
-// )
-//
-// func main(){
-// // authenticate every request
-// beego.InsertFilter("*", beego.BeforeRouter,auth.Basic("username","secretpassword"))
-// beego.Run()
-// }
-//
-//
-// Advanced Usage:
-//
-// func SecretAuth(username, password string) bool {
-// return username == "astaxie" && password == "helloBeego"
-// }
-// authPlugin := auth.NewBasicAuthenticator(SecretAuth, "Authorization Required")
-// beego.InsertFilter("*", beego.BeforeRouter,authPlugin)
-package auth
-
-import (
- "encoding/base64"
- "net/http"
- "strings"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
-)
-
-var defaultRealm = "Authorization Required"
-
-// Basic returns an HTTP basic auth filter for the given username and password
-func Basic(username string, password string) beego.FilterFunc {
- secrets := func(user, pass string) bool {
- return user == username && pass == password
- }
- return NewBasicAuthenticator(secrets, defaultRealm)
-}
-
-// NewBasicAuthenticator returns the BasicAuth filter function
-func NewBasicAuthenticator(secrets SecretProvider, Realm string) beego.FilterFunc {
- return func(ctx *context.Context) {
- a := &BasicAuth{Secrets: secrets, Realm: Realm}
- if username := a.CheckAuth(ctx.Request); username == "" {
- a.RequireAuth(ctx.ResponseWriter, ctx.Request)
- }
- }
-}
-
-// SecretProvider is the SecretProvider function
-type SecretProvider func(user, pass string) bool
-
-// BasicAuth store the SecretProvider and Realm
-type BasicAuth struct {
- Secrets SecretProvider
- Realm string
-}
-
-// CheckAuth Checks the username/password combination from the request. Returns
-// either an empty string (authentication failed) or the name of the
-// authenticated user.
-// Supports MD5 and SHA1 password entries
-func (a *BasicAuth) CheckAuth(r *http.Request) string {
- s := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
- if len(s) != 2 || s[0] != "Basic" {
- return ""
- }
-
- b, err := base64.StdEncoding.DecodeString(s[1])
- if err != nil {
- return ""
- }
- pair := strings.SplitN(string(b), ":", 2)
- if len(pair) != 2 {
- return ""
- }
-
- if a.Secrets(pair[0], pair[1]) {
- return pair[0]
- }
- return ""
-}
-
-// RequireAuth http.Handler for BasicAuth which initiates the authentication process
-// (or requires reauthentication).
-func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("WWW-Authenticate", `Basic realm="`+a.Realm+`"`)
- w.WriteHeader(401)
- w.Write([]byte("401 Unauthorized\n"))
-}
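
CheckAuth above expects a standard Authorization header of the form "Basic " followed by base64("user:pass"). A small sketch of building such a request on the client side; the credentials are placeholders:

    package main

    import (
        "encoding/base64"
        "fmt"
        "net/http"
    )

    func main() {
        req, _ := http.NewRequest("GET", "http://localhost:8080/", nil)
        token := base64.StdEncoding.EncodeToString([]byte("astaxie:helloBeego"))
        req.Header.Set("Authorization", "Basic "+token)
        // equivalent to the standard library helper:
        // req.SetBasicAuth("astaxie", "helloBeego")
        fmt.Println(req.Header.Get("Authorization"))
    }
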
diff --git a/plugins/authz/authz.go b/plugins/authz/authz.go
deleted file mode 100644
index 9dc0db76..00000000
--- a/plugins/authz/authz.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package authz provides handlers to enable ACL, RBAC, ABAC authorization support.
-// Simple Usage:
-// import(
-// "github.com/astaxie/beego"
-// "github.com/astaxie/beego/plugins/authz"
-// "github.com/casbin/casbin"
-// )
-//
-// func main(){
-// // mediate the access for every request
-// beego.InsertFilter("*", beego.BeforeRouter, authz.NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
-// beego.Run()
-// }
-//
-//
-// Advanced Usage:
-//
-// func main(){
-// e := casbin.NewEnforcer("authz_model.conf", "")
-// e.AddRoleForUser("alice", "admin")
-// e.AddPolicy(...)
-//
-// beego.InsertFilter("*", beego.BeforeRouter, authz.NewAuthorizer(e))
-// beego.Run()
-// }
-package authz
-
-import (
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- "github.com/casbin/casbin"
- "net/http"
-)
-
-// NewAuthorizer returns the authorizer.
-// Use a casbin enforcer as input
-func NewAuthorizer(e *casbin.Enforcer) beego.FilterFunc {
- return func(ctx *context.Context) {
- a := &BasicAuthorizer{enforcer: e}
-
- if !a.CheckPermission(ctx.Request) {
- a.RequirePermission(ctx.ResponseWriter)
- }
- }
-}
-
-// BasicAuthorizer stores the casbin handler
-type BasicAuthorizer struct {
- enforcer *casbin.Enforcer
-}
-
-// GetUserName gets the user name from the request.
-// Currently, only HTTP basic authentication is supported
-func (a *BasicAuthorizer) GetUserName(r *http.Request) string {
- username, _, _ := r.BasicAuth()
- return username
-}
-
-// CheckPermission checks the user/method/path combination from the request.
-// Returns true (permission granted) or false (permission forbidden)
-func (a *BasicAuthorizer) CheckPermission(r *http.Request) bool {
- user := a.GetUserName(r)
- method := r.Method
- path := r.URL.Path
- return a.enforcer.Enforce(user, path, method)
-}
-
-// RequirePermission returns the 403 Forbidden to the client
-func (a *BasicAuthorizer) RequirePermission(w http.ResponseWriter) {
- w.WriteHeader(403)
- w.Write([]byte("403 Forbidden\n"))
-}
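
The filter above boils down to a single casbin Enforce(user, path, method) call, with the user taken from HTTP basic auth. A sketch of that check against the model and policy files shipped next to this plugin, assuming the casbin v1 API imported here (where Enforce returns a bool):

    package main

    import (
        "fmt"

        "github.com/casbin/casbin"
    )

    func main() {
        e := casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")
        // alice may GET anything under /dataset1/ according to authz_policy.csv
        fmt.Println(e.Enforce("alice", "/dataset1/resource1", "GET")) // true
        fmt.Println(e.Enforce("alice", "/dataset2/resource1", "GET")) // false
    }
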
diff --git a/plugins/authz/authz_model.conf b/plugins/authz/authz_model.conf
deleted file mode 100644
index d1b3dbd7..00000000
--- a/plugins/authz/authz_model.conf
+++ /dev/null
@@ -1,14 +0,0 @@
-[request_definition]
-r = sub, obj, act
-
-[policy_definition]
-p = sub, obj, act
-
-[role_definition]
-g = _, _
-
-[policy_effect]
-e = some(where (p.eft == allow))
-
-[matchers]
-m = g(r.sub, p.sub) && keyMatch(r.obj, p.obj) && (r.act == p.act || p.act == "*")
\ No newline at end of file
diff --git a/plugins/authz/authz_policy.csv b/plugins/authz/authz_policy.csv
deleted file mode 100644
index c062dd3e..00000000
--- a/plugins/authz/authz_policy.csv
+++ /dev/null
@@ -1,7 +0,0 @@
-p, alice, /dataset1/*, GET
-p, alice, /dataset1/resource1, POST
-p, bob, /dataset2/resource1, *
-p, bob, /dataset2/resource2, GET
-p, bob, /dataset2/folder1/*, POST
-p, dataset1_admin, /dataset1/*, *
-g, cathy, dataset1_admin
\ No newline at end of file
diff --git a/plugins/authz/authz_test.go b/plugins/authz/authz_test.go
deleted file mode 100644
index 49aed84c..00000000
--- a/plugins/authz/authz_test.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package authz
-
-import (
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/plugins/auth"
- "github.com/casbin/casbin"
- "net/http"
- "net/http/httptest"
- "testing"
-)
-
-func testRequest(t *testing.T, handler *beego.ControllerRegister, user string, path string, method string, code int) {
- r, _ := http.NewRequest(method, path, nil)
- r.SetBasicAuth(user, "123")
- w := httptest.NewRecorder()
- handler.ServeHTTP(w, r)
-
- if w.Code != code {
- t.Errorf("%s, %s, %s: %d, supposed to be %d", user, path, method, w.Code, code)
- }
-}
-
-func TestBasic(t *testing.T) {
- handler := beego.NewControllerRegister()
-
- handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("alice", "123"))
- handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
-
- handler.Any("*", func(ctx *context.Context) {
- ctx.Output.SetStatus(200)
- })
-
- testRequest(t, handler, "alice", "/dataset1/resource1", "GET", 200)
- testRequest(t, handler, "alice", "/dataset1/resource1", "POST", 200)
- testRequest(t, handler, "alice", "/dataset1/resource2", "GET", 200)
- testRequest(t, handler, "alice", "/dataset1/resource2", "POST", 403)
-}
-
-func TestPathWildcard(t *testing.T) {
- handler := beego.NewControllerRegister()
-
- handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("bob", "123"))
- handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
-
- handler.Any("*", func(ctx *context.Context) {
- ctx.Output.SetStatus(200)
- })
-
- testRequest(t, handler, "bob", "/dataset2/resource1", "GET", 200)
- testRequest(t, handler, "bob", "/dataset2/resource1", "POST", 200)
- testRequest(t, handler, "bob", "/dataset2/resource1", "DELETE", 200)
- testRequest(t, handler, "bob", "/dataset2/resource2", "GET", 200)
- testRequest(t, handler, "bob", "/dataset2/resource2", "POST", 403)
- testRequest(t, handler, "bob", "/dataset2/resource2", "DELETE", 403)
-
- testRequest(t, handler, "bob", "/dataset2/folder1/item1", "GET", 403)
- testRequest(t, handler, "bob", "/dataset2/folder1/item1", "POST", 200)
- testRequest(t, handler, "bob", "/dataset2/folder1/item1", "DELETE", 403)
- testRequest(t, handler, "bob", "/dataset2/folder1/item2", "GET", 403)
- testRequest(t, handler, "bob", "/dataset2/folder1/item2", "POST", 200)
- testRequest(t, handler, "bob", "/dataset2/folder1/item2", "DELETE", 403)
-}
-
-func TestRBAC(t *testing.T) {
- handler := beego.NewControllerRegister()
-
- handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("cathy", "123"))
- e := casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")
- handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(e))
-
- handler.Any("*", func(ctx *context.Context) {
- ctx.Output.SetStatus(200)
- })
-
- // cathy can access all /dataset1/* resources via all methods because it has the dataset1_admin role.
- testRequest(t, handler, "cathy", "/dataset1/item", "GET", 200)
- testRequest(t, handler, "cathy", "/dataset1/item", "POST", 200)
- testRequest(t, handler, "cathy", "/dataset1/item", "DELETE", 200)
- testRequest(t, handler, "cathy", "/dataset2/item", "GET", 403)
- testRequest(t, handler, "cathy", "/dataset2/item", "POST", 403)
- testRequest(t, handler, "cathy", "/dataset2/item", "DELETE", 403)
-
- // delete all roles on user cathy, so cathy cannot access any resources now.
- e.DeleteRolesForUser("cathy")
-
- testRequest(t, handler, "cathy", "/dataset1/item", "GET", 403)
- testRequest(t, handler, "cathy", "/dataset1/item", "POST", 403)
- testRequest(t, handler, "cathy", "/dataset1/item", "DELETE", 403)
- testRequest(t, handler, "cathy", "/dataset2/item", "GET", 403)
- testRequest(t, handler, "cathy", "/dataset2/item", "POST", 403)
- testRequest(t, handler, "cathy", "/dataset2/item", "DELETE", 403)
-}
diff --git a/plugins/cors/cors.go b/plugins/cors/cors.go
deleted file mode 100644
index 45c327ab..00000000
--- a/plugins/cors/cors.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cors provides handlers to enable CORS support.
-// Usage
-// import (
-// "github.com/astaxie/beego"
-// "github.com/astaxie/beego/plugins/cors"
-// )
-//
-// func main() {
-// // CORS for https://foo.* origins, allowing:
-// // - PUT and PATCH methods
-// // - Origin header
-// // - Credentials share
-// beego.InsertFilter("*", beego.BeforeRouter, cors.Allow(&cors.Options{
-// AllowOrigins: []string{"https://*.foo.com"},
-// AllowMethods: []string{"PUT", "PATCH"},
-// AllowHeaders: []string{"Origin"},
-// ExposeHeaders: []string{"Content-Length"},
-// AllowCredentials: true,
-// }))
-// beego.Run()
-// }
-package cors
-
-import (
- "net/http"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
-)
-
-const (
- headerAllowOrigin = "Access-Control-Allow-Origin"
- headerAllowCredentials = "Access-Control-Allow-Credentials"
- headerAllowHeaders = "Access-Control-Allow-Headers"
- headerAllowMethods = "Access-Control-Allow-Methods"
- headerExposeHeaders = "Access-Control-Expose-Headers"
- headerMaxAge = "Access-Control-Max-Age"
-
- headerOrigin = "Origin"
- headerRequestMethod = "Access-Control-Request-Method"
- headerRequestHeaders = "Access-Control-Request-Headers"
-)
-
-var (
- defaultAllowHeaders = []string{"Origin", "Accept", "Content-Type", "Authorization"}
- // Regex patterns are generated from AllowOrigins. These are used and generated internally.
- allowOriginPatterns = []string{}
-)
-
-// Options represents Access Control options.
-type Options struct {
- // If set, all origins are allowed.
- AllowAllOrigins bool
- // A list of allowed origins. Wild cards and FQDNs are supported.
- AllowOrigins []string
- // If set, allows to share auth credentials such as cookies.
- AllowCredentials bool
- // A list of allowed HTTP methods.
- AllowMethods []string
- // A list of allowed HTTP headers.
- AllowHeaders []string
- // A list of exposed HTTP headers.
- ExposeHeaders []string
- // Max age of the CORS headers.
- MaxAge time.Duration
-}
-
-// Header converts options into CORS headers.
-func (o *Options) Header(origin string) (headers map[string]string) {
- headers = make(map[string]string)
- // if origin is not allowed, don't extend the headers
- // with CORS headers.
- if !o.AllowAllOrigins && !o.IsOriginAllowed(origin) {
- return
- }
-
- // add allow origin
- if o.AllowAllOrigins {
- headers[headerAllowOrigin] = "*"
- } else {
- headers[headerAllowOrigin] = origin
- }
-
- // add allow credentials
- headers[headerAllowCredentials] = strconv.FormatBool(o.AllowCredentials)
-
- // add allow methods
- if len(o.AllowMethods) > 0 {
- headers[headerAllowMethods] = strings.Join(o.AllowMethods, ",")
- }
-
- // add allow headers
- if len(o.AllowHeaders) > 0 {
- headers[headerAllowHeaders] = strings.Join(o.AllowHeaders, ",")
- }
-
- // add exposed header
- if len(o.ExposeHeaders) > 0 {
- headers[headerExposeHeaders] = strings.Join(o.ExposeHeaders, ",")
- }
- // add a max age header
- if o.MaxAge > time.Duration(0) {
- headers[headerMaxAge] = strconv.FormatInt(int64(o.MaxAge/time.Second), 10)
- }
- return
-}
-
-// PreflightHeader converts options into CORS headers for a preflight response.
-func (o *Options) PreflightHeader(origin, rMethod, rHeaders string) (headers map[string]string) {
- headers = make(map[string]string)
- if !o.AllowAllOrigins && !o.IsOriginAllowed(origin) {
- return
- }
- // verify if requested method is allowed
- for _, method := range o.AllowMethods {
- if method == rMethod {
- headers[headerAllowMethods] = strings.Join(o.AllowMethods, ",")
- break
- }
- }
-
- // verify if requested headers are allowed
- var allowed []string
- for _, rHeader := range strings.Split(rHeaders, ",") {
- rHeader = strings.TrimSpace(rHeader)
- lookupLoop:
- for _, allowedHeader := range o.AllowHeaders {
- if strings.ToLower(rHeader) == strings.ToLower(allowedHeader) {
- allowed = append(allowed, rHeader)
- break lookupLoop
- }
- }
- }
-
- headers[headerAllowCredentials] = strconv.FormatBool(o.AllowCredentials)
- // add allow origin
- if o.AllowAllOrigins {
- headers[headerAllowOrigin] = "*"
- } else {
- headers[headerAllowOrigin] = origin
- }
-
- // add allowed headers
- if len(allowed) > 0 {
- headers[headerAllowHeaders] = strings.Join(allowed, ",")
- }
-
- // add exposed headers
- if len(o.ExposeHeaders) > 0 {
- headers[headerExposeHeaders] = strings.Join(o.ExposeHeaders, ",")
- }
- // add a max age header
- if o.MaxAge > time.Duration(0) {
- headers[headerMaxAge] = strconv.FormatInt(int64(o.MaxAge/time.Second), 10)
- }
- return
-}
-
-// IsOriginAllowed looks up if the origin matches one of the patterns
-// generated from Options.AllowOrigins patterns.
-func (o *Options) IsOriginAllowed(origin string) (allowed bool) {
- for _, pattern := range allowOriginPatterns {
- allowed, _ = regexp.MatchString(pattern, origin)
- if allowed {
- return
- }
- }
- return
-}
-
-// Allow enables CORS for requests that match the provided options.
-func Allow(opts *Options) beego.FilterFunc {
- // Allow default headers if nothing is specified.
- if len(opts.AllowHeaders) == 0 {
- opts.AllowHeaders = defaultAllowHeaders
- }
-
- for _, origin := range opts.AllowOrigins {
- pattern := regexp.QuoteMeta(origin)
- pattern = strings.Replace(pattern, "\\*", ".*", -1)
- pattern = strings.Replace(pattern, "\\?", ".", -1)
- allowOriginPatterns = append(allowOriginPatterns, "^"+pattern+"$")
- }
-
- return func(ctx *context.Context) {
- var (
- origin = ctx.Input.Header(headerOrigin)
- requestedMethod = ctx.Input.Header(headerRequestMethod)
- requestedHeaders = ctx.Input.Header(headerRequestHeaders)
- // additional headers to be added
- // to the response.
- headers map[string]string
- )
-
- if ctx.Input.Method() == "OPTIONS" &&
- (requestedMethod != "" || requestedHeaders != "") {
- headers = opts.PreflightHeader(origin, requestedMethod, requestedHeaders)
- for key, value := range headers {
- ctx.Output.Header(key, value)
- }
- ctx.ResponseWriter.WriteHeader(http.StatusOK)
- return
- }
- headers = opts.Header(origin)
-
- for key, value := range headers {
- ctx.Output.Header(key, value)
- }
- }
-}
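
Options.Header above is what populates the response headers for non-preflight requests. A small sketch of what it produces for an allow-all configuration; the option values are illustrative only:

    package main

    import (
        "fmt"
        "time"

        "github.com/astaxie/beego/plugins/cors"
    )

    func main() {
        opts := &cors.Options{
            AllowAllOrigins:  true,
            AllowCredentials: true,
            AllowMethods:     []string{"GET", "PATCH"},
            MaxAge:           5 * time.Minute,
        }
        // emits, in no particular order:
        //   Access-Control-Allow-Origin: *
        //   Access-Control-Allow-Credentials: true
        //   Access-Control-Allow-Methods: GET,PATCH
        //   Access-Control-Max-Age: 300
        for k, v := range opts.Header("https://bar.foo.com") {
            fmt.Printf("%s: %s\n", k, v)
        }
    }
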
diff --git a/plugins/cors/cors_test.go b/plugins/cors/cors_test.go
deleted file mode 100644
index 34039143..00000000
--- a/plugins/cors/cors_test.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cors
-
-import (
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
- "time"
-
- "github.com/astaxie/beego"
- "github.com/astaxie/beego/context"
-)
-
-// HTTPHeaderGuardRecorder is httptest.ResponseRecorder with own http.Header
-type HTTPHeaderGuardRecorder struct {
- *httptest.ResponseRecorder
- savedHeaderMap http.Header
-}
-
-// NewRecorder return HttpHeaderGuardRecorder
-func NewRecorder() *HTTPHeaderGuardRecorder {
- return &HTTPHeaderGuardRecorder{httptest.NewRecorder(), nil}
-}
-
-func (gr *HTTPHeaderGuardRecorder) WriteHeader(code int) {
- gr.ResponseRecorder.WriteHeader(code)
- gr.savedHeaderMap = gr.ResponseRecorder.Header()
-}
-
-func (gr *HTTPHeaderGuardRecorder) Header() http.Header {
- if gr.savedHeaderMap != nil {
- // headers were written. clone so we don't get updates
- clone := make(http.Header)
- for k, v := range gr.savedHeaderMap {
- clone[k] = v
- }
- return clone
- }
- return gr.ResponseRecorder.Header()
-}
-
-func Test_AllowAll(t *testing.T) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowAllOrigins: true,
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- r, _ := http.NewRequest("PUT", "/foo", nil)
- handler.ServeHTTP(recorder, r)
-
- if recorder.HeaderMap.Get(headerAllowOrigin) != "*" {
- t.Errorf("Allow-Origin header should be *")
- }
-}
-
-func Test_AllowRegexMatch(t *testing.T) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowOrigins: []string{"https://aaa.com", "https://*.foo.com"},
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- origin := "https://bar.foo.com"
- r, _ := http.NewRequest("PUT", "/foo", nil)
- r.Header.Add("Origin", origin)
- handler.ServeHTTP(recorder, r)
-
- headerValue := recorder.HeaderMap.Get(headerAllowOrigin)
- if headerValue != origin {
- t.Errorf("Allow-Origin header should be %v, found %v", origin, headerValue)
- }
-}
-
-func Test_AllowRegexNoMatch(t *testing.T) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowOrigins: []string{"https://*.foo.com"},
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- origin := "https://ww.foo.com.evil.com"
- r, _ := http.NewRequest("PUT", "/foo", nil)
- r.Header.Add("Origin", origin)
- handler.ServeHTTP(recorder, r)
-
- headerValue := recorder.HeaderMap.Get(headerAllowOrigin)
- if headerValue != "" {
- t.Errorf("Allow-Origin header should not exist, found %v", headerValue)
- }
-}
-
-func Test_OtherHeaders(t *testing.T) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowAllOrigins: true,
- AllowCredentials: true,
- AllowMethods: []string{"PATCH", "GET"},
- AllowHeaders: []string{"Origin", "X-whatever"},
- ExposeHeaders: []string{"Content-Length", "Hello"},
- MaxAge: 5 * time.Minute,
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- r, _ := http.NewRequest("PUT", "/foo", nil)
- handler.ServeHTTP(recorder, r)
-
- credentialsVal := recorder.HeaderMap.Get(headerAllowCredentials)
- methodsVal := recorder.HeaderMap.Get(headerAllowMethods)
- headersVal := recorder.HeaderMap.Get(headerAllowHeaders)
- exposedHeadersVal := recorder.HeaderMap.Get(headerExposeHeaders)
- maxAgeVal := recorder.HeaderMap.Get(headerMaxAge)
-
- if credentialsVal != "true" {
- t.Errorf("Allow-Credentials is expected to be true, found %v", credentialsVal)
- }
-
- if methodsVal != "PATCH,GET" {
- t.Errorf("Allow-Methods is expected to be PATCH,GET; found %v", methodsVal)
- }
-
- if headersVal != "Origin,X-whatever" {
- t.Errorf("Allow-Headers is expected to be Origin,X-whatever; found %v", headersVal)
- }
-
- if exposedHeadersVal != "Content-Length,Hello" {
- t.Errorf("Expose-Headers are expected to be Content-Length,Hello. Found %v", exposedHeadersVal)
- }
-
- if maxAgeVal != "300" {
- t.Errorf("Max-Age is expected to be 300, found %v", maxAgeVal)
- }
-}
-
-func Test_DefaultAllowHeaders(t *testing.T) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowAllOrigins: true,
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
-
- r, _ := http.NewRequest("PUT", "/foo", nil)
- handler.ServeHTTP(recorder, r)
-
- headersVal := recorder.HeaderMap.Get(headerAllowHeaders)
- if headersVal != "Origin,Accept,Content-Type,Authorization" {
- t.Errorf("Allow-Headers is expected to be Origin,Accept,Content-Type,Authorization; found %v", headersVal)
- }
-}
-
-func Test_Preflight(t *testing.T) {
- recorder := NewRecorder()
- handler := beego.NewControllerRegister()
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowAllOrigins: true,
- AllowMethods: []string{"PUT", "PATCH"},
- AllowHeaders: []string{"Origin", "X-whatever", "X-CaseSensitive"},
- }))
-
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(200)
- })
-
- r, _ := http.NewRequest("OPTIONS", "/foo", nil)
- r.Header.Add(headerRequestMethod, "PUT")
- r.Header.Add(headerRequestHeaders, "X-whatever, x-casesensitive")
- handler.ServeHTTP(recorder, r)
-
- headers := recorder.Header()
- methodsVal := headers.Get(headerAllowMethods)
- headersVal := headers.Get(headerAllowHeaders)
- originVal := headers.Get(headerAllowOrigin)
-
- if methodsVal != "PUT,PATCH" {
- t.Errorf("Allow-Methods is expected to be PUT,PATCH, found %v", methodsVal)
- }
-
- if !strings.Contains(headersVal, "X-whatever") {
- t.Errorf("Allow-Headers is expected to contain X-whatever, found %v", headersVal)
- }
-
- if !strings.Contains(headersVal, "x-casesensitive") {
- t.Errorf("Allow-Headers is expected to contain x-casesensitive, found %v", headersVal)
- }
-
- if originVal != "*" {
- t.Errorf("Allow-Origin is expected to be *, found %v", originVal)
- }
-
- if recorder.Code != http.StatusOK {
- t.Errorf("Status code is expected to be 200, found %d", recorder.Code)
- }
-}
-
-func Benchmark_WithoutCORS(b *testing.B) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- beego.BConfig.RunMode = beego.PROD
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- b.ResetTimer()
- r, _ := http.NewRequest("PUT", "/foo", nil)
- for i := 0; i < b.N; i++ {
- handler.ServeHTTP(recorder, r)
- }
-}
-
-func Benchmark_WithCORS(b *testing.B) {
- recorder := httptest.NewRecorder()
- handler := beego.NewControllerRegister()
- beego.BConfig.RunMode = beego.PROD
- handler.InsertFilter("*", beego.BeforeRouter, Allow(&Options{
- AllowAllOrigins: true,
- AllowCredentials: true,
- AllowMethods: []string{"PATCH", "GET"},
- AllowHeaders: []string{"Origin", "X-whatever"},
- MaxAge: 5 * time.Minute,
- }))
- handler.Any("/foo", func(ctx *context.Context) {
- ctx.Output.SetStatus(500)
- })
- b.ResetTimer()
- r, _ := http.NewRequest("PUT", "/foo", nil)
- for i := 0; i < b.N; i++ {
- handler.ServeHTTP(recorder, r)
- }
-}
diff --git a/policy.go b/policy.go
deleted file mode 100644
index 358a0539..00000000
--- a/policy.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 beego authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "strings"
-
- "github.com/astaxie/beego/context"
-)
-
-// PolicyFunc defines a policy function which is invoked before the controller handler is executed.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type PolicyFunc func(*context.Context)
-
-// FindPolicy finds the policy functions registered for the request URL
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) FindPolicy(cont *context.Context) []PolicyFunc {
- var urlPath = cont.Input.URL()
- if !BConfig.RouterCaseSensitive {
- urlPath = strings.ToLower(urlPath)
- }
- httpMethod := cont.Input.Method()
- isWildcard := false
- // Find policy for current method
- t, ok := p.policies[httpMethod]
- // If not found - find policy for whole controller
- if !ok {
- t, ok = p.policies["*"]
- isWildcard = true
- }
- if ok {
- runObjects := t.Match(urlPath, cont)
- if r, ok := runObjects.([]PolicyFunc); ok {
- return r
- } else if !isWildcard {
-			// If no policies were found and we did not check the "*" method yet, try that
- t, ok = p.policies["*"]
- if ok {
- runObjects = t.Match(urlPath, cont)
- if r, ok = runObjects.([]PolicyFunc); ok {
- return r
- }
- }
- }
- }
- return nil
-}
-
-func (p *ControllerRegister) addToPolicy(method, pattern string, r ...PolicyFunc) {
- method = strings.ToUpper(method)
- p.enablePolicy = true
- if !BConfig.RouterCaseSensitive {
- pattern = strings.ToLower(pattern)
- }
- if t, ok := p.policies[method]; ok {
- t.AddRouter(pattern, r)
- } else {
- t := NewTree()
- t.AddRouter(pattern, r)
- p.policies[method] = t
- }
-}
-
-// Policy Register new policy in beego
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func Policy(pattern, method string, policy ...PolicyFunc) {
- BeeApp.Handlers.addToPolicy(method, pattern, policy...)
-}
-
-// Find policies and execute them if any were found
-func (p *ControllerRegister) execPolicy(cont *context.Context, urlPath string) (started bool) {
- if !p.enablePolicy {
- return false
- }
- // Find Policy for method
- policyList := p.FindPolicy(cont)
- if len(policyList) > 0 {
- // Run policies
- for _, runPolicy := range policyList {
- runPolicy(cont)
- if cont.ResponseWriter.Started {
- return true
- }
- }
- return false
- }
- return false
-}
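
execPolicy above runs every matching PolicyFunc and stops the request as soon as one of them writes a response (ResponseWriter.Started becomes true). A minimal sketch of registering such a policy via the Policy helper; the pattern and the header check are hypothetical:

    package main

    import (
        "github.com/astaxie/beego"
        "github.com/astaxie/beego/context"
    )

    func main() {
        beego.Policy("/api/*", "GET", func(ctx *context.Context) {
            if ctx.Input.Header("X-API-Key") == "" {
                // writing the body marks the ResponseWriter as started,
                // which makes execPolicy short-circuit the request
                ctx.Output.SetStatus(401)
                ctx.Output.Body([]byte("missing api key"))
            }
        })
        beego.Run()
    }
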
diff --git a/router.go b/router.go
deleted file mode 100644
index 1be495ab..00000000
--- a/router.go
+++ /dev/null
@@ -1,1085 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "errors"
- "fmt"
- "net/http"
- "os"
- "path"
- "path/filepath"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "time"
-
- beecontext "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/context/param"
- "github.com/astaxie/beego/logs"
- "github.com/astaxie/beego/toolbox"
- "github.com/astaxie/beego/utils"
-)
-
-// default filter execution points
-const (
- BeforeStatic = iota
- BeforeRouter
- BeforeExec
- AfterExec
- FinishRouter
-)
-
-const (
- routerTypeBeego = iota
- routerTypeRESTFul
- routerTypeHandler
-)
-
-var (
- // HTTPMETHOD list the supported http methods.
- // Deprecated: using pkg/, we will delete this in v2.1.0
- HTTPMETHOD = map[string]bool{
- "GET": true,
- "POST": true,
- "PUT": true,
- "DELETE": true,
- "PATCH": true,
- "OPTIONS": true,
- "HEAD": true,
- "TRACE": true,
- "CONNECT": true,
- "MKCOL": true,
- "COPY": true,
- "MOVE": true,
- "PROPFIND": true,
- "PROPPATCH": true,
- "LOCK": true,
- "UNLOCK": true,
- }
- // these beego.Controller's methods shouldn't reflect to AutoRouter
- exceptMethod = []string{"Init", "Prepare", "Finish", "Render", "RenderString",
- "RenderBytes", "Redirect", "Abort", "StopRun", "UrlFor", "ServeJSON", "ServeJSONP",
- "ServeYAML", "ServeXML", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool",
- "GetFloat", "GetFile", "SaveToFile", "StartSession", "SetSession", "GetSession",
- "DelSession", "SessionRegenerateID", "DestroySession", "IsAjax", "GetSecureCookie",
- "SetSecureCookie", "XsrfToken", "CheckXsrfCookie", "XsrfFormHtml",
- "GetControllerAndAction", "ServeFormatted"}
-
- urlPlaceholder = "{{placeholder}}"
- // DefaultAccessLogFilter will skip the accesslog if return true
- // Deprecated: using pkg/, we will delete this in v2.1.0
- DefaultAccessLogFilter FilterHandler = &logFilter{}
-)
-
-// FilterHandler is an interface that decides whether a request should be filtered (for example, skipped by the access log)
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type FilterHandler interface {
- Filter(*beecontext.Context) bool
-}
-
-// default access log filter: requests for static files will not be logged
-type logFilter struct {
-}
-
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (l *logFilter) Filter(ctx *beecontext.Context) bool {
- requestPath := path.Clean(ctx.Request.URL.Path)
- if requestPath == "/favicon.ico" || requestPath == "/robots.txt" {
- return true
- }
- for prefix := range BConfig.WebConfig.StaticDir {
- if strings.HasPrefix(requestPath, prefix) {
- return true
- }
- }
- return false
-}
-
-// ExceptMethodAppend appends an action name to "exceptMethod", so that the controller method is not reflected into AutoRouter
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ExceptMethodAppend(action string) {
- exceptMethod = append(exceptMethod, action)
-}
-
-// ControllerInfo holds information about the controller.
-type ControllerInfo struct {
- pattern string
- controllerType reflect.Type
- methods map[string]string
- handler http.Handler
- runFunction FilterFunc
- routerType int
- initialize func() ControllerInterface
- methodParams []*param.MethodParam
-}
-
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (c *ControllerInfo) GetPattern() string {
- return c.pattern
-}
-
-// ControllerRegister contains registered router rules, controller handlers and filters.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-type ControllerRegister struct {
- routers map[string]*Tree
- enablePolicy bool
- policies map[string]*Tree
- enableFilter bool
- filters [FinishRouter + 1][]*FilterRouter
- pool sync.Pool
-}
-
-// NewControllerRegister returns a new ControllerRegister.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func NewControllerRegister() *ControllerRegister {
- return &ControllerRegister{
- routers: make(map[string]*Tree),
- policies: make(map[string]*Tree),
- pool: sync.Pool{
- New: func() interface{} {
- return beecontext.NewContext()
- },
- },
- }
-}
-
-// Add controller handler and pattern rules to ControllerRegister.
-// usage:
-//	by default each HTTP method maps to the controller method of the same name
-// Add("/user",&UserController{})
-// Add("/api/list",&RestController{},"*:ListFood")
-// Add("/api/create",&RestController{},"post:CreateFood")
-// Add("/api/update",&RestController{},"put:UpdateFood")
-// Add("/api/delete",&RestController{},"delete:DeleteFood")
-//	Add("/api",&RestController{},"get,post:ApiFunc")
-// Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc")
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) {
- p.addWithMethodParams(pattern, c, nil, mappingMethods...)
-}
-
-func (p *ControllerRegister) addWithMethodParams(pattern string, c ControllerInterface, methodParams []*param.MethodParam, mappingMethods ...string) {
- reflectVal := reflect.ValueOf(c)
- t := reflect.Indirect(reflectVal).Type()
- methods := make(map[string]string)
- if len(mappingMethods) > 0 {
- semi := strings.Split(mappingMethods[0], ";")
- for _, v := range semi {
- colon := strings.Split(v, ":")
- if len(colon) != 2 {
- panic("method mapping format is invalid")
- }
- comma := strings.Split(colon[0], ",")
- for _, m := range comma {
- if m == "*" || HTTPMETHOD[strings.ToUpper(m)] {
- if val := reflectVal.MethodByName(colon[1]); val.IsValid() {
- methods[strings.ToUpper(m)] = colon[1]
- } else {
- panic("'" + colon[1] + "' method doesn't exist in the controller " + t.Name())
- }
- } else {
- panic(v + " is an invalid method mapping. Method doesn't exist " + m)
- }
- }
- }
- }
-
- route := &ControllerInfo{}
- route.pattern = pattern
- route.methods = methods
- route.routerType = routerTypeBeego
- route.controllerType = t
- route.initialize = func() ControllerInterface {
- vc := reflect.New(route.controllerType)
- execController, ok := vc.Interface().(ControllerInterface)
- if !ok {
- panic("controller is not ControllerInterface")
- }
-
- elemVal := reflect.ValueOf(c).Elem()
- elemType := reflect.TypeOf(c).Elem()
- execElem := reflect.ValueOf(execController).Elem()
-
- numOfFields := elemVal.NumField()
- for i := 0; i < numOfFields; i++ {
- fieldType := elemType.Field(i)
- elemField := execElem.FieldByName(fieldType.Name)
- if elemField.CanSet() {
- fieldVal := elemVal.Field(i)
- elemField.Set(fieldVal)
- }
- }
-
- return execController
- }
-
- route.methodParams = methodParams
- if len(methods) == 0 {
- for m := range HTTPMETHOD {
- p.addToRouter(m, pattern, route)
- }
- } else {
- for k := range methods {
- if k == "*" {
- for m := range HTTPMETHOD {
- p.addToRouter(m, pattern, route)
- }
- } else {
- p.addToRouter(k, pattern, route)
- }
- }
- }
-}
-
-func (p *ControllerRegister) addToRouter(method, pattern string, r *ControllerInfo) {
- if !BConfig.RouterCaseSensitive {
- pattern = strings.ToLower(pattern)
- }
- if t, ok := p.routers[method]; ok {
- t.AddRouter(pattern, r)
- } else {
- t := NewTree()
- t.AddRouter(pattern, r)
- p.routers[method] = t
- }
-}
-
-// Include will generate the router file in the routers directory from the controllers, but only when the RunMode is dev
-// Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{})
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Include(cList ...ControllerInterface) {
- if BConfig.RunMode == DEV {
- skip := make(map[string]bool, 10)
- wgopath := utils.GetGOPATHs()
- go111module := os.Getenv(`GO111MODULE`)
- for _, c := range cList {
- reflectVal := reflect.ValueOf(c)
- t := reflect.Indirect(reflectVal).Type()
- // for go modules
- if go111module == `on` {
- pkgpath := filepath.Join(WorkPath, "..", t.PkgPath())
- if utils.FileExists(pkgpath) {
- if pkgpath != "" {
- if _, ok := skip[pkgpath]; !ok {
- skip[pkgpath] = true
- parserPkg(pkgpath, t.PkgPath())
- }
- }
- }
- } else {
- if len(wgopath) == 0 {
-				panic("you are in dev mode, so please set GOPATH")
- }
- pkgpath := ""
- for _, wg := range wgopath {
- wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath()))
- if utils.FileExists(wg) {
- pkgpath = wg
- break
- }
- }
- if pkgpath != "" {
- if _, ok := skip[pkgpath]; !ok {
- skip[pkgpath] = true
- parserPkg(pkgpath, t.PkgPath())
- }
- }
- }
- }
- }
- for _, c := range cList {
- reflectVal := reflect.ValueOf(c)
- t := reflect.Indirect(reflectVal).Type()
- key := t.PkgPath() + ":" + t.Name()
- if comm, ok := GlobalControllerRouter[key]; ok {
- for _, a := range comm {
- for _, f := range a.Filters {
- p.InsertFilter(f.Pattern, f.Pos, f.Filter, f.ReturnOnOutput, f.ResetParams)
- }
-
- p.addWithMethodParams(a.Router, c, a.MethodParams, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method)
- }
- }
- }
-}
-
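-// Illustrative sketch (CMSController is hypothetical): annotate an exported method
-// with the usual beego "@router" comment and register the controller with Include;
-// in dev mode the comment is parsed and the route is generated automatically.
-//
-//  type CMSController struct {
-//      beego.Controller
-//  }
-//
-//  // @router /staticblock/:key [get]
-//  func (c *CMSController) StaticBlock() {
-//      c.Ctx.WriteString(c.Ctx.Input.Param(":key"))
-//  }
-//
-//  beego.Include(&CMSController{})
-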
-// GetContext returns a Context from the pool. Remember to call Reset to reinitialize it,
-// and don't forget to give the Context back to the pool when you are done.
-// example:
-//  ctx := p.GetContext()
-//  ctx.Reset(w, r)
-// defer p.GiveBackContext(ctx)
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) GetContext() *beecontext.Context {
- return p.pool.Get().(*beecontext.Context)
-}
-
-// GiveBackContext puts the ctx back into the pool so that it can be reused
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) GiveBackContext(ctx *beecontext.Context) {
- // clear input cached data
- ctx.Input.Clear()
- // clear output cached data
- ctx.Output.Clear()
- p.pool.Put(ctx)
-}
-
-// Get add get method
-// usage:
-// Get("/", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Get(pattern string, f FilterFunc) {
- p.AddMethod("get", pattern, f)
-}
-
-// Post add post method
-// usage:
-// Post("/api", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Post(pattern string, f FilterFunc) {
- p.AddMethod("post", pattern, f)
-}
-
-// Put add put method
-// usage:
-// Put("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Put(pattern string, f FilterFunc) {
- p.AddMethod("put", pattern, f)
-}
-
-// Delete add delete method
-// usage:
-// Delete("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Delete(pattern string, f FilterFunc) {
- p.AddMethod("delete", pattern, f)
-}
-
-// Head add head method
-// usage:
-// Head("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Head(pattern string, f FilterFunc) {
- p.AddMethod("head", pattern, f)
-}
-
-// Patch add patch method
-// usage:
-// Patch("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Patch(pattern string, f FilterFunc) {
- p.AddMethod("patch", pattern, f)
-}
-
-// Options add options method
-// usage:
-// Options("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Options(pattern string, f FilterFunc) {
- p.AddMethod("options", pattern, f)
-}
-
-// Any add all method
-// usage:
-// Any("/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Any(pattern string, f FilterFunc) {
- p.AddMethod("*", pattern, f)
-}
-
-// AddMethod add http method router
-// usage:
-// AddMethod("get","/api/:id", func(ctx *context.Context){
-// ctx.Output.Body("hello world")
-// })
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) {
- method = strings.ToUpper(method)
- if method != "*" && !HTTPMETHOD[method] {
-		panic("unsupported http method: " + method)
- }
- route := &ControllerInfo{}
- route.pattern = pattern
- route.routerType = routerTypeRESTFul
- route.runFunction = f
- methods := make(map[string]string)
- if method == "*" {
- for val := range HTTPMETHOD {
- methods[val] = val
- }
- } else {
- methods[method] = method
- }
- route.methods = methods
- for k := range methods {
- if k == "*" {
- for m := range HTTPMETHOD {
- p.addToRouter(m, pattern, route)
- }
- } else {
- p.addToRouter(k, pattern, route)
- }
- }
-}
-
-// Handler adds a user-defined http.Handler for the pattern. Passing true as the first
-// optional argument appends a catch-all wildcard so the handler also matches sub-paths
-// (a short sketch follows this function).
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) {
- route := &ControllerInfo{}
- route.pattern = pattern
- route.routerType = routerTypeHandler
- route.handler = h
- if len(options) > 0 {
- if _, ok := options[0].(bool); ok {
- pattern = path.Join(pattern, "?:all(.*)")
- }
- }
- for m := range HTTPMETHOD {
- p.addToRouter(m, pattern, route)
- }
-}
-
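-// Illustrative sketch: mounting a plain http.Handler; the "public" directory is only an
-// example. Passing true makes the handler also match everything under /static/.
-//
-//  mux := NewControllerRegister()
-//  mux.Handler("/static", http.StripPrefix("/static", http.FileServer(http.Dir("public"))), true)
-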
-// AddAuto adds an auto router to the ControllerRegister.
-// example: beego.AddAuto(&MainController{}).
-// If MainController has methods List and Page,
-// visiting the URL /main/list executes List and
-// /main/page executes Page.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) AddAuto(c ControllerInterface) {
- p.AddAutoPrefix("/", c)
-}
-
-// AddAutoPrefix adds an auto router to the ControllerRegister with a prefix.
-// example: beego.AddAutoPrefix("/admin", &MainController{}).
-// If MainController has methods List and Page,
-// visiting the URL /admin/main/list executes List and
-// /admin/main/page executes Page (a short sketch follows this function).
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) AddAutoPrefix(prefix string, c ControllerInterface) {
- reflectVal := reflect.ValueOf(c)
- rt := reflectVal.Type()
- ct := reflect.Indirect(reflectVal).Type()
- controllerName := strings.TrimSuffix(ct.Name(), "Controller")
- for i := 0; i < rt.NumMethod(); i++ {
- if !utils.InSlice(rt.Method(i).Name, exceptMethod) {
- route := &ControllerInfo{}
- route.routerType = routerTypeBeego
- route.methods = map[string]string{"*": rt.Method(i).Name}
- route.controllerType = ct
- pattern := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name), "*")
- patternInit := path.Join(prefix, controllerName, rt.Method(i).Name, "*")
- patternFix := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name))
- patternFixInit := path.Join(prefix, controllerName, rt.Method(i).Name)
- route.pattern = pattern
- for m := range HTTPMETHOD {
- p.addToRouter(m, pattern, route)
- p.addToRouter(m, patternInit, route)
- p.addToRouter(m, patternFix, route)
- p.addToRouter(m, patternFixInit, route)
- }
- }
- }
-}
-
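-// Illustrative sketch (ProductController is hypothetical): auto-routing with a prefix.
-//
-//  mux := NewControllerRegister()
-//  mux.AddAutoPrefix("/admin", &ProductController{})
-//  // GET /admin/product/list  -> ProductController.List
-//  // GET /admin/Product/List  -> ProductController.List (the init-case pattern is also registered)
-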
-// InsertFilter Add a FilterFunc with pattern rule and action constant.
-// The optional boolean params are:
-//  1. returnOnOutput (default true): pass false to keep executing later filters and the handler even after output has started
-//  2. resetParams: whether the route params should be restored after the filter runs.
-// A usage sketch follows this function.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) error {
- mr := &FilterRouter{
- tree: NewTree(),
- pattern: pattern,
- filterFunc: filter,
- returnOnOutput: true,
- }
- if !BConfig.RouterCaseSensitive {
- mr.pattern = strings.ToLower(pattern)
- }
-
- paramsLen := len(params)
- if paramsLen > 0 {
- mr.returnOnOutput = params[0]
- }
- if paramsLen > 1 {
- mr.resetParams = params[1]
- }
- mr.tree.AddRouter(pattern, true)
- return p.insertFilterRouter(pos, mr)
-}
-
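-// Illustrative sketch: a BeforeExec filter that always runs and restores the route
-// params afterwards (the pattern and log message are only examples).
-//
-//  mux := NewControllerRegister()
-//  mux.InsertFilter("/api/*", BeforeExec, func(ctx *context.Context) {
-//      logs.Debug("filter hit: " + ctx.Input.URL())
-//  }, false, true)
-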
-// insertFilterRouter adds the FilterRouter into the filter chain at the given position.
-func (p *ControllerRegister) insertFilterRouter(pos int, mr *FilterRouter) (err error) {
- if pos < BeforeStatic || pos > FinishRouter {
- return errors.New("can not find your filter position")
- }
- p.enableFilter = true
- p.filters[pos] = append(p.filters[pos], mr)
- return nil
-}
-
-// URLFor builds the URL for a registered controller method (reverse routing).
-// The endpoint has the form "path.controller.method"; values are key-value pairs
-// that fill the named route params. See the sketch after this function.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) URLFor(endpoint string, values ...interface{}) string {
- paths := strings.Split(endpoint, ".")
- if len(paths) <= 1 {
-		logs.Warn("urlfor endpoint must be like path.controller.method")
- return ""
- }
- if len(values)%2 != 0 {
-		logs.Warn("urlfor params must be key-value pairs")
- return ""
- }
- params := make(map[string]string)
- if len(values) > 0 {
- key := ""
- for k, v := range values {
- if k%2 == 0 {
- key = fmt.Sprint(v)
- } else {
- params[key] = fmt.Sprint(v)
- }
- }
- }
- controllerName := strings.Join(paths[:len(paths)-1], "/")
- methodName := paths[len(paths)-1]
- for m, t := range p.routers {
- ok, url := p.getURL(t, "/", controllerName, methodName, params, m)
- if ok {
- return url
- }
- }
- return ""
-}
-
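-// Illustrative sketch (mirrors the tests; handler is a *ControllerRegister and the route is an example):
-//
-//  handler.Add("/person/:last/:first", &TestController{}, "*:Param")
-//  handler.URLFor("TestController.Param", ":last", "xie", ":first", "asta")
-//  // -> "/person/xie/asta"
-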
-func (p *ControllerRegister) getURL(t *Tree, url, controllerName, methodName string, params map[string]string, httpMethod string) (bool, string) {
- for _, subtree := range t.fixrouters {
- u := path.Join(url, subtree.prefix)
- ok, u := p.getURL(subtree, u, controllerName, methodName, params, httpMethod)
- if ok {
- return ok, u
- }
- }
- if t.wildcard != nil {
- u := path.Join(url, urlPlaceholder)
- ok, u := p.getURL(t.wildcard, u, controllerName, methodName, params, httpMethod)
- if ok {
- return ok, u
- }
- }
- for _, l := range t.leaves {
- if c, ok := l.runObject.(*ControllerInfo); ok {
- if c.routerType == routerTypeBeego &&
- strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllerName) {
- find := false
- if HTTPMETHOD[strings.ToUpper(methodName)] {
- if len(c.methods) == 0 {
- find = true
- } else if m, ok := c.methods[strings.ToUpper(methodName)]; ok && m == strings.ToUpper(methodName) {
- find = true
- } else if m, ok = c.methods["*"]; ok && m == methodName {
- find = true
- }
- }
- if !find {
- for m, md := range c.methods {
- if (m == "*" || m == httpMethod) && md == methodName {
- find = true
- }
- }
- }
- if find {
- if l.regexps == nil {
- if len(l.wildcards) == 0 {
- return true, strings.Replace(url, "/"+urlPlaceholder, "", 1) + toURL(params)
- }
- if len(l.wildcards) == 1 {
- if v, ok := params[l.wildcards[0]]; ok {
- delete(params, l.wildcards[0])
- return true, strings.Replace(url, urlPlaceholder, v, 1) + toURL(params)
- }
- return false, ""
- }
- if len(l.wildcards) == 3 && l.wildcards[0] == "." {
- if p, ok := params[":path"]; ok {
- if e, isok := params[":ext"]; isok {
- delete(params, ":path")
- delete(params, ":ext")
- return true, strings.Replace(url, urlPlaceholder, p+"."+e, -1) + toURL(params)
- }
- }
- }
- canSkip := false
- for _, v := range l.wildcards {
- if v == ":" {
- canSkip = true
- continue
- }
- if u, ok := params[v]; ok {
- delete(params, v)
- url = strings.Replace(url, urlPlaceholder, u, 1)
- } else {
- if canSkip {
- canSkip = false
- continue
- }
- return false, ""
- }
- }
- return true, url + toURL(params)
- }
- var i int
- var startReg bool
- regURL := ""
- for _, v := range strings.Trim(l.regexps.String(), "^$") {
- if v == '(' {
- startReg = true
- continue
- } else if v == ')' {
- startReg = false
- if v, ok := params[l.wildcards[i]]; ok {
- delete(params, l.wildcards[i])
- regURL = regURL + v
- i++
- } else {
- break
- }
- } else if !startReg {
- regURL = string(append([]rune(regURL), v))
- }
- }
- if l.regexps.MatchString(regURL) {
- ps := strings.Split(regURL, "/")
- for _, p := range ps {
- url = strings.Replace(url, urlPlaceholder, p, 1)
- }
- return true, url + toURL(params)
- }
- }
- }
- }
- }
-
- return false, ""
-}
-
-func (p *ControllerRegister) execFilter(context *beecontext.Context, urlPath string, pos int) (started bool) {
- var preFilterParams map[string]string
- for _, filterR := range p.filters[pos] {
- if filterR.returnOnOutput && context.ResponseWriter.Started {
- return true
- }
- if filterR.resetParams {
- preFilterParams = context.Input.Params()
- }
- if ok := filterR.ValidRouter(urlPath, context); ok {
- filterR.filterFunc(context)
- if filterR.resetParams {
- context.Input.ResetParams()
- for k, v := range preFilterParams {
- context.Input.SetParam(k, v)
- }
- }
- }
- if filterR.returnOnOutput && context.ResponseWriter.Started {
- return true
- }
- }
- return false
-}
-
-// Implement http.Handler interface.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
- startTime := time.Now()
- var (
- runRouter reflect.Type
- findRouter bool
- runMethod string
- methodParams []*param.MethodParam
- routerInfo *ControllerInfo
- isRunnable bool
- )
- context := p.GetContext()
-
- context.Reset(rw, r)
-
- defer p.GiveBackContext(context)
- if BConfig.RecoverFunc != nil {
- defer BConfig.RecoverFunc(context)
- }
-
- context.Output.EnableGzip = BConfig.EnableGzip
-
- if BConfig.RunMode == DEV {
- context.Output.Header("Server", BConfig.ServerName)
- }
-
- var urlPath = r.URL.Path
-
- if !BConfig.RouterCaseSensitive {
- urlPath = strings.ToLower(urlPath)
- }
-
- // filter wrong http method
- if !HTTPMETHOD[r.Method] {
- exception("405", context)
- goto Admin
- }
-
- // filter for static file
- if len(p.filters[BeforeStatic]) > 0 && p.execFilter(context, urlPath, BeforeStatic) {
- goto Admin
- }
-
- serverStaticRouter(context)
-
- if context.ResponseWriter.Started {
- findRouter = true
- goto Admin
- }
-
- if r.Method != http.MethodGet && r.Method != http.MethodHead {
- if BConfig.CopyRequestBody && !context.Input.IsUpload() {
-			// the connection will be closed if the incoming body is larger than MaxMemory (RFC 7231, 6.5.11)
- if r.ContentLength > BConfig.MaxMemory {
- logs.Error(errors.New("payload too large"))
- exception("413", context)
- goto Admin
- }
- context.Input.CopyBody(BConfig.MaxMemory)
- }
- context.Input.ParseFormOrMulitForm(BConfig.MaxMemory)
- }
-
- // session init
- if BConfig.WebConfig.Session.SessionOn {
- var err error
- context.Input.CruSession, err = GlobalSessions.SessionStart(rw, r)
- if err != nil {
- logs.Error(err)
- exception("503", context)
- goto Admin
- }
- defer func() {
- if context.Input.CruSession != nil {
- context.Input.CruSession.SessionRelease(rw)
- }
- }()
- }
- if len(p.filters[BeforeRouter]) > 0 && p.execFilter(context, urlPath, BeforeRouter) {
- goto Admin
- }
- // User can define RunController and RunMethod in filter
- if context.Input.RunController != nil && context.Input.RunMethod != "" {
- findRouter = true
- runMethod = context.Input.RunMethod
- runRouter = context.Input.RunController
- } else {
- routerInfo, findRouter = p.FindRouter(context)
- }
-
-	// if nothing matched the url, respond with a 404 not found
- if !findRouter {
- exception("404", context)
- goto Admin
- }
- if splat := context.Input.Param(":splat"); splat != "" {
- for k, v := range strings.Split(splat, "/") {
- context.Input.SetParam(strconv.Itoa(k), v)
- }
- }
-
- if routerInfo != nil {
- // store router pattern into context
- context.Input.SetData("RouterPattern", routerInfo.pattern)
- }
-
- // execute middleware filters
- if len(p.filters[BeforeExec]) > 0 && p.execFilter(context, urlPath, BeforeExec) {
- goto Admin
- }
-
- // check policies
- if p.execPolicy(context, urlPath) {
- goto Admin
- }
-
- if routerInfo != nil {
- if routerInfo.routerType == routerTypeRESTFul {
- if _, ok := routerInfo.methods[r.Method]; ok {
- isRunnable = true
- routerInfo.runFunction(context)
- } else {
- exception("405", context)
- goto Admin
- }
- } else if routerInfo.routerType == routerTypeHandler {
- isRunnable = true
- routerInfo.handler.ServeHTTP(context.ResponseWriter, context.Request)
- } else {
- runRouter = routerInfo.controllerType
- methodParams = routerInfo.methodParams
- method := r.Method
- if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodPut {
- method = http.MethodPut
- }
- if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodDelete {
- method = http.MethodDelete
- }
- if m, ok := routerInfo.methods[method]; ok {
- runMethod = m
- } else if m, ok = routerInfo.methods["*"]; ok {
- runMethod = m
- } else {
- runMethod = method
- }
- }
- }
-
-	// runRouter & runMethod may also have been set by a filter
- if !isRunnable {
- // Invoke the request handler
- var execController ControllerInterface
- if routerInfo != nil && routerInfo.initialize != nil {
- execController = routerInfo.initialize()
- } else {
- vc := reflect.New(runRouter)
- var ok bool
- execController, ok = vc.Interface().(ControllerInterface)
- if !ok {
- panic("controller is not ControllerInterface")
- }
- }
-
- // call the controller init function
- execController.Init(context, runRouter.Name(), runMethod, execController)
-
- // call prepare function
- execController.Prepare()
-
-		// if XSRF is enabled, generate the token and check the XSRF cookie for state-changing requests
- if BConfig.WebConfig.EnableXSRF {
- execController.XSRFToken()
- if r.Method == http.MethodPost || r.Method == http.MethodDelete || r.Method == http.MethodPut ||
- (r.Method == http.MethodPost && (context.Input.Query("_method") == http.MethodDelete || context.Input.Query("_method") == http.MethodPut)) {
- execController.CheckXSRFCookie()
- }
- }
-
- execController.URLMapping()
-
- if !context.ResponseWriter.Started {
- // exec main logic
- switch runMethod {
- case http.MethodGet:
- execController.Get()
- case http.MethodPost:
- execController.Post()
- case http.MethodDelete:
- execController.Delete()
- case http.MethodPut:
- execController.Put()
- case http.MethodHead:
- execController.Head()
- case http.MethodPatch:
- execController.Patch()
- case http.MethodOptions:
- execController.Options()
- case http.MethodTrace:
- execController.Trace()
- default:
- if !execController.HandlerFunc(runMethod) {
- vc := reflect.ValueOf(execController)
- method := vc.MethodByName(runMethod)
- in := param.ConvertParams(methodParams, method.Type(), context)
- out := method.Call(in)
-
- // For backward compatibility we only handle response if we had incoming methodParams
- if methodParams != nil {
- p.handleParamResponse(context, execController, out)
- }
- }
- }
-
- // render template
- if !context.ResponseWriter.Started && context.Output.Status == 0 {
- if BConfig.WebConfig.AutoRender {
- if err := execController.Render(); err != nil {
- logs.Error(err)
- }
- }
- }
- }
-
-		// finish the controller and release its resources
- execController.Finish()
- }
-
- // execute middleware filters
- if len(p.filters[AfterExec]) > 0 && p.execFilter(context, urlPath, AfterExec) {
- goto Admin
- }
-
- if len(p.filters[FinishRouter]) > 0 && p.execFilter(context, urlPath, FinishRouter) {
- goto Admin
- }
-
-Admin:
- // admin module record QPS
-
- statusCode := context.ResponseWriter.Status
- if statusCode == 0 {
- statusCode = 200
- }
-
- LogAccess(context, &startTime, statusCode)
-
- timeDur := time.Since(startTime)
- context.ResponseWriter.Elapsed = timeDur
- if BConfig.Listen.EnableAdmin {
- pattern := ""
- if routerInfo != nil {
- pattern = routerInfo.pattern
- }
-
- if FilterMonitorFunc(r.Method, r.URL.Path, timeDur, pattern, statusCode) {
- routerName := ""
- if runRouter != nil {
- routerName = runRouter.Name()
- }
- go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, routerName, timeDur)
- }
- }
-
- if BConfig.RunMode == DEV && !BConfig.Log.AccessLogs {
- match := map[bool]string{true: "match", false: "nomatch"}
- devInfo := fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s",
- context.Input.IP(),
- logs.ColorByStatus(statusCode), statusCode, logs.ResetColor(),
- timeDur.String(),
- match[findRouter],
- logs.ColorByMethod(r.Method), r.Method, logs.ResetColor(),
- r.URL.Path)
- if routerInfo != nil {
- devInfo += fmt.Sprintf(" r:%s", routerInfo.pattern)
- }
-
- logs.Debug(devInfo)
- }
-	// Call WriteHeader if the status code has been explicitly set
- if context.Output.Status != 0 {
- context.ResponseWriter.WriteHeader(context.Output.Status)
- }
-}
-
-func (p *ControllerRegister) handleParamResponse(context *beecontext.Context, execController ControllerInterface, results []reflect.Value) {
-	// loop in reverse order so that, when both a value and an error are returned, the error sets the response status code
- for i := len(results) - 1; i >= 0; i-- {
- result := results[i]
- if result.Kind() != reflect.Interface || !result.IsNil() {
- resultValue := result.Interface()
- context.RenderMethodResult(resultValue)
- }
- }
- if !context.ResponseWriter.Started && len(results) > 0 && context.Output.Status == 0 {
- context.Output.SetStatus(200)
- }
-}
-
-// FindRouter finds the router info for the request URL
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo *ControllerInfo, isFind bool) {
- var urlPath = context.Input.URL()
- if !BConfig.RouterCaseSensitive {
- urlPath = strings.ToLower(urlPath)
- }
- httpMethod := context.Input.Method()
- if t, ok := p.routers[httpMethod]; ok {
- runObject := t.Match(urlPath, context)
- if r, ok := runObject.(*ControllerInfo); ok {
- return r, true
- }
- }
- return
-}
-
-func toURL(params map[string]string) string {
- if len(params) == 0 {
- return ""
- }
- u := "?"
- for k, v := range params {
- u += k + "=" + v + "&"
- }
- return strings.TrimRight(u, "&")
-}
-
-// LogAccess logs an HTTP access record
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func LogAccess(ctx *beecontext.Context, startTime *time.Time, statusCode int) {
- // Skip logging if AccessLogs config is false
- if !BConfig.Log.AccessLogs {
- return
- }
- // Skip logging static requests unless EnableStaticLogs config is true
- if !BConfig.Log.EnableStaticLogs && DefaultAccessLogFilter.Filter(ctx) {
- return
- }
- var (
- requestTime time.Time
- elapsedTime time.Duration
- r = ctx.Request
- )
- if startTime != nil {
- requestTime = *startTime
- elapsedTime = time.Since(*startTime)
- }
- record := &logs.AccessLogRecord{
- RemoteAddr: ctx.Input.IP(),
- RequestTime: requestTime,
- RequestMethod: r.Method,
- Request: fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
- ServerProtocol: r.Proto,
- Host: r.Host,
- Status: statusCode,
- ElapsedTime: elapsedTime,
- HTTPReferrer: r.Header.Get("Referer"),
- HTTPUserAgent: r.Header.Get("User-Agent"),
- RemoteUser: r.Header.Get("Remote-User"),
- BodyBytesSent: r.ContentLength,
- }
- logs.AccessLog(record, BConfig.Log.AccessLogsFormat)
-}
diff --git a/router_test.go b/router_test.go
deleted file mode 100644
index 8ec7927a..00000000
--- a/router_test.go
+++ /dev/null
@@ -1,732 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "bytes"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-
- "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/logs"
-)
-
-type TestController struct {
- Controller
-}
-
-func (tc *TestController) Get() {
- tc.Data["Username"] = "astaxie"
- tc.Ctx.Output.Body([]byte("ok"))
-}
-
-func (tc *TestController) Post() {
- tc.Ctx.Output.Body([]byte(tc.Ctx.Input.Query(":name")))
-}
-
-func (tc *TestController) Param() {
- tc.Ctx.Output.Body([]byte(tc.Ctx.Input.Query(":name")))
-}
-
-func (tc *TestController) List() {
- tc.Ctx.Output.Body([]byte("i am list"))
-}
-
-func (tc *TestController) Params() {
- tc.Ctx.Output.Body([]byte(tc.Ctx.Input.Param("0") + tc.Ctx.Input.Param("1") + tc.Ctx.Input.Param("2")))
-}
-
-func (tc *TestController) Myext() {
- tc.Ctx.Output.Body([]byte(tc.Ctx.Input.Param(":ext")))
-}
-
-func (tc *TestController) GetURL() {
- tc.Ctx.Output.Body([]byte(tc.URLFor(".Myext")))
-}
-
-func (tc *TestController) GetParams() {
- tc.Ctx.WriteString(tc.Ctx.Input.Query(":last") + "+" +
- tc.Ctx.Input.Query(":first") + "+" + tc.Ctx.Input.Query("learn"))
-}
-
-func (tc *TestController) GetManyRouter() {
- tc.Ctx.WriteString(tc.Ctx.Input.Query(":id") + tc.Ctx.Input.Query(":page"))
-}
-
-func (tc *TestController) GetEmptyBody() {
- var res []byte
- tc.Ctx.Output.Body(res)
-}
-
-type JSONController struct {
- Controller
-}
-
-func (jc *JSONController) Prepare() {
- jc.Data["json"] = "prepare"
- jc.ServeJSON(true)
-}
-
-func (jc *JSONController) Get() {
- jc.Data["Username"] = "astaxie"
- jc.Ctx.Output.Body([]byte("ok"))
-}
-
-func TestUrlFor(t *testing.T) {
- handler := NewControllerRegister()
- handler.Add("/api/list", &TestController{}, "*:List")
- handler.Add("/person/:last/:first", &TestController{}, "*:Param")
- if a := handler.URLFor("TestController.List"); a != "/api/list" {
- logs.Info(a)
- t.Errorf("TestController.List must equal to /api/list")
- }
- if a := handler.URLFor("TestController.Param", ":last", "xie", ":first", "asta"); a != "/person/xie/asta" {
- t.Errorf("TestController.Param must equal to /person/xie/asta, but get " + a)
- }
-}
-
-func TestUrlFor3(t *testing.T) {
- handler := NewControllerRegister()
- handler.AddAuto(&TestController{})
- if a := handler.URLFor("TestController.Myext"); a != "/test/myext" && a != "/Test/Myext" {
- t.Errorf("TestController.Myext must equal to /test/myext, but get " + a)
- }
- if a := handler.URLFor("TestController.GetURL"); a != "/test/geturl" && a != "/Test/GetURL" {
- t.Errorf("TestController.GetURL must equal to /test/geturl, but get " + a)
- }
-}
-
-func TestUrlFor2(t *testing.T) {
- handler := NewControllerRegister()
- handler.Add("/v1/:v/cms_:id(.+)_:page(.+).html", &TestController{}, "*:List")
- handler.Add("/v1/:username/edit", &TestController{}, "get:GetURL")
- handler.Add("/v1/:v(.+)_cms/ttt_:id(.+)_:page(.+).html", &TestController{}, "*:Param")
- handler.Add("/:year:int/:month:int/:title/:entid", &TestController{})
- if handler.URLFor("TestController.GetURL", ":username", "astaxie") != "/v1/astaxie/edit" {
- logs.Info(handler.URLFor("TestController.GetURL"))
- t.Errorf("TestController.List must equal to /v1/astaxie/edit")
- }
-
- if handler.URLFor("TestController.List", ":v", "za", ":id", "12", ":page", "123") !=
- "/v1/za/cms_12_123.html" {
- logs.Info(handler.URLFor("TestController.List"))
- t.Errorf("TestController.List must equal to /v1/za/cms_12_123.html")
- }
- if handler.URLFor("TestController.Param", ":v", "za", ":id", "12", ":page", "123") !=
- "/v1/za_cms/ttt_12_123.html" {
- logs.Info(handler.URLFor("TestController.Param"))
- t.Errorf("TestController.List must equal to /v1/za_cms/ttt_12_123.html")
- }
- if handler.URLFor("TestController.Get", ":year", "1111", ":month", "11",
- ":title", "aaaa", ":entid", "aaaa") !=
- "/1111/11/aaaa/aaaa" {
- logs.Info(handler.URLFor("TestController.Get"))
- t.Errorf("TestController.Get must equal to /1111/11/aaaa/aaaa")
- }
-}
-
-func TestUserFunc(t *testing.T) {
- r, _ := http.NewRequest("GET", "/api/list", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/api/list", &TestController{}, "*:List")
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestPostFunc(t *testing.T) {
- r, _ := http.NewRequest("POST", "/astaxie", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/:name", &TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "astaxie" {
- t.Errorf("post func should astaxie")
- }
-}
-
-func TestAutoFunc(t *testing.T) {
- r, _ := http.NewRequest("GET", "/test/list", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.AddAuto(&TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestAutoFunc2(t *testing.T) {
- r, _ := http.NewRequest("GET", "/Test/List", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.AddAuto(&TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestAutoFuncParams(t *testing.T) {
- r, _ := http.NewRequest("GET", "/test/params/2009/11/12", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.AddAuto(&TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "20091112" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestAutoExtFunc(t *testing.T) {
- r, _ := http.NewRequest("GET", "/test/myext.json", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.AddAuto(&TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "json" {
- t.Errorf("user define func can't run")
- }
-}
-
-func TestRouteOk(t *testing.T) {
-
- r, _ := http.NewRequest("GET", "/person/anderson/thomas?learn=kungfu", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/person/:last/:first", &TestController{}, "get:GetParams")
- handler.ServeHTTP(w, r)
- body := w.Body.String()
- if body != "anderson+thomas+kungfu" {
- t.Errorf("url param set to [%s];", body)
- }
-}
-
-func TestManyRoute(t *testing.T) {
-
- r, _ := http.NewRequest("GET", "/beego32-12.html", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/beego:id([0-9]+)-:page([0-9]+).html", &TestController{}, "get:GetManyRouter")
- handler.ServeHTTP(w, r)
-
- body := w.Body.String()
-
- if body != "3212" {
- t.Errorf("url param set to [%s];", body)
- }
-}
-
-// Test for issue #1669
-func TestEmptyResponse(t *testing.T) {
-
- r, _ := http.NewRequest("GET", "/beego-empty.html", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/beego-empty.html", &TestController{}, "get:GetEmptyBody")
- handler.ServeHTTP(w, r)
-
- if body := w.Body.String(); body != "" {
- t.Error("want empty body")
- }
-}
-
-func TestNotFound(t *testing.T) {
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.ServeHTTP(w, r)
-
- if w.Code != http.StatusNotFound {
- t.Errorf("Code set to [%v]; want [%v]", w.Code, http.StatusNotFound)
- }
-}
-
-// TestStatic tests the ability to serve static
-// content from the filesystem
-func TestStatic(t *testing.T) {
- r, _ := http.NewRequest("GET", "/static/js/jquery.js", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.ServeHTTP(w, r)
-
- if w.Code != 404 {
- t.Errorf("handler.Static failed to serve file")
- }
-}
-
-func TestPrepare(t *testing.T) {
- r, _ := http.NewRequest("GET", "/json/list", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/json/list", &JSONController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != `"prepare"` {
- t.Errorf(w.Body.String() + "user define func can't run")
- }
-}
-
-func TestAutoPrefix(t *testing.T) {
- r, _ := http.NewRequest("GET", "/admin/test/list", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.AddAutoPrefix("/admin", &TestController{})
- handler.ServeHTTP(w, r)
- if w.Body.String() != "i am list" {
- t.Errorf("TestAutoPrefix can't run")
- }
-}
-
-func TestRouterGet(t *testing.T) {
- r, _ := http.NewRequest("GET", "/user", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Get("/user", func(ctx *context.Context) {
- ctx.Output.Body([]byte("Get userlist"))
- })
- handler.ServeHTTP(w, r)
- if w.Body.String() != "Get userlist" {
- t.Errorf("TestRouterGet can't run")
- }
-}
-
-func TestRouterPost(t *testing.T) {
- r, _ := http.NewRequest("POST", "/user/123", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Post("/user/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- })
- handler.ServeHTTP(w, r)
- if w.Body.String() != "123" {
- t.Errorf("TestRouterPost can't run")
- }
-}
-
-func sayhello(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("sayhello"))
-}
-
-func TestRouterHandler(t *testing.T) {
- r, _ := http.NewRequest("POST", "/sayhi", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Handler("/sayhi", http.HandlerFunc(sayhello))
- handler.ServeHTTP(w, r)
- if w.Body.String() != "sayhello" {
- t.Errorf("TestRouterHandler can't run")
- }
-}
-
-func TestRouterHandlerAll(t *testing.T) {
- r, _ := http.NewRequest("POST", "/sayhi/a/b/c", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Handler("/sayhi", http.HandlerFunc(sayhello), true)
- handler.ServeHTTP(w, r)
- if w.Body.String() != "sayhello" {
- t.Errorf("TestRouterHandler can't run")
- }
-}
-
-//
-// Benchmarks NewApp:
-//
-
-func beegoFilterFunc(ctx *context.Context) {
- ctx.WriteString("hello")
-}
-
-type AdminController struct {
- Controller
-}
-
-func (a *AdminController) Get() {
- a.Ctx.WriteString("hello")
-}
-
-func TestRouterFunc(t *testing.T) {
- mux := NewControllerRegister()
- mux.Get("/action", beegoFilterFunc)
- mux.Post("/action", beegoFilterFunc)
- rw, r := testRequest("GET", "/action")
- mux.ServeHTTP(rw, r)
- if rw.Body.String() != "hello" {
- t.Errorf("TestRouterFunc can't run")
- }
-}
-
-func BenchmarkFunc(b *testing.B) {
- mux := NewControllerRegister()
- mux.Get("/action", beegoFilterFunc)
- rw, r := testRequest("GET", "/action")
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mux.ServeHTTP(rw, r)
- }
-}
-
-func BenchmarkController(b *testing.B) {
- mux := NewControllerRegister()
- mux.Add("/action", &AdminController{})
- rw, r := testRequest("GET", "/action")
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- mux.ServeHTTP(rw, r)
- }
-}
-
-func testRequest(method, path string) (*httptest.ResponseRecorder, *http.Request) {
- request, _ := http.NewRequest(method, path, nil)
- recorder := httptest.NewRecorder()
-
- return recorder, request
-}
-
-// Expectation: A Filter with the correct configuration should be created given
-// specific parameters.
-func TestInsertFilter(t *testing.T) {
- testName := "TestInsertFilter"
-
- mux := NewControllerRegister()
- mux.InsertFilter("*", BeforeRouter, func(*context.Context) {})
- if !mux.filters[BeforeRouter][0].returnOnOutput {
- t.Errorf(
- "%s: passing no variadic params should set returnOnOutput to true",
- testName)
- }
- if mux.filters[BeforeRouter][0].resetParams {
- t.Errorf(
- "%s: passing no variadic params should set resetParams to false",
- testName)
- }
-
- mux = NewControllerRegister()
- mux.InsertFilter("*", BeforeRouter, func(*context.Context) {}, false)
- if mux.filters[BeforeRouter][0].returnOnOutput {
- t.Errorf(
- "%s: passing false as 1st variadic param should set returnOnOutput to false",
- testName)
- }
-
- mux = NewControllerRegister()
- mux.InsertFilter("*", BeforeRouter, func(*context.Context) {}, true, true)
- if !mux.filters[BeforeRouter][0].resetParams {
- t.Errorf(
- "%s: passing true as 2nd variadic param should set resetParams to true",
- testName)
- }
-}
-
-// Expectation: the second variadic arg should cause the execution of the filter
-// to preserve the parameters from before its execution.
-func TestParamResetFilter(t *testing.T) {
- testName := "TestParamResetFilter"
- route := "/beego/*" // splat
- path := "/beego/routes/routes"
-
- mux := NewControllerRegister()
-
- mux.InsertFilter("*", BeforeExec, beegoResetParams, true, true)
-
- mux.Get(route, beegoHandleResetParams)
-
- rw, r := testRequest("GET", path)
- mux.ServeHTTP(rw, r)
-
-	// The two functions, `beegoResetParams` and `beegoHandleResetParams`, add
-	// a response header of `Splat`. The expectation here is that the header
-	// value should match what the _request's_ router set, not the filter's.
-
- headers := rw.Result().Header
- if len(headers["Splat"]) != 1 {
- t.Errorf(
- "%s: There was an error in the test. Splat param not set in Header",
- testName)
- }
- if headers["Splat"][0] != "routes/routes" {
- t.Errorf(
- "%s: expected `:splat` param to be [routes/routes] but it was [%s]",
- testName, headers["Splat"][0])
- }
-}
-
-// Execution point: BeforeRouter
-// expectation: only the BeforeRouter function is executed; the router never handles the request (no match)
-func TestFilterBeforeRouter(t *testing.T) {
- testName := "TestFilterBeforeRouter"
- url := "/beforeRouter"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, BeforeRouter, beegoBeforeRouter1)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if !strings.Contains(rw.Body.String(), "BeforeRouter1") {
- t.Errorf(testName + " BeforeRouter did not run")
- }
- if strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " BeforeRouter did not return properly")
- }
-}
-
-// Execution point: BeforeExec
-// expectation: only BeforeExec function is executed, match as router determines route only
-func TestFilterBeforeExec(t *testing.T) {
- testName := "TestFilterBeforeExec"
- url := "/beforeExec"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, BeforeRouter, beegoFilterNoOutput)
- mux.InsertFilter(url, BeforeExec, beegoBeforeExec1)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if !strings.Contains(rw.Body.String(), "BeforeExec1") {
- t.Errorf(testName + " BeforeExec did not run")
- }
- if strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " BeforeExec did not return properly")
- }
- if strings.Contains(rw.Body.String(), "BeforeRouter") {
- t.Errorf(testName + " BeforeRouter ran in error")
- }
-}
-
-// Execution point: AfterExec
-// expectation: only AfterExec function is executed, match as router handles
-func TestFilterAfterExec(t *testing.T) {
- testName := "TestFilterAfterExec"
- url := "/afterExec"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, BeforeRouter, beegoFilterNoOutput)
- mux.InsertFilter(url, BeforeExec, beegoFilterNoOutput)
- mux.InsertFilter(url, AfterExec, beegoAfterExec1, false)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if !strings.Contains(rw.Body.String(), "AfterExec1") {
- t.Errorf(testName + " AfterExec did not run")
- }
- if !strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " handler did not run properly")
- }
- if strings.Contains(rw.Body.String(), "BeforeRouter") {
- t.Errorf(testName + " BeforeRouter ran in error")
- }
- if strings.Contains(rw.Body.String(), "BeforeExec") {
- t.Errorf(testName + " BeforeExec ran in error")
- }
-}
-
-// Execution point: FinishRouter
-// expectation: only FinishRouter function is executed, match as router handles
-func TestFilterFinishRouter(t *testing.T) {
- testName := "TestFilterFinishRouter"
- url := "/finishRouter"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, BeforeRouter, beegoFilterNoOutput)
- mux.InsertFilter(url, BeforeExec, beegoFilterNoOutput)
- mux.InsertFilter(url, AfterExec, beegoFilterNoOutput)
- mux.InsertFilter(url, FinishRouter, beegoFinishRouter1)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if strings.Contains(rw.Body.String(), "FinishRouter1") {
- t.Errorf(testName + " FinishRouter did not run")
- }
- if !strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " handler did not run properly")
- }
- if strings.Contains(rw.Body.String(), "AfterExec1") {
- t.Errorf(testName + " AfterExec ran in error")
- }
- if strings.Contains(rw.Body.String(), "BeforeRouter") {
- t.Errorf(testName + " BeforeRouter ran in error")
- }
- if strings.Contains(rw.Body.String(), "BeforeExec") {
- t.Errorf(testName + " BeforeExec ran in error")
- }
-}
-
-// Execution point: FinishRouter
-// expectation: only first FinishRouter function is executed, match as router handles
-func TestFilterFinishRouterMultiFirstOnly(t *testing.T) {
- testName := "TestFilterFinishRouterMultiFirstOnly"
- url := "/finishRouterMultiFirstOnly"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, FinishRouter, beegoFinishRouter1, false)
- mux.InsertFilter(url, FinishRouter, beegoFinishRouter2)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if !strings.Contains(rw.Body.String(), "FinishRouter1") {
- t.Errorf(testName + " FinishRouter1 did not run")
- }
- if !strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " handler did not run properly")
- }
- // not expected in body
- if strings.Contains(rw.Body.String(), "FinishRouter2") {
- t.Errorf(testName + " FinishRouter2 did run")
- }
-}
-
-// Execution point: FinishRouter
-// expectation: both FinishRouter functions execute, match as router handles
-func TestFilterFinishRouterMulti(t *testing.T) {
- testName := "TestFilterFinishRouterMulti"
- url := "/finishRouterMulti"
-
- mux := NewControllerRegister()
- mux.InsertFilter(url, FinishRouter, beegoFinishRouter1, false)
- mux.InsertFilter(url, FinishRouter, beegoFinishRouter2, false)
-
- mux.Get(url, beegoFilterFunc)
-
- rw, r := testRequest("GET", url)
- mux.ServeHTTP(rw, r)
-
- if !strings.Contains(rw.Body.String(), "FinishRouter1") {
- t.Errorf(testName + " FinishRouter1 did not run")
- }
- if !strings.Contains(rw.Body.String(), "hello") {
- t.Errorf(testName + " handler did not run properly")
- }
- if !strings.Contains(rw.Body.String(), "FinishRouter2") {
- t.Errorf(testName + " FinishRouter2 did not run properly")
- }
-}
-
-func beegoFilterNoOutput(ctx *context.Context) {
-}
-
-func beegoBeforeRouter1(ctx *context.Context) {
- ctx.WriteString("|BeforeRouter1")
-}
-
-func beegoBeforeExec1(ctx *context.Context) {
- ctx.WriteString("|BeforeExec1")
-}
-
-func beegoAfterExec1(ctx *context.Context) {
- ctx.WriteString("|AfterExec1")
-}
-
-func beegoFinishRouter1(ctx *context.Context) {
- ctx.WriteString("|FinishRouter1")
-}
-
-func beegoFinishRouter2(ctx *context.Context) {
- ctx.WriteString("|FinishRouter2")
-}
-
-func beegoResetParams(ctx *context.Context) {
- ctx.ResponseWriter.Header().Set("splat", ctx.Input.Param(":splat"))
-}
-
-func beegoHandleResetParams(ctx *context.Context) {
- ctx.ResponseWriter.Header().Set("splat", ctx.Input.Param(":splat"))
-}
-
-// YAML
-type YAMLController struct {
- Controller
-}
-
-func (jc *YAMLController) Prepare() {
- jc.Data["yaml"] = "prepare"
- jc.ServeYAML()
-}
-
-func (jc *YAMLController) Get() {
- jc.Data["Username"] = "astaxie"
- jc.Ctx.Output.Body([]byte("ok"))
-}
-
-func TestYAMLPrepare(t *testing.T) {
- r, _ := http.NewRequest("GET", "/yaml/list", nil)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Add("/yaml/list", &YAMLController{})
- handler.ServeHTTP(w, r)
- if strings.TrimSpace(w.Body.String()) != "prepare" {
- t.Errorf(w.Body.String())
- }
-}
-
-func TestRouterEntityTooLargeCopyBody(t *testing.T) {
- _MaxMemory := BConfig.MaxMemory
- _CopyRequestBody := BConfig.CopyRequestBody
- BConfig.CopyRequestBody = true
- BConfig.MaxMemory = 20
-
- b := bytes.NewBuffer([]byte("barbarbarbarbarbarbarbarbarbar"))
- r, _ := http.NewRequest("POST", "/user/123", b)
- w := httptest.NewRecorder()
-
- handler := NewControllerRegister()
- handler.Post("/user/:id", func(ctx *context.Context) {
- ctx.Output.Body([]byte(ctx.Input.Param(":id")))
- })
- handler.ServeHTTP(w, r)
-
- BConfig.CopyRequestBody = _CopyRequestBody
- BConfig.MaxMemory = _MaxMemory
-
- if w.Code != http.StatusRequestEntityTooLarge {
- t.Errorf("TestRouterRequestEntityTooLarge can't run")
- }
-}
diff --git a/session/README.md b/session/README.md
deleted file mode 100644
index 6d0a297e..00000000
--- a/session/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-session
-==============
-
-session is a Go session manager. It can use many session providers, just like `database/sql` and `database/sql/driver`.
-
-## How to install?
-
- go get github.com/astaxie/beego/session
-
-
-## What providers are supported?
-
-As of now this session manager supports memory, file, Redis, MySQL and cookie providers.
-
-
-## How to use it?
-
-First you must import it
-
- import (
- "github.com/astaxie/beego/session"
- )
-
-Then in your web app, initialize the global session manager
-
- var globalSessions *session.Manager
-
-* Use **memory** as provider:
-
- func init() {
- globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid","gclifetime":3600}`)
- go globalSessions.GC()
- }
-
-* Use **file** as provider, the last param is the directory where you want the session files to be stored:
-
- func init() {
- globalSessions, _ = session.NewManager("file",`{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"./tmp"}`)
- go globalSessions.GC()
- }
-
-* Use **Redis** as provider, the last param is the Redis connection address, pool size and password:
-
- func init() {
- globalSessions, _ = session.NewManager("redis", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:6379,100,astaxie"}`)
- go globalSessions.GC()
- }
-
-* Use **MySQL** as provider, the last param is the DSN, learn more from [mysql](https://github.com/go-sql-driver/mysql#dsn-data-source-name):
-
- func init() {
- globalSessions, _ = session.NewManager(
- "mysql", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"username:password@protocol(address)/dbname?param=value"}`)
- go globalSessions.GC()
- }
-
-* Use **Cookie** as provider:
-
- func init() {
- globalSessions, _ = session.NewManager(
- "cookie", `{"cookieName":"gosessionid","enableSetCookie":false,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`)
- go globalSessions.GC()
- }
-
-
-Finally, in your handler funcs you can use it like this:
-
- func login(w http.ResponseWriter, r *http.Request) {
- sess := globalSessions.SessionStart(w, r)
- defer sess.SessionRelease(w)
- username := sess.Get("username")
- fmt.Println(username)
- if r.Method == "GET" {
- t, _ := template.ParseFiles("login.gtpl")
- t.Execute(w, nil)
- } else {
- fmt.Println("username:", r.Form["username"])
- sess.Set("username", r.Form["username"])
- fmt.Println("password:", r.Form["password"])
- }
- }
-
-
-## How to write your own provider?
-
-When you develop a web app, you may want to write your own provider to meet your specific requirements.
-
-Writing a provider is easy. You only need to define two struct types
-(Session and Provider) that satisfy the interface definitions.
-The **memory** provider is a good example; a minimal sketch also follows the interface listing below.
-
- type SessionStore interface {
- Set(key, value interface{}) error //set session value
- Get(key interface{}) interface{} //get session value
- Delete(key interface{}) error //delete session value
-	SessionID() string //return the current sessionID
- SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data
- Flush() error //delete all data
- }
-
- type Provider interface {
- SessionInit(gclifetime int64, config string) error
- SessionRead(sid string) (SessionStore, error)
- SessionExist(sid string) bool
- SessionRegenerate(oldsid, sid string) (SessionStore, error)
- SessionDestroy(sid string) error
- SessionAll() int //get all active session
- SessionGC()
- }
-
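-A minimal sketch of a custom provider (illustrative only: it keeps sessions in a
-package-level map and skips the locking and GC that a real provider needs; the
-signatures follow the built-in providers, which return `session.Store`):
-
-    package mysession
-
-    import (
-        "net/http"
-
-        "github.com/astaxie/beego/session"
-    )
-
-    type Store struct {
-        sid    string
-        values map[interface{}]interface{}
-    }
-
-    func (s *Store) Set(key, value interface{}) error     { s.values[key] = value; return nil }
-    func (s *Store) Get(key interface{}) interface{}      { return s.values[key] }
-    func (s *Store) Delete(key interface{}) error         { delete(s.values, key); return nil }
-    func (s *Store) SessionID() string                    { return s.sid }
-    func (s *Store) SessionRelease(w http.ResponseWriter) {}
-    func (s *Store) Flush() error                         { s.values = map[interface{}]interface{}{}; return nil }
-
-    type Provider struct{ sessions map[string]*Store }
-
-    func (p *Provider) SessionInit(gclifetime int64, config string) error {
-        p.sessions = map[string]*Store{}
-        return nil
-    }
-
-    func (p *Provider) SessionRead(sid string) (session.Store, error) {
-        if s, ok := p.sessions[sid]; ok {
-            return s, nil
-        }
-        s := &Store{sid: sid, values: map[interface{}]interface{}{}}
-        p.sessions[sid] = s
-        return s, nil
-    }
-
-    func (p *Provider) SessionExist(sid string) bool { _, ok := p.sessions[sid]; return ok }
-
-    func (p *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
-        s, _ := p.SessionRead(oldsid)
-        delete(p.sessions, oldsid)
-        s.(*Store).sid = sid
-        p.sessions[sid] = s.(*Store)
-        return s, nil
-    }
-
-    func (p *Provider) SessionDestroy(sid string) error { delete(p.sessions, sid); return nil }
-    func (p *Provider) SessionGC()                      {}
-    func (p *Provider) SessionAll() int                 { return len(p.sessions) }
-
-    func init() { session.Register("mysession", &Provider{}) }
-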
-
-## LICENSE
-
-BSD License http://creativecommons.org/licenses/BSD/
diff --git a/session/couchbase/sess_couchbase.go b/session/couchbase/sess_couchbase.go
deleted file mode 100644
index 707d042c..00000000
--- a/session/couchbase/sess_couchbase.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package couchbase for session provider
-//
-// depends on github.com/couchbase/go-couchbase
-//
-// go install github.com/couchbase/go-couchbase
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/couchbase"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("couchbase", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"http://host:port/, Pool, Bucket"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package couchbase
-
-import (
- "net/http"
- "strings"
- "sync"
-
- couchbase "github.com/couchbase/go-couchbase"
-
- "github.com/astaxie/beego/session"
-)
-
-var couchbpder = &Provider{}
-
-// SessionStore store each session
-type SessionStore struct {
- b *couchbase.Bucket
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Provider couchbase session provider
-type Provider struct {
- maxlifetime int64
- savePath string
- pool string
- bucket string
- b *couchbase.Bucket
-}
-
-// Set value to couchbase session
-func (cs *SessionStore) Set(key, value interface{}) error {
- cs.lock.Lock()
- defer cs.lock.Unlock()
- cs.values[key] = value
- return nil
-}
-
-// Get value from couchbase session
-func (cs *SessionStore) Get(key interface{}) interface{} {
- cs.lock.RLock()
- defer cs.lock.RUnlock()
- if v, ok := cs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in couchbase session by given key
-func (cs *SessionStore) Delete(key interface{}) error {
- cs.lock.Lock()
- defer cs.lock.Unlock()
- delete(cs.values, key)
- return nil
-}
-
-// Flush Clean all values in couchbase session
-func (cs *SessionStore) Flush() error {
- cs.lock.Lock()
- defer cs.lock.Unlock()
- cs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID Get couchbase session store id
-func (cs *SessionStore) SessionID() string {
- return cs.sid
-}
-
-// SessionRelease Write couchbase session with Gob string
-func (cs *SessionStore) SessionRelease(w http.ResponseWriter) {
- defer cs.b.Close()
-
- bo, err := session.EncodeGob(cs.values)
- if err != nil {
- return
- }
-
- cs.b.Set(cs.sid, int(cs.maxlifetime), bo)
-}
-
-func (cp *Provider) getBucket() *couchbase.Bucket {
- c, err := couchbase.Connect(cp.savePath)
- if err != nil {
- return nil
- }
-
- pool, err := c.GetPool(cp.pool)
- if err != nil {
- return nil
- }
-
- bucket, err := pool.GetBucket(cp.bucket)
- if err != nil {
- return nil
- }
-
- return bucket
-}
-
-// SessionInit init couchbase session
-// savepath like couchbase server REST/JSON URL
-// e.g. http://host:port/, Pool, Bucket
-func (cp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- cp.maxlifetime = maxlifetime
- configs := strings.Split(savePath, ",")
- if len(configs) > 0 {
- cp.savePath = configs[0]
- }
- if len(configs) > 1 {
- cp.pool = configs[1]
- }
- if len(configs) > 2 {
- cp.bucket = configs[2]
- }
-
- return nil
-}
-
-// SessionRead read couchbase session by sid
-func (cp *Provider) SessionRead(sid string) (session.Store, error) {
- cp.b = cp.getBucket()
-
- var (
- kv map[interface{}]interface{}
- err error
- doc []byte
- )
-
- err = cp.b.Get(sid, &doc)
- if err != nil {
- return nil, err
- } else if doc == nil {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(doc)
- if err != nil {
- return nil, err
- }
- }
-
- cs := &SessionStore{b: cp.b, sid: sid, values: kv, maxlifetime: cp.maxlifetime}
- return cs, nil
-}
-
-// SessionExist checks whether a couchbase session with the given sid exists.
-func (cp *Provider) SessionExist(sid string) bool {
- cp.b = cp.getBucket()
- defer cp.b.Close()
-
- var doc []byte
-
- if err := cp.b.Get(sid, &doc); err != nil || doc == nil {
- return false
- }
- return true
-}
-
-// SessionRegenerate remove oldsid and use sid to generate new session
-func (cp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- cp.b = cp.getBucket()
-
- var doc []byte
- if err := cp.b.Get(oldsid, &doc); err != nil || doc == nil {
- cp.b.Set(sid, int(cp.maxlifetime), "")
- } else {
- err := cp.b.Delete(oldsid)
- if err != nil {
- return nil, err
- }
- _, _ = cp.b.Add(sid, int(cp.maxlifetime), doc)
- }
-
- err := cp.b.Get(sid, &doc)
- if err != nil {
- return nil, err
- }
- var kv map[interface{}]interface{}
- if doc == nil {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(doc)
- if err != nil {
- return nil, err
- }
- }
-
- cs := &SessionStore{b: cp.b, sid: sid, values: kv, maxlifetime: cp.maxlifetime}
- return cs, nil
-}
-
-// SessionDestroy removes the couchbase session data for the given sid
-func (cp *Provider) SessionDestroy(sid string) error {
- cp.b = cp.getBucket()
- defer cp.b.Close()
-
- cp.b.Delete(sid)
- return nil
-}
-
-// SessionGC Recycle
-func (cp *Provider) SessionGC() {
-}
-
-// SessionAll return all active session
-func (cp *Provider) SessionAll() int {
- return 0
-}
-
-func init() {
- session.Register("couchbase", couchbpder)
-}
diff --git a/session/ledis/ledis_session.go b/session/ledis/ledis_session.go
deleted file mode 100644
index ee81df67..00000000
--- a/session/ledis/ledis_session.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Package ledis provide session Provider
-package ledis
-
-import (
- "net/http"
- "strconv"
- "strings"
- "sync"
-
- "github.com/ledisdb/ledisdb/config"
- "github.com/ledisdb/ledisdb/ledis"
-
- "github.com/astaxie/beego/session"
-)
-
-var (
- ledispder = &Provider{}
- c *ledis.DB
-)
-
-// SessionStore ledis session store
-type SessionStore struct {
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Set value in ledis session
-func (ls *SessionStore) Set(key, value interface{}) error {
- ls.lock.Lock()
- defer ls.lock.Unlock()
- ls.values[key] = value
- return nil
-}
-
-// Get value in ledis session
-func (ls *SessionStore) Get(key interface{}) interface{} {
- ls.lock.RLock()
- defer ls.lock.RUnlock()
- if v, ok := ls.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in ledis session
-func (ls *SessionStore) Delete(key interface{}) error {
- ls.lock.Lock()
- defer ls.lock.Unlock()
- delete(ls.values, key)
- return nil
-}
-
-// Flush clear all values in ledis session
-func (ls *SessionStore) Flush() error {
- ls.lock.Lock()
- defer ls.lock.Unlock()
- ls.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get ledis session id
-func (ls *SessionStore) SessionID() string {
- return ls.sid
-}
-
-// SessionRelease save session values to ledis
-func (ls *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(ls.values)
- if err != nil {
- return
- }
- c.Set([]byte(ls.sid), b)
- c.Expire([]byte(ls.sid), ls.maxlifetime)
-}
-
-// Provider ledis session provider
-type Provider struct {
- maxlifetime int64
- savePath string
- db int
-}
-
-// SessionInit init ledis session
-// savepath is the ledis data directory, optionally followed by the db index,
-// e.g. /tmp/ledis_data,0
-func (lp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- var err error
- lp.maxlifetime = maxlifetime
- configs := strings.Split(savePath, ",")
- if len(configs) == 1 {
- lp.savePath = configs[0]
- } else if len(configs) == 2 {
- lp.savePath = configs[0]
- lp.db, err = strconv.Atoi(configs[1])
- if err != nil {
- return err
- }
- }
- cfg := new(config.Config)
- cfg.DataDir = lp.savePath
-
- var ledisInstance *ledis.Ledis
- ledisInstance, err = ledis.Open(cfg)
- if err != nil {
- return err
- }
- c, err = ledisInstance.Select(lp.db)
- return err
-}
-
-// SessionRead read ledis session by sid
-func (lp *Provider) SessionRead(sid string) (session.Store, error) {
- var (
- kv map[interface{}]interface{}
- err error
- )
-
- kvs, _ := c.Get([]byte(sid))
-
- if len(kvs) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- if kv, err = session.DecodeGob(kvs); err != nil {
- return nil, err
- }
- }
-
- ls := &SessionStore{sid: sid, values: kv, maxlifetime: lp.maxlifetime}
- return ls, nil
-}
-
-// SessionExist check ledis session exist by sid
-func (lp *Provider) SessionExist(sid string) bool {
- count, _ := c.Exists([]byte(sid))
- return count != 0
-}
-
-// SessionRegenerate generate new sid for ledis session
-func (lp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- count, _ := c.Exists([]byte(sid))
- if count == 0 {
-		// the new sid doesn't exist yet, so set it directly;
-		// the error from Exists is ignored, because on error
-		// count stays 0 and this branch is taken anyway
- c.Set([]byte(sid), []byte(""))
- c.Expire([]byte(sid), lp.maxlifetime)
- } else {
- data, _ := c.Get([]byte(oldsid))
- c.Set([]byte(sid), data)
- c.Expire([]byte(sid), lp.maxlifetime)
- }
- return lp.SessionRead(sid)
-}
-
-// SessionDestroy delete ledis session by id
-func (lp *Provider) SessionDestroy(sid string) error {
- c.Del([]byte(sid))
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (lp *Provider) SessionGC() {
-}
-
-// SessionAll returns the number of active sessions; always 0 for this provider
-func (lp *Provider) SessionAll() int {
- return 0
-}
-func init() {
- session.Register("ledis", ledispder)
-}
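
Of the session providers removed in this patch, ledis is the only one whose package comment carries no Usage block. Below is a minimal wiring sketch, assuming the ProviderConfig format that SessionInit above actually parses (a ledis data directory, optionally followed by a db number); the directory and the handler are illustrative only.

	package main

	import (
		"net/http"

		"github.com/astaxie/beego/session"
		// the blank import runs init(), which registers the "ledis" provider
		_ "github.com/astaxie/beego/session/ledis"
	)

	var globalSessions *session.Manager

	func main() {
		conf := &session.ManagerConfig{
			CookieName:      "gosessionid",
			EnableSetCookie: true,
			Gclifetime:      3600,
			Maxlifetime:     3600,
			// "<ledis data dir>[,<db number>]"; the directory here is hypothetical
			ProviderConfig: "/tmp/ledis-session-data,0",
		}
		var err error
		globalSessions, err = session.NewManager("ledis", conf)
		if err != nil {
			panic(err)
		}
		go globalSessions.GC()

		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
			sess, err := globalSessions.SessionStart(w, r)
			if err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
			defer sess.SessionRelease(w)
			sess.Set("username", "astaxie")
			w.Write([]byte(sess.SessionID()))
		})
		http.ListenAndServe(":8080", nil)
	}
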
diff --git a/session/memcache/sess_memcache.go b/session/memcache/sess_memcache.go
deleted file mode 100644
index 85a2d815..00000000
--- a/session/memcache/sess_memcache.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package memcache for session provider
-//
-// depend on github.com/bradfitz/gomemcache/memcache
-//
-// go install github.com/bradfitz/gomemcache/memcache
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/memcache"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("memcache", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:11211"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package memcache
-
-import (
- "net/http"
- "strings"
- "sync"
-
- "github.com/astaxie/beego/session"
-
- "github.com/bradfitz/gomemcache/memcache"
-)
-
-var mempder = &MemProvider{}
-var client *memcache.Client
-
-// SessionStore memcache session store
-type SessionStore struct {
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Set value in memcache session
-func (rs *SessionStore) Set(key, value interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values[key] = value
- return nil
-}
-
-// Get value in memcache session
-func (rs *SessionStore) Get(key interface{}) interface{} {
- rs.lock.RLock()
- defer rs.lock.RUnlock()
- if v, ok := rs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in memcache session
-func (rs *SessionStore) Delete(key interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- delete(rs.values, key)
- return nil
-}
-
-// Flush clear all values in memcache session
-func (rs *SessionStore) Flush() error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get memcache session id
-func (rs *SessionStore) SessionID() string {
- return rs.sid
-}
-
-// SessionRelease save session values to memcache
-func (rs *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(rs.values)
- if err != nil {
- return
- }
- item := memcache.Item{Key: rs.sid, Value: b, Expiration: int32(rs.maxlifetime)}
- client.Set(&item)
-}
-
-// MemProvider memcache session provider
-type MemProvider struct {
- maxlifetime int64
- conninfo []string
- poolsize int
- password string
-}
-
-// SessionInit init memcache session
-// savePath is one or more memcache server addresses, separated by ';'
-// e.g. 127.0.0.1:9090
-func (rp *MemProvider) SessionInit(maxlifetime int64, savePath string) error {
- rp.maxlifetime = maxlifetime
- rp.conninfo = strings.Split(savePath, ";")
- client = memcache.New(rp.conninfo...)
- return nil
-}
-
-// SessionRead read memcache session by sid
-func (rp *MemProvider) SessionRead(sid string) (session.Store, error) {
- if client == nil {
- if err := rp.connectInit(); err != nil {
- return nil, err
- }
- }
- item, err := client.Get(sid)
- if err != nil {
- if err == memcache.ErrCacheMiss {
- rs := &SessionStore{sid: sid, values: make(map[interface{}]interface{}), maxlifetime: rp.maxlifetime}
- return rs, nil
- }
- return nil, err
- }
- var kv map[interface{}]interface{}
- if len(item.Value) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(item.Value)
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{sid: sid, values: kv, maxlifetime: rp.maxlifetime}
- return rs, nil
-}
-
-// SessionExist check memcache session exist by sid
-func (rp *MemProvider) SessionExist(sid string) bool {
- if client == nil {
- if err := rp.connectInit(); err != nil {
- return false
- }
- }
- if item, err := client.Get(sid); err != nil || len(item.Value) == 0 {
- return false
- }
- return true
-}
-
-// SessionRegenerate generate new sid for memcache session
-func (rp *MemProvider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- if client == nil {
- if err := rp.connectInit(); err != nil {
- return nil, err
- }
- }
- var contain []byte
- if item, err := client.Get(sid); err != nil || len(item.Value) == 0 {
-		// the new sid doesn't exist yet (or the lookup failed),
-		// so write an empty value under the new sid directly
-		// and give it the configured lifetime
- item.Key = sid
- item.Value = []byte("")
- item.Expiration = int32(rp.maxlifetime)
- client.Set(item)
- } else {
- client.Delete(oldsid)
- item.Key = sid
- item.Expiration = int32(rp.maxlifetime)
- client.Set(item)
- contain = item.Value
- }
-
- var kv map[interface{}]interface{}
- if len(contain) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- var err error
- kv, err = session.DecodeGob(contain)
- if err != nil {
- return nil, err
- }
- }
-
- rs := &SessionStore{sid: sid, values: kv, maxlifetime: rp.maxlifetime}
- return rs, nil
-}
-
-// SessionDestroy delete memcache session by id
-func (rp *MemProvider) SessionDestroy(sid string) error {
- if client == nil {
- if err := rp.connectInit(); err != nil {
- return err
- }
- }
-
- return client.Delete(sid)
-}
-
-func (rp *MemProvider) connectInit() error {
- client = memcache.New(rp.conninfo...)
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (rp *MemProvider) SessionGC() {
-}
-
-// SessionAll returns the number of active sessions; always 0 for this provider
-func (rp *MemProvider) SessionAll() int {
- return 0
-}
-
-func init() {
- session.Register("memcache", mempder)
-}
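
SessionInit above splits ProviderConfig on ';' and hands every entry to memcache.New, so the provider can be pointed at several memcached servers even though the package comment shows a single address. A short sketch under that reading; the addresses are illustrative.

	package main

	import (
		"github.com/astaxie/beego/session"
		// the blank import registers the "memcache" provider
		_ "github.com/astaxie/beego/session/memcache"
	)

	func main() {
		conf := &session.ManagerConfig{
			CookieName:      "gosessionid",
			EnableSetCookie: true,
			Gclifetime:      3600,
			Maxlifetime:     3600,
			// one or more memcached servers, separated by ';'
			ProviderConfig: "10.0.0.1:11211;10.0.0.2:11211",
		}
		globalSessions, err := session.NewManager("memcache", conf)
		if err != nil {
			panic(err)
		}
		go globalSessions.GC()
		_ = globalSessions // hand the manager to the HTTP layer from here
	}
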
diff --git a/session/mysql/sess_mysql.go b/session/mysql/sess_mysql.go
deleted file mode 100644
index 301353ab..00000000
--- a/session/mysql/sess_mysql.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mysql for session provider
-//
-// depends on github.com/go-sql-driver/mysql:
-//
-// go install github.com/go-sql-driver/mysql
-//
-// mysql session support requires creating this table:
-// CREATE TABLE `session` (
-// `session_key` char(64) NOT NULL,
-// `session_data` blob,
-// `session_expiry` int(11) unsigned NOT NULL,
-// PRIMARY KEY (`session_key`)
-// ) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/mysql"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("mysql", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package mysql
-
-import (
- "database/sql"
- "net/http"
- "sync"
- "time"
-
- "github.com/astaxie/beego/session"
- // import mysql driver
- _ "github.com/go-sql-driver/mysql"
-)
-
-var (
-	// TableName is the MySQL table used to store session data
- TableName = "session"
- mysqlpder = &Provider{}
-)
-
-// SessionStore mysql session store
-type SessionStore struct {
- c *sql.DB
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
-}
-
-// Set value in mysql session.
-// the value is kept in the in-memory map until SessionRelease writes it to the database.
-func (st *SessionStore) Set(key, value interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values[key] = value
- return nil
-}
-
-// Get value from mysql session
-func (st *SessionStore) Get(key interface{}) interface{} {
- st.lock.RLock()
- defer st.lock.RUnlock()
- if v, ok := st.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in mysql session
-func (st *SessionStore) Delete(key interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- delete(st.values, key)
- return nil
-}
-
-// Flush clear all values in mysql session
-func (st *SessionStore) Flush() error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get session id of this mysql session store
-func (st *SessionStore) SessionID() string {
- return st.sid
-}
-
-// SessionRelease saves mysql session values to the database.
-// it must be called, otherwise the values are never persisted.
-func (st *SessionStore) SessionRelease(w http.ResponseWriter) {
- defer st.c.Close()
- b, err := session.EncodeGob(st.values)
- if err != nil {
- return
- }
- st.c.Exec("UPDATE "+TableName+" set `session_data`=?, `session_expiry`=? where session_key=?",
- b, time.Now().Unix(), st.sid)
-}
-
-// Provider mysql session provider
-type Provider struct {
- maxlifetime int64
- savePath string
-}
-
-// connect to mysql
-func (mp *Provider) connectInit() *sql.DB {
- db, e := sql.Open("mysql", mp.savePath)
- if e != nil {
- return nil
- }
- return db
-}
-
-// SessionInit init mysql session.
-// savepath is the connection string of mysql.
-func (mp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- mp.maxlifetime = maxlifetime
- mp.savePath = savePath
- return nil
-}
-
-// SessionRead get mysql session by sid
-func (mp *Provider) SessionRead(sid string) (session.Store, error) {
- c := mp.connectInit()
- row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- if err == sql.ErrNoRows {
- c.Exec("insert into "+TableName+"(`session_key`,`session_data`,`session_expiry`) values(?,?,?)",
- sid, "", time.Now().Unix())
- }
- var kv map[interface{}]interface{}
- if len(sessiondata) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(sessiondata)
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{c: c, sid: sid, values: kv}
- return rs, nil
-}
-
-// SessionExist check mysql session exist
-func (mp *Provider) SessionExist(sid string) bool {
- c := mp.connectInit()
- defer c.Close()
- row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- return err != sql.ErrNoRows
-}
-
-// SessionRegenerate generate new sid for mysql session
-func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- c := mp.connectInit()
- row := c.QueryRow("select session_data from "+TableName+" where session_key=?", oldsid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- if err == sql.ErrNoRows {
- c.Exec("insert into "+TableName+"(`session_key`,`session_data`,`session_expiry`) values(?,?,?)", oldsid, "", time.Now().Unix())
- }
- c.Exec("update "+TableName+" set `session_key`=? where session_key=?", sid, oldsid)
- var kv map[interface{}]interface{}
- if len(sessiondata) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(sessiondata)
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{c: c, sid: sid, values: kv}
- return rs, nil
-}
-
-// SessionDestroy delete mysql session by sid
-func (mp *Provider) SessionDestroy(sid string) error {
- c := mp.connectInit()
- c.Exec("DELETE FROM "+TableName+" where session_key=?", sid)
- c.Close()
- return nil
-}
-
-// SessionGC delete expired values in mysql session
-func (mp *Provider) SessionGC() {
- c := mp.connectInit()
- c.Exec("DELETE from "+TableName+" where session_expiry < ?", time.Now().Unix()-mp.maxlifetime)
- c.Close()
-}
-
-// SessionAll counts the sessions stored in the mysql table
-func (mp *Provider) SessionAll() int {
- c := mp.connectInit()
- defer c.Close()
- var total int
- err := c.QueryRow("SELECT count(*) as num from " + TableName).Scan(&total)
- if err != nil {
- return 0
- }
- return total
-}
-
-func init() {
- session.Register("mysql", mysqlpder)
-}
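
TableName above is an exported package variable, so the table this provider reads and writes can be renamed without touching the queries in this file. A sketch of overriding it before the manager is built; the table name, credentials and DSN are placeholders, and the table must already exist with the schema shown in the package comment.

	package main

	import (
		"github.com/astaxie/beego/session"
		// importing the package registers the "mysql" provider via init()
		mysqlsession "github.com/astaxie/beego/session/mysql"
	)

	func main() {
		// hypothetical table name; its schema must match the CREATE TABLE statement above
		mysqlsession.TableName = "beego_session"

		conf := &session.ManagerConfig{
			CookieName:  "gosessionid",
			Gclifetime:  3600,
			Maxlifetime: 3600,
			// go-sql-driver DSN; user, password and database are placeholders
			ProviderConfig: "user:password@tcp(127.0.0.1:3306)/myapp",
		}
		globalSessions, err := session.NewManager("mysql", conf)
		if err != nil {
			panic(err)
		}
		go globalSessions.GC()
		_ = globalSessions
	}
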
diff --git a/session/postgres/sess_postgresql.go b/session/postgres/sess_postgresql.go
deleted file mode 100644
index 0b8b9645..00000000
--- a/session/postgres/sess_postgresql.go
+++ /dev/null
@@ -1,243 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package postgres for session provider
-//
-// depends on github.com/lib/pq:
-//
-// go install github.com/lib/pq
-//
-//
-// needs this table in your database:
-//
-// CREATE TABLE session (
-// session_key char(64) NOT NULL,
-// session_data bytea,
-// session_expiry timestamp NOT NULL,
-// CONSTRAINT session_key PRIMARY KEY(session_key)
-// );
-//
-// will be activated with these settings in app.conf:
-//
-// SessionOn = true
-// SessionProvider = postgresql
-// SessionSavePath = "user=a password=b dbname=c sslmode=disable"
-// SessionName = session
-//
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/postgresql"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("postgresql", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"user=pqgotest dbname=pqgotest sslmode=verify-full"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package postgres
-
-import (
- "database/sql"
- "net/http"
- "sync"
- "time"
-
- "github.com/astaxie/beego/session"
- // import postgresql Driver
- _ "github.com/lib/pq"
-)
-
-var postgresqlpder = &Provider{}
-
-// SessionStore postgresql session store
-type SessionStore struct {
- c *sql.DB
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
-}
-
-// Set value in postgresql session.
-// the value is kept in the in-memory map until SessionRelease writes it to the database.
-func (st *SessionStore) Set(key, value interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values[key] = value
- return nil
-}
-
-// Get value from postgresql session
-func (st *SessionStore) Get(key interface{}) interface{} {
- st.lock.RLock()
- defer st.lock.RUnlock()
- if v, ok := st.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in postgresql session
-func (st *SessionStore) Delete(key interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- delete(st.values, key)
- return nil
-}
-
-// Flush clear all values in postgresql session
-func (st *SessionStore) Flush() error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get session id of this postgresql session store
-func (st *SessionStore) SessionID() string {
- return st.sid
-}
-
-// SessionRelease saves postgresql session values to the database.
-// it must be called, otherwise the values are never persisted.
-func (st *SessionStore) SessionRelease(w http.ResponseWriter) {
- defer st.c.Close()
- b, err := session.EncodeGob(st.values)
- if err != nil {
- return
- }
- st.c.Exec("UPDATE session set session_data=$1, session_expiry=$2 where session_key=$3",
- b, time.Now().Format(time.RFC3339), st.sid)
-
-}
-
-// Provider postgresql session provider
-type Provider struct {
- maxlifetime int64
- savePath string
-}
-
-// connect to postgresql
-func (mp *Provider) connectInit() *sql.DB {
- db, e := sql.Open("postgres", mp.savePath)
- if e != nil {
- return nil
- }
- return db
-}
-
-// SessionInit init postgresql session.
-// savepath is the connection string of postgresql.
-func (mp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- mp.maxlifetime = maxlifetime
- mp.savePath = savePath
- return nil
-}
-
-// SessionRead get postgresql session by sid
-func (mp *Provider) SessionRead(sid string) (session.Store, error) {
- c := mp.connectInit()
- row := c.QueryRow("select session_data from session where session_key=$1", sid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- if err == sql.ErrNoRows {
- _, err = c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)",
- sid, "", time.Now().Format(time.RFC3339))
-
- if err != nil {
- return nil, err
- }
- } else if err != nil {
- return nil, err
- }
-
- var kv map[interface{}]interface{}
- if len(sessiondata) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(sessiondata)
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{c: c, sid: sid, values: kv}
- return rs, nil
-}
-
-// SessionExist check postgresql session exist
-func (mp *Provider) SessionExist(sid string) bool {
- c := mp.connectInit()
- defer c.Close()
- row := c.QueryRow("select session_data from session where session_key=$1", sid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- return err != sql.ErrNoRows
-}
-
-// SessionRegenerate generate new sid for postgresql session
-func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- c := mp.connectInit()
- row := c.QueryRow("select session_data from session where session_key=$1", oldsid)
- var sessiondata []byte
- err := row.Scan(&sessiondata)
- if err == sql.ErrNoRows {
- c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)",
- oldsid, "", time.Now().Format(time.RFC3339))
- }
- c.Exec("update session set session_key=$1 where session_key=$2", sid, oldsid)
- var kv map[interface{}]interface{}
- if len(sessiondata) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob(sessiondata)
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{c: c, sid: sid, values: kv}
- return rs, nil
-}
-
-// SessionDestroy delete postgresql session by sid
-func (mp *Provider) SessionDestroy(sid string) error {
- c := mp.connectInit()
- c.Exec("DELETE FROM session where session_key=$1", sid)
- c.Close()
- return nil
-}
-
-// SessionGC delete expired values in postgresql session
-func (mp *Provider) SessionGC() {
- c := mp.connectInit()
- c.Exec("DELETE from session where EXTRACT(EPOCH FROM (current_timestamp - session_expiry)) > $1", mp.maxlifetime)
- c.Close()
-}
-
-// SessionAll counts the sessions stored in the postgresql table
-func (mp *Provider) SessionAll() int {
- c := mp.connectInit()
- defer c.Close()
- var total int
- err := c.QueryRow("SELECT count(*) as num from session").Scan(&total)
- if err != nil {
- return 0
- }
- return total
-}
-
-func init() {
- session.Register("postgresql", postgresqlpder)
-}
diff --git a/session/redis/sess_redis.go b/session/redis/sess_redis.go
deleted file mode 100644
index 5c382d61..00000000
--- a/session/redis/sess_redis.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package redis for session provider
-//
-// depend on github.com/gomodule/redigo/redis
-//
-// go install github.com/gomodule/redigo/redis
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/redis"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("redis", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package redis
-
-import (
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/astaxie/beego/session"
-
- "github.com/gomodule/redigo/redis"
-)
-
-var redispder = &Provider{}
-
-// MaxPoolSize redis max pool size
-var MaxPoolSize = 100
-
-// SessionStore redis session store
-type SessionStore struct {
- p *redis.Pool
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Set value in redis session
-func (rs *SessionStore) Set(key, value interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values[key] = value
- return nil
-}
-
-// Get value in redis session
-func (rs *SessionStore) Get(key interface{}) interface{} {
- rs.lock.RLock()
- defer rs.lock.RUnlock()
- if v, ok := rs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in redis session
-func (rs *SessionStore) Delete(key interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- delete(rs.values, key)
- return nil
-}
-
-// Flush clear all values in redis session
-func (rs *SessionStore) Flush() error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get redis session id
-func (rs *SessionStore) SessionID() string {
- return rs.sid
-}
-
-// SessionRelease save session values to redis
-func (rs *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(rs.values)
- if err != nil {
- return
- }
- c := rs.p.Get()
- defer c.Close()
- c.Do("SETEX", rs.sid, rs.maxlifetime, string(b))
-}
-
-// Provider redis session provider
-type Provider struct {
- maxlifetime int64
- savePath string
- poolsize int
- password string
- dbNum int
- poollist *redis.Pool
-}
-
-// SessionInit init redis session
-// savepath like redis server addr,pool size,password,dbnum,IdleTimeout second
-// e.g. 127.0.0.1:6379,100,astaxie,0,30
-func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- rp.maxlifetime = maxlifetime
- configs := strings.Split(savePath, ",")
- if len(configs) > 0 {
- rp.savePath = configs[0]
- }
- if len(configs) > 1 {
- poolsize, err := strconv.Atoi(configs[1])
- if err != nil || poolsize < 0 {
- rp.poolsize = MaxPoolSize
- } else {
- rp.poolsize = poolsize
- }
- } else {
- rp.poolsize = MaxPoolSize
- }
- if len(configs) > 2 {
- rp.password = configs[2]
- }
- if len(configs) > 3 {
- dbnum, err := strconv.Atoi(configs[3])
- if err != nil || dbnum < 0 {
- rp.dbNum = 0
- } else {
- rp.dbNum = dbnum
- }
- } else {
- rp.dbNum = 0
- }
- var idleTimeout time.Duration = 0
- if len(configs) > 4 {
- timeout, err := strconv.Atoi(configs[4])
- if err == nil && timeout > 0 {
- idleTimeout = time.Duration(timeout) * time.Second
- }
- }
- rp.poollist = &redis.Pool{
- Dial: func() (redis.Conn, error) {
- c, err := redis.Dial("tcp", rp.savePath)
- if err != nil {
- return nil, err
- }
- if rp.password != "" {
- if _, err = c.Do("AUTH", rp.password); err != nil {
- c.Close()
- return nil, err
- }
- }
-			// some redis proxies, such as twemproxy, do not support the SELECT command
- if rp.dbNum > 0 {
- _, err = c.Do("SELECT", rp.dbNum)
- if err != nil {
- c.Close()
- return nil, err
- }
- }
- return c, err
- },
- MaxIdle: rp.poolsize,
- }
-
- rp.poollist.IdleTimeout = idleTimeout
-
- return rp.poollist.Get().Err()
-}
-
-// SessionRead read redis session by sid
-func (rp *Provider) SessionRead(sid string) (session.Store, error) {
- c := rp.poollist.Get()
- defer c.Close()
-
- var kv map[interface{}]interface{}
-
- kvs, err := redis.String(c.Do("GET", sid))
- if err != nil && err != redis.ErrNil {
- return nil, err
- }
- if len(kvs) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- if kv, err = session.DecodeGob([]byte(kvs)); err != nil {
- return nil, err
- }
- }
-
- rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime}
- return rs, nil
-}
-
-// SessionExist check redis session exist by sid
-func (rp *Provider) SessionExist(sid string) bool {
- c := rp.poollist.Get()
- defer c.Close()
-
- if existed, err := redis.Int(c.Do("EXISTS", sid)); err != nil || existed == 0 {
- return false
- }
- return true
-}
-
-// SessionRegenerate generate new sid for redis session
-func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- c := rp.poollist.Get()
- defer c.Close()
-
- if existed, _ := redis.Int(c.Do("EXISTS", oldsid)); existed == 0 {
-		// oldsid doesn't exist, so set the new sid directly;
-		// the error from EXISTS is ignored, because on error
-		// existed stays 0 and this branch is taken anyway
- c.Do("SET", sid, "", "EX", rp.maxlifetime)
- } else {
- c.Do("RENAME", oldsid, sid)
- c.Do("EXPIRE", sid, rp.maxlifetime)
- }
- return rp.SessionRead(sid)
-}
-
-// SessionDestroy delete redis session by id
-func (rp *Provider) SessionDestroy(sid string) error {
- c := rp.poollist.Get()
- defer c.Close()
-
- c.Do("DEL", sid)
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (rp *Provider) SessionGC() {
-}
-
-// SessionAll returns the number of active sessions; always 0 for this provider
-func (rp *Provider) SessionAll() int {
- return 0
-}
-
-func init() {
- session.Register("redis", redispder)
-}
diff --git a/session/redis_cluster/redis_cluster.go b/session/redis_cluster/redis_cluster.go
deleted file mode 100644
index 262fa2e3..00000000
--- a/session/redis_cluster/redis_cluster.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package redis_cluster for session provider
-//
-// depend on github.com/go-redis/redis
-//
-// go install github.com/go-redis/redis
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/redis_cluster"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("redis_cluster", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070;127.0.0.1:7071"}``)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package redis_cluster
-
-import (
- "github.com/astaxie/beego/session"
- rediss "github.com/go-redis/redis"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-var redispder = &Provider{}
-
-// MaxPoolSize redis_cluster max pool size
-var MaxPoolSize = 1000
-
-// SessionStore redis_cluster session store
-type SessionStore struct {
- p *rediss.ClusterClient
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Set value in redis_cluster session
-func (rs *SessionStore) Set(key, value interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values[key] = value
- return nil
-}
-
-// Get value in redis_cluster session
-func (rs *SessionStore) Get(key interface{}) interface{} {
- rs.lock.RLock()
- defer rs.lock.RUnlock()
- if v, ok := rs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in redis_cluster session
-func (rs *SessionStore) Delete(key interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- delete(rs.values, key)
- return nil
-}
-
-// Flush clear all values in redis_cluster session
-func (rs *SessionStore) Flush() error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get redis_cluster session id
-func (rs *SessionStore) SessionID() string {
- return rs.sid
-}
-
-// SessionRelease save session values to redis_cluster
-func (rs *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(rs.values)
- if err != nil {
- return
- }
- c := rs.p
- c.Set(rs.sid, string(b), time.Duration(rs.maxlifetime)*time.Second)
-}
-
-// Provider redis_cluster session provider
-type Provider struct {
- maxlifetime int64
- savePath string
- poolsize int
- password string
- dbNum int
- poollist *rediss.ClusterClient
-}
-
-// SessionInit init redis_cluster session
-// savepath like redis server addr,pool size,password,dbnum
-// e.g. 127.0.0.1:6379;127.0.0.1:6380,100,test,0
-func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- rp.maxlifetime = maxlifetime
- configs := strings.Split(savePath, ",")
- if len(configs) > 0 {
- rp.savePath = configs[0]
- }
- if len(configs) > 1 {
- poolsize, err := strconv.Atoi(configs[1])
- if err != nil || poolsize < 0 {
- rp.poolsize = MaxPoolSize
- } else {
- rp.poolsize = poolsize
- }
- } else {
- rp.poolsize = MaxPoolSize
- }
- if len(configs) > 2 {
- rp.password = configs[2]
- }
- if len(configs) > 3 {
- dbnum, err := strconv.Atoi(configs[3])
- if err != nil || dbnum < 0 {
- rp.dbNum = 0
- } else {
- rp.dbNum = dbnum
- }
- } else {
- rp.dbNum = 0
- }
-
- rp.poollist = rediss.NewClusterClient(&rediss.ClusterOptions{
- Addrs: strings.Split(rp.savePath, ";"),
- Password: rp.password,
- PoolSize: rp.poolsize,
- })
- return rp.poollist.Ping().Err()
-}
-
-// SessionRead read redis_cluster session by sid
-func (rp *Provider) SessionRead(sid string) (session.Store, error) {
- var kv map[interface{}]interface{}
- kvs, err := rp.poollist.Get(sid).Result()
- if err != nil && err != rediss.Nil {
- return nil, err
- }
- if len(kvs) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- if kv, err = session.DecodeGob([]byte(kvs)); err != nil {
- return nil, err
- }
- }
-
- rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime}
- return rs, nil
-}
-
-// SessionExist check redis_cluster session exist by sid
-func (rp *Provider) SessionExist(sid string) bool {
- c := rp.poollist
- if existed, err := c.Exists(sid).Result(); err != nil || existed == 0 {
- return false
- }
- return true
-}
-
-// SessionRegenerate generate new sid for redis_cluster session
-func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- c := rp.poollist
-
- if existed, err := c.Exists(oldsid).Result(); err != nil || existed == 0 {
-		// oldsid doesn't exist (or the EXISTS call failed),
-		// so write an empty value under the new sid directly
-		// and give it the configured lifetime
- c.Set(sid, "", time.Duration(rp.maxlifetime)*time.Second)
- } else {
- c.Rename(oldsid, sid)
- c.Expire(sid, time.Duration(rp.maxlifetime)*time.Second)
- }
- return rp.SessionRead(sid)
-}
-
-// SessionDestroy delete redis session by id
-func (rp *Provider) SessionDestroy(sid string) error {
- c := rp.poollist
- c.Del(sid)
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (rp *Provider) SessionGC() {
-}
-
-// SessionAll returns the number of active sessions; always 0 for this provider
-func (rp *Provider) SessionAll() int {
- return 0
-}
-
-func init() {
- session.Register("redis_cluster", redispder)
-}
diff --git a/session/redis_sentinel/sess_redis_sentinel.go b/session/redis_sentinel/sess_redis_sentinel.go
deleted file mode 100644
index 6ecb2977..00000000
--- a/session/redis_sentinel/sess_redis_sentinel.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package redis_sentinel for session provider
-//
-// depend on github.com/go-redis/redis
-//
-// go install github.com/go-redis/redis
-//
-// Usage:
-// import(
-// _ "github.com/astaxie/beego/session/redis_sentinel"
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("redis_sentinel", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:26379;127.0.0.2:26379"}``)
-// go globalSessions.GC()
-// }
-//
-// more detail about params: please check the notes on the function SessionInit in this package
-package redis_sentinel
-
-import (
- "github.com/astaxie/beego/session"
- "github.com/go-redis/redis"
- "net/http"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-var redispder = &Provider{}
-
-// DefaultPoolSize redis_sentinel default pool size
-var DefaultPoolSize = 100
-
-// SessionStore redis_sentinel session store
-type SessionStore struct {
- p *redis.Client
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxlifetime int64
-}
-
-// Set value in redis_sentinel session
-func (rs *SessionStore) Set(key, value interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values[key] = value
- return nil
-}
-
-// Get value in redis_sentinel session
-func (rs *SessionStore) Get(key interface{}) interface{} {
- rs.lock.RLock()
- defer rs.lock.RUnlock()
- if v, ok := rs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in redis_sentinel session
-func (rs *SessionStore) Delete(key interface{}) error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- delete(rs.values, key)
- return nil
-}
-
-// Flush clear all values in redis_sentinel session
-func (rs *SessionStore) Flush() error {
- rs.lock.Lock()
- defer rs.lock.Unlock()
- rs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID get redis_sentinel session id
-func (rs *SessionStore) SessionID() string {
- return rs.sid
-}
-
-// SessionRelease save session values to redis_sentinel
-func (rs *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(rs.values)
- if err != nil {
- return
- }
- c := rs.p
- c.Set(rs.sid, string(b), time.Duration(rs.maxlifetime)*time.Second)
-}
-
-// Provider redis_sentinel session provider
-type Provider struct {
- maxlifetime int64
- savePath string
- poolsize int
- password string
- dbNum int
- poollist *redis.Client
- masterName string
-}
-
-// SessionInit init redis_sentinel session
-// savepath like redis sentinel addr,pool size,password,dbnum,masterName
-// e.g. 127.0.0.1:26379;127.0.0.2:26379,100,1qaz2wsx,0,mymaster
-func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
- rp.maxlifetime = maxlifetime
- configs := strings.Split(savePath, ",")
- if len(configs) > 0 {
- rp.savePath = configs[0]
- }
- if len(configs) > 1 {
- poolsize, err := strconv.Atoi(configs[1])
- if err != nil || poolsize < 0 {
- rp.poolsize = DefaultPoolSize
- } else {
- rp.poolsize = poolsize
- }
- } else {
- rp.poolsize = DefaultPoolSize
- }
- if len(configs) > 2 {
- rp.password = configs[2]
- }
- if len(configs) > 3 {
- dbnum, err := strconv.Atoi(configs[3])
- if err != nil || dbnum < 0 {
- rp.dbNum = 0
- } else {
- rp.dbNum = dbnum
- }
- } else {
- rp.dbNum = 0
- }
- if len(configs) > 4 {
- if configs[4] != "" {
- rp.masterName = configs[4]
- } else {
- rp.masterName = "mymaster"
- }
- } else {
- rp.masterName = "mymaster"
- }
-
- rp.poollist = redis.NewFailoverClient(&redis.FailoverOptions{
- SentinelAddrs: strings.Split(rp.savePath, ";"),
- Password: rp.password,
- PoolSize: rp.poolsize,
- DB: rp.dbNum,
- MasterName: rp.masterName,
- })
-
- return rp.poollist.Ping().Err()
-}
-
-// SessionRead read redis_sentinel session by sid
-func (rp *Provider) SessionRead(sid string) (session.Store, error) {
- var kv map[interface{}]interface{}
- kvs, err := rp.poollist.Get(sid).Result()
- if err != nil && err != redis.Nil {
- return nil, err
- }
- if len(kvs) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- if kv, err = session.DecodeGob([]byte(kvs)); err != nil {
- return nil, err
- }
- }
-
- rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime}
- return rs, nil
-}
-
-// SessionExist check redis_sentinel session exist by sid
-func (rp *Provider) SessionExist(sid string) bool {
- c := rp.poollist
- if existed, err := c.Exists(sid).Result(); err != nil || existed == 0 {
- return false
- }
- return true
-}
-
-// SessionRegenerate generate new sid for redis_sentinel session
-func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- c := rp.poollist
-
- if existed, err := c.Exists(oldsid).Result(); err != nil || existed == 0 {
-		// oldsid doesn't exist (or the EXISTS call failed),
-		// so write an empty value under the new sid directly
-		// and give it the configured lifetime
- c.Set(sid, "", time.Duration(rp.maxlifetime)*time.Second)
- } else {
- c.Rename(oldsid, sid)
- c.Expire(sid, time.Duration(rp.maxlifetime)*time.Second)
- }
- return rp.SessionRead(sid)
-}
-
-// SessionDestroy delete redis session by id
-func (rp *Provider) SessionDestroy(sid string) error {
- c := rp.poollist
- c.Del(sid)
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (rp *Provider) SessionGC() {
-}
-
-// SessionAll returns the number of active sessions; always 0 for this provider
-func (rp *Provider) SessionAll() int {
- return 0
-}
-
-func init() {
- session.Register("redis_sentinel", redispder)
-}
diff --git a/session/redis_sentinel/sess_redis_sentinel_test.go b/session/redis_sentinel/sess_redis_sentinel_test.go
deleted file mode 100644
index fd4155c6..00000000
--- a/session/redis_sentinel/sess_redis_sentinel_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package redis_sentinel
-
-import (
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/astaxie/beego/session"
-)
-
-func TestRedisSentinel(t *testing.T) {
- sessionConfig := &session.ManagerConfig{
- CookieName: "gosessionid",
- EnableSetCookie: true,
- Gclifetime: 3600,
- Maxlifetime: 3600,
- Secure: false,
- CookieLifeTime: 3600,
- ProviderConfig: "127.0.0.1:6379,100,,0,master",
- }
- globalSessions, e := session.NewManager("redis_sentinel", sessionConfig)
- if e != nil {
- t.Log(e)
- return
- }
-	// TODO: also cover the success path when e == nil
- go globalSessions.GC()
-
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
-
- sess, err := globalSessions.SessionStart(w, r)
- if err != nil {
- t.Fatal("session start failed:", err)
- }
- defer sess.SessionRelease(w)
-
- // SET AND GET
- err = sess.Set("username", "astaxie")
- if err != nil {
- t.Fatal("set username failed:", err)
- }
- username := sess.Get("username")
- if username != "astaxie" {
- t.Fatal("get username failed")
- }
-
- // DELETE
- err = sess.Delete("username")
- if err != nil {
- t.Fatal("delete username failed:", err)
- }
- username = sess.Get("username")
- if username != nil {
- t.Fatal("delete username failed")
- }
-
- // FLUSH
- err = sess.Set("username", "astaxie")
- if err != nil {
- t.Fatal("set failed:", err)
- }
- err = sess.Set("password", "1qaz2wsx")
- if err != nil {
- t.Fatal("set failed:", err)
- }
- username = sess.Get("username")
- if username != "astaxie" {
- t.Fatal("get username failed")
- }
- password := sess.Get("password")
- if password != "1qaz2wsx" {
- t.Fatal("get password failed")
- }
- err = sess.Flush()
- if err != nil {
- t.Fatal("flush failed:", err)
- }
- username = sess.Get("username")
- if username != nil {
- t.Fatal("flush failed")
- }
- password = sess.Get("password")
- if password != nil {
- t.Fatal("flush failed")
- }
-
- sess.SessionRelease(w)
-
-}
diff --git a/session/sess_cookie.go b/session/sess_cookie.go
deleted file mode 100644
index 6ad5debc..00000000
--- a/session/sess_cookie.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "encoding/json"
- "net/http"
- "net/url"
- "sync"
-)
-
-var cookiepder = &CookieProvider{}
-
-// CookieSessionStore Cookie SessionStore
-type CookieSessionStore struct {
- sid string
- values map[interface{}]interface{} // session data
- lock sync.RWMutex
-}
-
-// Set value to cookie session.
-// the values are gob-encoded and secured with the hash and block keys when the session is released.
-func (st *CookieSessionStore) Set(key, value interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values[key] = value
- return nil
-}
-
-// Get value from cookie session
-func (st *CookieSessionStore) Get(key interface{}) interface{} {
- st.lock.RLock()
- defer st.lock.RUnlock()
- if v, ok := st.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in cookie session
-func (st *CookieSessionStore) Delete(key interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- delete(st.values, key)
- return nil
-}
-
-// Flush Clean all values in cookie session
-func (st *CookieSessionStore) Flush() error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID Return id of this cookie session
-func (st *CookieSessionStore) SessionID() string {
- return st.sid
-}
-
-// SessionRelease Write cookie session to http response cookie
-func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) {
- st.lock.Lock()
- encodedCookie, err := encodeCookie(cookiepder.block, cookiepder.config.SecurityKey, cookiepder.config.SecurityName, st.values)
- st.lock.Unlock()
- if err == nil {
- cookie := &http.Cookie{Name: cookiepder.config.CookieName,
- Value: url.QueryEscape(encodedCookie),
- Path: "/",
- HttpOnly: true,
- Secure: cookiepder.config.Secure,
- MaxAge: cookiepder.config.Maxage}
- http.SetCookie(w, cookie)
- }
-}
-
-type cookieConfig struct {
- SecurityKey string `json:"securityKey"`
- BlockKey string `json:"blockKey"`
- SecurityName string `json:"securityName"`
- CookieName string `json:"cookieName"`
- Secure bool `json:"secure"`
- Maxage int `json:"maxage"`
-}
-
-// CookieProvider Cookie session provider
-type CookieProvider struct {
- maxlifetime int64
- config *cookieConfig
- block cipher.Block
-}
-
-// SessionInit Init cookie session provider with max lifetime and config json.
-// maxlifetime is ignored.
-// json config:
-// securityKey - hash string
-//	blockKey - AES key used to encrypt the gob-encoded values; must be 16, 24 or 32 bytes, a random 16-byte key is generated if empty
-// securityName - recognized name in encoded cookie string
-// cookieName - cookie name
-// maxage - cookie max life time.
-func (pder *CookieProvider) SessionInit(maxlifetime int64, config string) error {
- pder.config = &cookieConfig{}
- err := json.Unmarshal([]byte(config), pder.config)
- if err != nil {
- return err
- }
- if pder.config.BlockKey == "" {
- pder.config.BlockKey = string(generateRandomKey(16))
- }
- if pder.config.SecurityName == "" {
- pder.config.SecurityName = string(generateRandomKey(20))
- }
- pder.block, err = aes.NewCipher([]byte(pder.config.BlockKey))
- if err != nil {
- return err
- }
- pder.maxlifetime = maxlifetime
- return nil
-}
-
-// SessionRead Get SessionStore from the cookie.
-// it decodes the cookie string into a map and puts it into a SessionStore with the sid.
-func (pder *CookieProvider) SessionRead(sid string) (Store, error) {
- maps, _ := decodeCookie(pder.block,
- pder.config.SecurityKey,
- pder.config.SecurityName,
- sid, pder.maxlifetime)
- if maps == nil {
- maps = make(map[interface{}]interface{})
- }
- rs := &CookieSessionStore{sid: sid, values: maps}
- return rs, nil
-}
-
-// SessionExist Cookie session always exists
-func (pder *CookieProvider) SessionExist(sid string) bool {
- return true
-}
-
-// SessionRegenerate implements the method; it is not used.
-func (pder *CookieProvider) SessionRegenerate(oldsid, sid string) (Store, error) {
- return nil, nil
-}
-
-// SessionDestroy implements the method; it is not used.
-func (pder *CookieProvider) SessionDestroy(sid string) error {
- return nil
-}
-
-// SessionGC implements the method; it is not used.
-func (pder *CookieProvider) SessionGC() {
-}
-
-// SessionAll implements the method; it always returns 0.
-func (pder *CookieProvider) SessionAll() int {
- return 0
-}
-
-// SessionUpdate implements the method; it is not used.
-func (pder *CookieProvider) SessionUpdate(sid string) error {
- return nil
-}
-
-func init() {
- Register("cookie", cookiepder)
-}
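
SessionInit above hands an explicit blockKey straight to aes.NewCipher, so it must be 16, 24 or 32 bytes long; when it is left empty a random 16-byte key is generated per process, so cookies issued before a restart generally cannot be decoded afterwards. A sketch of a fully spelled-out ProviderConfig; every key below is a placeholder.

	package main

	import (
		"github.com/astaxie/beego/session" // the "cookie" provider registers itself in this package
	)

	func main() {
		conf := &session.ManagerConfig{
			CookieName:      "gosessionid",
			EnableSetCookie: true,
			Gclifetime:      3600,
			// securityKey is the hash key; blockKey is the AES key (16 bytes here)
			ProviderConfig: `{"cookieName":"gosessionid","securityKey":"beegocookiehashkey","blockKey":"0123456789abcdef","securityName":"beegocookie","secure":false,"maxage":3600}`,
		}
		globalSessions, err := session.NewManager("cookie", conf)
		if err != nil {
			panic(err)
		}
		go globalSessions.GC()
		_ = globalSessions
	}
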
diff --git a/session/sess_cookie_test.go b/session/sess_cookie_test.go
deleted file mode 100644
index b6726005..00000000
--- a/session/sess_cookie_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "encoding/json"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-)
-
-func TestCookie(t *testing.T) {
- config := `{"cookieName":"gosessionid","enableSetCookie":false,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`
- conf := new(ManagerConfig)
- if err := json.Unmarshal([]byte(config), conf); err != nil {
- t.Fatal("json decode error", err)
- }
- globalSessions, err := NewManager("cookie", conf)
- if err != nil {
- t.Fatal("init cookie session err", err)
- }
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
- sess, err := globalSessions.SessionStart(w, r)
- if err != nil {
- t.Fatal("set error,", err)
- }
- err = sess.Set("username", "astaxie")
- if err != nil {
- t.Fatal("set error,", err)
- }
- if username := sess.Get("username"); username != "astaxie" {
- t.Fatal("get username error")
- }
- sess.SessionRelease(w)
- if cookiestr := w.Header().Get("Set-Cookie"); cookiestr == "" {
- t.Fatal("setcookie error")
- } else {
- parts := strings.Split(strings.TrimSpace(cookiestr), ";")
- for k, v := range parts {
- nameval := strings.Split(v, "=")
- if k == 0 && nameval[0] != "gosessionid" {
- t.Fatal("error")
- }
- }
- }
-}
-
-func TestDestorySessionCookie(t *testing.T) {
- config := `{"cookieName":"gosessionid","enableSetCookie":true,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`
- conf := new(ManagerConfig)
- if err := json.Unmarshal([]byte(config), conf); err != nil {
- t.Fatal("json decode error", err)
- }
- globalSessions, err := NewManager("cookie", conf)
- if err != nil {
- t.Fatal("init cookie session err", err)
- }
-
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
- session, err := globalSessions.SessionStart(w, r)
- if err != nil {
- t.Fatal("session start err,", err)
- }
-
-	// request again, will get the same session id.
- r1, _ := http.NewRequest("GET", "/", nil)
- r1.Header.Set("Cookie", w.Header().Get("Set-Cookie"))
- w = httptest.NewRecorder()
- newSession, err := globalSessions.SessionStart(w, r1)
- if err != nil {
- t.Fatal("session start err,", err)
- }
- if newSession.SessionID() != session.SessionID() {
- t.Fatal("get cookie session id is not the same again.")
- }
-
-	// After destroying the session, a new session id will be issued.
- globalSessions.SessionDestroy(w, r1)
- r2, _ := http.NewRequest("GET", "/", nil)
- r2.Header.Set("Cookie", w.Header().Get("Set-Cookie"))
-
- w = httptest.NewRecorder()
- newSession, err = globalSessions.SessionStart(w, r2)
- if err != nil {
- t.Fatal("session start error")
- }
- if newSession.SessionID() == session.SessionID() {
-		t.Fatal("after destroying the session and requesting again, the cookie session id is the same.")
- }
-}
diff --git a/session/sess_file.go b/session/sess_file.go
deleted file mode 100644
index 47ad54a7..00000000
--- a/session/sess_file.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
- "path"
- "path/filepath"
- "strings"
- "sync"
- "time"
-)
-
-var (
- filepder = &FileProvider{}
- gcmaxlifetime int64
-)
-
-// FileSessionStore File session store
-type FileSessionStore struct {
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
-}
-
-// Set value to file session
-func (fs *FileSessionStore) Set(key, value interface{}) error {
- fs.lock.Lock()
- defer fs.lock.Unlock()
- fs.values[key] = value
- return nil
-}
-
-// Get value from file session
-func (fs *FileSessionStore) Get(key interface{}) interface{} {
- fs.lock.RLock()
- defer fs.lock.RUnlock()
- if v, ok := fs.values[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete value in file session by given key
-func (fs *FileSessionStore) Delete(key interface{}) error {
- fs.lock.Lock()
- defer fs.lock.Unlock()
- delete(fs.values, key)
- return nil
-}
-
-// Flush Clean all values in file session
-func (fs *FileSessionStore) Flush() error {
- fs.lock.Lock()
- defer fs.lock.Unlock()
- fs.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID Get file session store id
-func (fs *FileSessionStore) SessionID() string {
- return fs.sid
-}
-
-// SessionRelease Write file session to local file with Gob string
-func (fs *FileSessionStore) SessionRelease(w http.ResponseWriter) {
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
- b, err := EncodeGob(fs.values)
- if err != nil {
- SLogger.Println(err)
- return
- }
- _, err = os.Stat(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid))
- var f *os.File
- if err == nil {
- f, err = os.OpenFile(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid), os.O_RDWR, 0777)
- if err != nil {
- SLogger.Println(err)
- return
- }
- } else if os.IsNotExist(err) {
- f, err = os.Create(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid))
- if err != nil {
- SLogger.Println(err)
- return
- }
- } else {
- return
- }
- f.Truncate(0)
- f.Seek(0, 0)
- f.Write(b)
- f.Close()
-}
-
-// FileProvider File session provider
-type FileProvider struct {
- lock sync.RWMutex
- maxlifetime int64
- savePath string
-}
-
-// SessionInit Init file session provider.
-// savePath sets the session files path.
-func (fp *FileProvider) SessionInit(maxlifetime int64, savePath string) error {
- fp.maxlifetime = maxlifetime
- fp.savePath = savePath
- return nil
-}
-
-// SessionRead Read file session by sid.
-// if the file does not exist, create it.
-// the file path is derived from the sid string.
-func (fp *FileProvider) SessionRead(sid string) (Store, error) {
- invalidChars := "./"
- if strings.ContainsAny(sid, invalidChars) {
- return nil, errors.New("the sid shouldn't have following characters: " + invalidChars)
- }
- if len(sid) < 2 {
- return nil, errors.New("length of the sid is less than 2")
- }
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
-
- err := os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0755)
- if err != nil {
- SLogger.Println(err.Error())
- }
- _, err = os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid))
- var f *os.File
- if err == nil {
- f, err = os.OpenFile(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), os.O_RDWR, 0777)
- } else if os.IsNotExist(err) {
- f, err = os.Create(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid))
- } else {
- return nil, err
- }
-
- defer f.Close()
-
- os.Chtimes(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), time.Now(), time.Now())
- var kv map[interface{}]interface{}
- b, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
- if len(b) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = DecodeGob(b)
- if err != nil {
- return nil, err
- }
- }
-
- ss := &FileSessionStore{sid: sid, values: kv}
- return ss, nil
-}
-
-// SessionExist Check whether the file session exists.
-// it checks whether the file named after the sid exists.
-func (fp *FileProvider) SessionExist(sid string) bool {
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
-
- if len(sid) < 2 {
- SLogger.Println("min length of session id is 2", sid)
- return false
- }
-
- _, err := os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid))
- return err == nil
-}
-
-// SessionDestroy Remove all files in this save path
-func (fp *FileProvider) SessionDestroy(sid string) error {
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
- os.Remove(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid))
- return nil
-}
-
-// SessionGC Recycle files in save path
-func (fp *FileProvider) SessionGC() {
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
-
- gcmaxlifetime = fp.maxlifetime
- filepath.Walk(fp.savePath, gcpath)
-}
-
-// SessionAll Get active file session number.
-// it walks save path to count files.
-func (fp *FileProvider) SessionAll() int {
- a := &activeSession{}
- err := filepath.Walk(fp.savePath, func(path string, f os.FileInfo, err error) error {
- return a.visit(path, f, err)
- })
- if err != nil {
- SLogger.Printf("filepath.Walk() returned %v\n", err)
- return 0
- }
- return a.total
-}
-
-// SessionRegenerate Generate new sid for file session.
-// it deletes the old file and creates a new file named after the new sid.
-func (fp *FileProvider) SessionRegenerate(oldsid, sid string) (Store, error) {
- filepder.lock.Lock()
- defer filepder.lock.Unlock()
-
- oldPath := path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1]))
- oldSidFile := path.Join(oldPath, oldsid)
- newPath := path.Join(fp.savePath, string(sid[0]), string(sid[1]))
- newSidFile := path.Join(newPath, sid)
-
-	// the new sid file already exists
- _, err := os.Stat(newSidFile)
- if err == nil {
- return nil, fmt.Errorf("newsid %s exist", newSidFile)
- }
-
- err = os.MkdirAll(newPath, 0755)
- if err != nil {
- SLogger.Println(err.Error())
- }
-
-	// if the old sid file exists:
-	// 1. read and parse the file content
-	// 2. write the content to the new sid file
-	// 3. remove the old sid file, update the new file's atime and mtime
-	// 4. return a FileSessionStore
- _, err = os.Stat(oldSidFile)
- if err == nil {
- b, err := ioutil.ReadFile(oldSidFile)
- if err != nil {
- return nil, err
- }
-
- var kv map[interface{}]interface{}
- if len(b) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = DecodeGob(b)
- if err != nil {
- return nil, err
- }
- }
-
- ioutil.WriteFile(newSidFile, b, 0777)
- os.Remove(oldSidFile)
- os.Chtimes(newSidFile, time.Now(), time.Now())
- ss := &FileSessionStore{sid: sid, values: kv}
- return ss, nil
- }
-
-	// if the old sid file does not exist, just create a new sid file and return
- newf, err := os.Create(newSidFile)
- if err != nil {
- return nil, err
- }
- newf.Close()
- ss := &FileSessionStore{sid: sid, values: make(map[interface{}]interface{})}
- return ss, nil
-}
-
-// remove file in save path if expired
-func gcpath(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
- return nil
- }
- if (info.ModTime().Unix() + gcmaxlifetime) < time.Now().Unix() {
- os.Remove(path)
- }
- return nil
-}
-
-type activeSession struct {
- total int
-}
-
-func (as *activeSession) visit(paths string, f os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if f.IsDir() {
- return nil
- }
- as.total = as.total + 1
- return nil
-}
-
-func init() {
- Register("file", filepder)
-}
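
A minimal sketch of how the "file" provider removed above is wired up through the session manager; the provider name and ManagerConfig fields come from the code in this diff, while the save path, port, and handler body are purely illustrative.

    package main

    import (
        "net/http"

        "github.com/astaxie/beego/session"
    )

    func main() {
        // The file provider registers itself under "file" in its init(), so
        // importing the session package is enough to refer to it by name.
        conf := &session.ManagerConfig{
            CookieName:      "gosessionid",
            Gclifetime:      3600,
            EnableSetCookie: true,
            ProviderConfig:  "./tmp", // save path handed to SessionInit (illustrative)
        }
        manager, err := session.NewManager("file", conf)
        if err != nil {
            panic(err)
        }
        go manager.GC()

        http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
            store, _ := manager.SessionStart(w, r)
            // SessionRelease writes the values back to the provider when the
            // handler returns.
            defer store.SessionRelease(w)
            _ = store.Set("hits", 1)
        })
        http.ListenAndServe(":8080", nil)
    }
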
diff --git a/session/sess_file_test.go b/session/sess_file_test.go
deleted file mode 100644
index 021c43fc..00000000
--- a/session/sess_file_test.go
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "fmt"
- "os"
- "sync"
- "testing"
- "time"
-)
-
-const sid = "Session_id"
-const sidNew = "Session_id_new"
-const sessionPath = "./_session_runtime"
-
-var (
- mutex sync.Mutex
-)
-
-func TestFileProvider_SessionInit(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
- if fp.maxlifetime != 180 {
- t.Error()
- }
-
- if fp.savePath != sessionPath {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionExist(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- if fp.SessionExist(sid) {
- t.Error()
- }
-
- _, err := fp.SessionRead(sid)
- if err != nil {
- t.Error(err)
- }
-
- if !fp.SessionExist(sid) {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionExist2(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- if fp.SessionExist(sid) {
- t.Error()
- }
-
- if fp.SessionExist("") {
- t.Error()
- }
-
- if fp.SessionExist("1") {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionRead(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- s, err := fp.SessionRead(sid)
- if err != nil {
- t.Error(err)
- }
-
- _ = s.Set("sessionValue", 18975)
- v := s.Get("sessionValue")
-
- if v.(int) != 18975 {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionRead1(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- _, err := fp.SessionRead("")
- if err == nil {
- t.Error(err)
- }
-
- _, err = fp.SessionRead("1")
- if err == nil {
- t.Error(err)
- }
-}
-
-func TestFileProvider_SessionAll(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- sessionCount := 546
-
- for i := 1; i <= sessionCount; i++ {
- _, err := fp.SessionRead(fmt.Sprintf("%s_%d", sid, i))
- if err != nil {
- t.Error(err)
- }
- }
-
- if fp.SessionAll() != sessionCount {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionRegenerate(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- _, err := fp.SessionRead(sid)
- if err != nil {
- t.Error(err)
- }
-
- if !fp.SessionExist(sid) {
- t.Error()
- }
-
- _, err = fp.SessionRegenerate(sid, sidNew)
- if err != nil {
- t.Error(err)
- }
-
- if fp.SessionExist(sid) {
- t.Error()
- }
-
- if !fp.SessionExist(sidNew) {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionDestroy(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- _, err := fp.SessionRead(sid)
- if err != nil {
- t.Error(err)
- }
-
- if !fp.SessionExist(sid) {
- t.Error()
- }
-
- err = fp.SessionDestroy(sid)
- if err != nil {
- t.Error(err)
- }
-
- if fp.SessionExist(sid) {
- t.Error()
- }
-}
-
-func TestFileProvider_SessionGC(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(1, sessionPath)
-
- sessionCount := 412
-
- for i := 1; i <= sessionCount; i++ {
- _, err := fp.SessionRead(fmt.Sprintf("%s_%d", sid, i))
- if err != nil {
- t.Error(err)
- }
- }
-
- time.Sleep(2 * time.Second)
-
- fp.SessionGC()
- if fp.SessionAll() != 0 {
- t.Error()
- }
-}
-
-func TestFileSessionStore_Set(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- sessionCount := 100
- s, _ := fp.SessionRead(sid)
- for i := 1; i <= sessionCount; i++ {
- err := s.Set(i, i)
- if err != nil {
- t.Error(err)
- }
- }
-}
-
-func TestFileSessionStore_Get(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- sessionCount := 100
- s, _ := fp.SessionRead(sid)
- for i := 1; i <= sessionCount; i++ {
- _ = s.Set(i, i)
-
- v := s.Get(i)
- if v.(int) != i {
- t.Error()
- }
- }
-}
-
-func TestFileSessionStore_Delete(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- s, _ := fp.SessionRead(sid)
- s.Set("1", 1)
-
- if s.Get("1") == nil {
- t.Error()
- }
-
- s.Delete("1")
-
- if s.Get("1") != nil {
- t.Error()
- }
-}
-
-func TestFileSessionStore_Flush(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- sessionCount := 100
- s, _ := fp.SessionRead(sid)
- for i := 1; i <= sessionCount; i++ {
- _ = s.Set(i, i)
- }
-
- _ = s.Flush()
-
- for i := 1; i <= sessionCount; i++ {
- if s.Get(i) != nil {
- t.Error()
- }
- }
-}
-
-func TestFileSessionStore_SessionID(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
-
- sessionCount := 85
-
- for i := 1; i <= sessionCount; i++ {
- s, err := fp.SessionRead(fmt.Sprintf("%s_%d", sid, i))
- if err != nil {
- t.Error(err)
- }
- if s.SessionID() != fmt.Sprintf("%s_%d", sid, i) {
- t.Error(err)
- }
- }
-}
-
-func TestFileSessionStore_SessionRelease(t *testing.T) {
- mutex.Lock()
- defer mutex.Unlock()
- os.RemoveAll(sessionPath)
- defer os.RemoveAll(sessionPath)
- fp := &FileProvider{}
-
- _ = fp.SessionInit(180, sessionPath)
- filepder.savePath = sessionPath
- sessionCount := 85
-
- for i := 1; i <= sessionCount; i++ {
- s, err := fp.SessionRead(fmt.Sprintf("%s_%d", sid, i))
- if err != nil {
- t.Error(err)
- }
-
- s.Set(i, i)
- s.SessionRelease(nil)
- }
-
- for i := 1; i <= sessionCount; i++ {
- s, err := fp.SessionRead(fmt.Sprintf("%s_%d", sid, i))
- if err != nil {
- t.Error(err)
- }
-
- if s.Get(i).(int) != i {
- t.Error()
- }
- }
-}
diff --git a/session/sess_mem.go b/session/sess_mem.go
deleted file mode 100644
index 64d8b056..00000000
--- a/session/sess_mem.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "container/list"
- "net/http"
- "sync"
- "time"
-)
-
-var mempder = &MemProvider{list: list.New(), sessions: make(map[string]*list.Element)}
-
-// MemSessionStore memory session store.
-// it saved sessions in a map in memory.
-type MemSessionStore struct {
- sid string //session id
- timeAccessed time.Time //last access time
- value map[interface{}]interface{} //session store
- lock sync.RWMutex
-}
-
-// Set value to memory session
-func (st *MemSessionStore) Set(key, value interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.value[key] = value
- return nil
-}
-
-// Get value from memory session by key
-func (st *MemSessionStore) Get(key interface{}) interface{} {
- st.lock.RLock()
- defer st.lock.RUnlock()
- if v, ok := st.value[key]; ok {
- return v
- }
- return nil
-}
-
-// Delete in memory session by key
-func (st *MemSessionStore) Delete(key interface{}) error {
- st.lock.Lock()
- defer st.lock.Unlock()
- delete(st.value, key)
- return nil
-}
-
-// Flush clear all values in memory session
-func (st *MemSessionStore) Flush() error {
- st.lock.Lock()
- defer st.lock.Unlock()
- st.value = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID returns the id of this memory session store
-func (st *MemSessionStore) SessionID() string {
- return st.sid
-}
-
-// SessionRelease implements the Store interface; it is a no-op for the memory store.
-func (st *MemSessionStore) SessionRelease(w http.ResponseWriter) {
-}
-
-// MemProvider Implement the provider interface
-type MemProvider struct {
- lock sync.RWMutex // locker
- sessions map[string]*list.Element // map in memory
- list *list.List // for gc
- maxlifetime int64
- savePath string
-}
-
-// SessionInit init memory session
-func (pder *MemProvider) SessionInit(maxlifetime int64, savePath string) error {
- pder.maxlifetime = maxlifetime
- pder.savePath = savePath
- return nil
-}
-
-// SessionRead get memory session store by sid
-func (pder *MemProvider) SessionRead(sid string) (Store, error) {
- pder.lock.RLock()
- if element, ok := pder.sessions[sid]; ok {
- go pder.SessionUpdate(sid)
- pder.lock.RUnlock()
- return element.Value.(*MemSessionStore), nil
- }
- pder.lock.RUnlock()
- pder.lock.Lock()
- newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})}
- element := pder.list.PushFront(newsess)
- pder.sessions[sid] = element
- pder.lock.Unlock()
- return newsess, nil
-}
-
-// SessionExist checks whether a session store exists in memory for the given sid
-func (pder *MemProvider) SessionExist(sid string) bool {
- pder.lock.RLock()
- defer pder.lock.RUnlock()
- if _, ok := pder.sessions[sid]; ok {
- return true
- }
- return false
-}
-
-// SessionRegenerate generate new sid for session store in memory session
-func (pder *MemProvider) SessionRegenerate(oldsid, sid string) (Store, error) {
- pder.lock.RLock()
- if element, ok := pder.sessions[oldsid]; ok {
- go pder.SessionUpdate(oldsid)
- pder.lock.RUnlock()
- pder.lock.Lock()
- element.Value.(*MemSessionStore).sid = sid
- pder.sessions[sid] = element
- delete(pder.sessions, oldsid)
- pder.lock.Unlock()
- return element.Value.(*MemSessionStore), nil
- }
- pder.lock.RUnlock()
- pder.lock.Lock()
- newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})}
- element := pder.list.PushFront(newsess)
- pder.sessions[sid] = element
- pder.lock.Unlock()
- return newsess, nil
-}
-
-// SessionDestroy delete session store in memory session by id
-func (pder *MemProvider) SessionDestroy(sid string) error {
- pder.lock.Lock()
- defer pder.lock.Unlock()
- if element, ok := pder.sessions[sid]; ok {
- delete(pder.sessions, sid)
- pder.list.Remove(element)
- return nil
- }
- return nil
-}
-
-// SessionGC clean expired session stores in memory session
-func (pder *MemProvider) SessionGC() {
- pder.lock.RLock()
- for {
- element := pder.list.Back()
- if element == nil {
- break
- }
- if (element.Value.(*MemSessionStore).timeAccessed.Unix() + pder.maxlifetime) < time.Now().Unix() {
- pder.lock.RUnlock()
- pder.lock.Lock()
- pder.list.Remove(element)
- delete(pder.sessions, element.Value.(*MemSessionStore).sid)
- pder.lock.Unlock()
- pder.lock.RLock()
- } else {
- break
- }
- }
- pder.lock.RUnlock()
-}
-
-// SessionAll returns the number of sessions held in memory
-func (pder *MemProvider) SessionAll() int {
- return pder.list.Len()
-}
-
-// SessionUpdate refreshes the access time of the in-memory session store with the given id
-func (pder *MemProvider) SessionUpdate(sid string) error {
- pder.lock.Lock()
- defer pder.lock.Unlock()
- if element, ok := pder.sessions[sid]; ok {
- element.Value.(*MemSessionStore).timeAccessed = time.Now()
- pder.list.MoveToFront(element)
- return nil
- }
- return nil
-}
-
-func init() {
- Register("memory", mempder)
-}
diff --git a/session/sess_mem_test.go b/session/sess_mem_test.go
deleted file mode 100644
index 2e8934b8..00000000
--- a/session/sess_mem_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "encoding/json"
- "net/http"
- "net/http/httptest"
- "strings"
- "testing"
-)
-
-func TestMem(t *testing.T) {
- config := `{"cookieName":"gosessionid","gclifetime":10, "enableSetCookie":true}`
- conf := new(ManagerConfig)
- if err := json.Unmarshal([]byte(config), conf); err != nil {
- t.Fatal("json decode error", err)
- }
- globalSessions, _ := NewManager("memory", conf)
- go globalSessions.GC()
- r, _ := http.NewRequest("GET", "/", nil)
- w := httptest.NewRecorder()
- sess, err := globalSessions.SessionStart(w, r)
- if err != nil {
- t.Fatal("set error,", err)
- }
- defer sess.SessionRelease(w)
- err = sess.Set("username", "astaxie")
- if err != nil {
- t.Fatal("set error,", err)
- }
- if username := sess.Get("username"); username != "astaxie" {
- t.Fatal("get username error")
- }
- if cookiestr := w.Header().Get("Set-Cookie"); cookiestr == "" {
- t.Fatal("setcookie error")
- } else {
- parts := strings.Split(strings.TrimSpace(cookiestr), ";")
- for k, v := range parts {
- nameval := strings.Split(v, "=")
- if k == 0 && nameval[0] != "gosessionid" {
- t.Fatal("error")
- }
- }
- }
-}
diff --git a/session/sess_test.go b/session/sess_test.go
deleted file mode 100644
index 906abec2..00000000
--- a/session/sess_test.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "crypto/aes"
- "encoding/json"
- "testing"
-)
-
-func Test_gob(t *testing.T) {
- a := make(map[interface{}]interface{})
- a["username"] = "astaxie"
- a[12] = 234
- a["user"] = User{"asta", "xie"}
- b, err := EncodeGob(a)
- if err != nil {
- t.Error(err)
- }
- c, err := DecodeGob(b)
- if err != nil {
- t.Error(err)
- }
- if len(c) == 0 {
- t.Error("decodeGob empty")
- }
- if c["username"] != "astaxie" {
- t.Error("decode string error")
- }
- if c[12] != 234 {
- t.Error("decode int error")
- }
- if c["user"].(User).Username != "asta" {
- t.Error("decode struct error")
- }
-}
-
-type User struct {
- Username string
- NickName string
-}
-
-func TestGenerate(t *testing.T) {
- str := generateRandomKey(20)
- if len(str) != 20 {
- t.Fatal("generate length is not equal to 20")
- }
-}
-
-func TestCookieEncodeDecode(t *testing.T) {
- hashKey := "testhashKey"
- blockkey := generateRandomKey(16)
- block, err := aes.NewCipher(blockkey)
- if err != nil {
- t.Fatal("NewCipher:", err)
- }
- securityName := string(generateRandomKey(20))
- val := make(map[interface{}]interface{})
- val["name"] = "astaxie"
- val["gender"] = "male"
- str, err := encodeCookie(block, hashKey, securityName, val)
- if err != nil {
- t.Fatal("encodeCookie:", err)
- }
- dst, err := decodeCookie(block, hashKey, securityName, str, 3600)
- if err != nil {
- t.Fatal("decodeCookie", err)
- }
- if dst["name"] != "astaxie" {
- t.Fatal("dst get map error")
- }
- if dst["gender"] != "male" {
- t.Fatal("dst get map error")
- }
-}
-
-func TestParseConfig(t *testing.T) {
- s := `{"cookieName":"gosessionid","gclifetime":3600}`
- cf := new(ManagerConfig)
- cf.EnableSetCookie = true
- err := json.Unmarshal([]byte(s), cf)
- if err != nil {
- t.Fatal("parse json error,", err)
- }
- if cf.CookieName != "gosessionid" {
- t.Fatal("parseconfig get cookiename error")
- }
- if cf.Gclifetime != 3600 {
- t.Fatal("parseconfig get gclifetime error")
- }
-
- cc := `{"cookieName":"gosessionid","enableSetCookie":false,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`
- cf2 := new(ManagerConfig)
- cf2.EnableSetCookie = true
- err = json.Unmarshal([]byte(cc), cf2)
- if err != nil {
- t.Fatal("parse json error,", err)
- }
- if cf2.CookieName != "gosessionid" {
- t.Fatal("parseconfig get cookiename error")
- }
- if cf2.Gclifetime != 3600 {
- t.Fatal("parseconfig get gclifetime error")
- }
- if cf2.EnableSetCookie {
- t.Fatal("parseconfig get enableSetCookie error")
- }
- cconfig := new(cookieConfig)
- err = json.Unmarshal([]byte(cf2.ProviderConfig), cconfig)
- if err != nil {
- t.Fatal("parse ProviderConfig err,", err)
- }
- if cconfig.CookieName != "gosessionid" {
- t.Fatal("ProviderConfig get cookieName error")
- }
- if cconfig.SecurityKey != "beegocookiehashkey" {
- t.Fatal("ProviderConfig get securityKey error")
- }
-}
diff --git a/session/sess_utils.go b/session/sess_utils.go
deleted file mode 100644
index 20915bb6..00000000
--- a/session/sess_utils.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package session
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/hmac"
- "crypto/rand"
- "crypto/sha256"
- "crypto/subtle"
- "encoding/base64"
- "encoding/gob"
- "errors"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/astaxie/beego/utils"
-)
-
-func init() {
- gob.Register([]interface{}{})
- gob.Register(map[int]interface{}{})
- gob.Register(map[string]interface{}{})
- gob.Register(map[interface{}]interface{}{})
- gob.Register(map[string]string{})
- gob.Register(map[int]string{})
- gob.Register(map[int]int{})
- gob.Register(map[int]int64{})
-}
-
-// EncodeGob encode the obj to gob
-func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) {
- for _, v := range obj {
- gob.Register(v)
- }
- buf := bytes.NewBuffer(nil)
- enc := gob.NewEncoder(buf)
- err := enc.Encode(obj)
- if err != nil {
- return []byte(""), err
- }
- return buf.Bytes(), nil
-}
-
-// DecodeGob decode data to map
-func DecodeGob(encoded []byte) (map[interface{}]interface{}, error) {
- buf := bytes.NewBuffer(encoded)
- dec := gob.NewDecoder(buf)
- var out map[interface{}]interface{}
- err := dec.Decode(&out)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// generateRandomKey creates a random key with the given strength.
-func generateRandomKey(strength int) []byte {
- k := make([]byte, strength)
- if n, err := io.ReadFull(rand.Reader, k); n != strength || err != nil {
- return utils.RandomCreateBytes(strength)
- }
- return k
-}
-
-// Encryption -----------------------------------------------------------------
-
-// encrypt encrypts a value using the given block in counter mode.
-//
-// A random initialization vector (http://goo.gl/zF67k) with the length of the
-// block size is prepended to the resulting ciphertext.
-func encrypt(block cipher.Block, value []byte) ([]byte, error) {
- iv := generateRandomKey(block.BlockSize())
- if iv == nil {
- return nil, errors.New("encrypt: failed to generate random iv")
- }
- // Encrypt it.
- stream := cipher.NewCTR(block, iv)
- stream.XORKeyStream(value, value)
- // Return iv + ciphertext.
- return append(iv, value...), nil
-}
-
-// decrypt decrypts a value using the given block in counter mode.
-//
-// The value to be decrypted must be prepended by a initialization vector
-// (http://goo.gl/zF67k) with the length of the block size.
-func decrypt(block cipher.Block, value []byte) ([]byte, error) {
- size := block.BlockSize()
- if len(value) > size {
- // Extract iv.
- iv := value[:size]
- // Extract ciphertext.
- value = value[size:]
- // Decrypt it.
- stream := cipher.NewCTR(block, iv)
- stream.XORKeyStream(value, value)
- return value, nil
- }
- return nil, errors.New("decrypt: the value could not be decrypted")
-}
-
-func encodeCookie(block cipher.Block, hashKey, name string, value map[interface{}]interface{}) (string, error) {
- var err error
- var b []byte
- // 1. EncodeGob.
- if b, err = EncodeGob(value); err != nil {
- return "", err
- }
- // 2. Encrypt (optional).
- if b, err = encrypt(block, b); err != nil {
- return "", err
- }
- b = encode(b)
- // 3. Create MAC for "name|date|value". Extra pipe to be used later.
- b = []byte(fmt.Sprintf("%s|%d|%s|", name, time.Now().UTC().Unix(), b))
- h := hmac.New(sha256.New, []byte(hashKey))
- h.Write(b)
- sig := h.Sum(nil)
- // Append mac, remove name.
- b = append(b, sig...)[len(name)+1:]
- // 4. Encode to base64.
- b = encode(b)
- // Done.
- return string(b), nil
-}
-
-func decodeCookie(block cipher.Block, hashKey, name, value string, gcmaxlifetime int64) (map[interface{}]interface{}, error) {
- // 1. Decode from base64.
- b, err := decode([]byte(value))
- if err != nil {
- return nil, err
- }
- // 2. Verify MAC. Value is "date|value|mac".
- parts := bytes.SplitN(b, []byte("|"), 3)
- if len(parts) != 3 {
- return nil, errors.New("Decode: invalid value format")
- }
-
- b = append([]byte(name+"|"), b[:len(b)-len(parts[2])]...)
- h := hmac.New(sha256.New, []byte(hashKey))
- h.Write(b)
- sig := h.Sum(nil)
- if len(sig) != len(parts[2]) || subtle.ConstantTimeCompare(sig, parts[2]) != 1 {
- return nil, errors.New("Decode: the value is not valid")
- }
- // 3. Verify date ranges.
- var t1 int64
- if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil {
- return nil, errors.New("Decode: invalid timestamp")
- }
- t2 := time.Now().UTC().Unix()
- if t1 > t2 {
- return nil, errors.New("Decode: timestamp is too new")
- }
- if t1 < t2-gcmaxlifetime {
- return nil, errors.New("Decode: expired timestamp")
- }
- // 4. Decrypt (optional).
- b, err = decode(parts[1])
- if err != nil {
- return nil, err
- }
- if b, err = decrypt(block, b); err != nil {
- return nil, err
- }
- // 5. DecodeGob.
- dst, err := DecodeGob(b)
- if err != nil {
- return nil, err
- }
- return dst, nil
-}
-
-// Encoding -------------------------------------------------------------------
-
-// encode encodes a value using base64.
-func encode(value []byte) []byte {
- encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value)))
- base64.URLEncoding.Encode(encoded, value)
- return encoded
-}
-
-// decode decodes a cookie using base64.
-func decode(value []byte) ([]byte, error) {
- decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value)))
- b, err := base64.URLEncoding.Decode(decoded, value)
- if err != nil {
- return nil, err
- }
- return decoded[:b], nil
-}
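
The gob helpers above are exported, so a short round-trip sketch of EncodeGob and DecodeGob shows the intended usage; the map contents are illustrative.

    package main

    import (
        "fmt"

        "github.com/astaxie/beego/session"
    )

    func main() {
        values := map[interface{}]interface{}{
            "username": "astaxie",
            12:         234,
        }
        // EncodeGob registers the concrete value types and gob-encodes the map.
        b, err := session.EncodeGob(values)
        if err != nil {
            panic(err)
        }
        // DecodeGob turns the bytes back into a map[interface{}]interface{}.
        decoded, err := session.DecodeGob(b)
        if err != nil {
            panic(err)
        }
        fmt.Println(decoded["username"], decoded[12])
    }
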
diff --git a/session/session.go b/session/session.go
deleted file mode 100644
index eb85360a..00000000
--- a/session/session.go
+++ /dev/null
@@ -1,377 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package session provides session management for web applications.
-//
-// Usage:
-// import(
-// "github.com/astaxie/beego/session"
-// )
-//
-// func init() {
-// globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid", "enableSetCookie,omitempty": true, "gclifetime":3600, "maxLifetime": 3600, "secure": false, "cookieLifeTime": 3600, "providerConfig": ""}`)
-// go globalSessions.GC()
-// }
-//
-// more docs: http://beego.me/docs/module/session.md
-package session
-
-import (
- "crypto/rand"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "log"
- "net/http"
- "net/textproto"
- "net/url"
- "os"
- "time"
-)
-
-// Store contains all data for one session process with specific id.
-type Store interface {
- Set(key, value interface{}) error //set session value
- Get(key interface{}) interface{} //get session value
- Delete(key interface{}) error //delete session value
-	SessionID() string                    //return current sessionID
- SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data
- Flush() error //delete all data
-}
-
-// Provider contains global session methods and saved SessionStores.
-// it can operate a SessionStore by its id.
-type Provider interface {
- SessionInit(gclifetime int64, config string) error
- SessionRead(sid string) (Store, error)
- SessionExist(sid string) bool
- SessionRegenerate(oldsid, sid string) (Store, error)
- SessionDestroy(sid string) error
- SessionAll() int //get all active session
- SessionGC()
-}
-
-var provides = make(map[string]Provider)
-
-// SLogger a helpful variable to log information about session
-var SLogger = NewSessionLog(os.Stderr)
-
-// Register makes a session provider available by the provided name.
-// If Register is called twice with the same name or if the provider is nil,
-// it panics.
-func Register(name string, provide Provider) {
- if provide == nil {
- panic("session: Register provide is nil")
- }
- if _, dup := provides[name]; dup {
- panic("session: Register called twice for provider " + name)
- }
- provides[name] = provide
-}
-
-// GetProvider returns the provider registered under the given name.
-func GetProvider(name string) (Provider, error) {
- provider, ok := provides[name]
- if !ok {
- return nil, fmt.Errorf("session: unknown provide %q (forgotten import?)", name)
- }
- return provider, nil
-}
-
-// ManagerConfig define the session config
-type ManagerConfig struct {
- CookieName string `json:"cookieName"`
- EnableSetCookie bool `json:"enableSetCookie,omitempty"`
- Gclifetime int64 `json:"gclifetime"`
- Maxlifetime int64 `json:"maxLifetime"`
- DisableHTTPOnly bool `json:"disableHTTPOnly"`
- Secure bool `json:"secure"`
- CookieLifeTime int `json:"cookieLifeTime"`
- ProviderConfig string `json:"providerConfig"`
- Domain string `json:"domain"`
- SessionIDLength int64 `json:"sessionIDLength"`
- EnableSidInHTTPHeader bool `json:"EnableSidInHTTPHeader"`
- SessionNameInHTTPHeader string `json:"SessionNameInHTTPHeader"`
- EnableSidInURLQuery bool `json:"EnableSidInURLQuery"`
- SessionIDPrefix string `json:"sessionIDPrefix"`
-}
-
-// Manager contains Provider and its configuration.
-type Manager struct {
- provider Provider
- config *ManagerConfig
-}
-
-// NewManager creates a new Manager from a provider name and a json config string.
-// provider name:
-// 1. cookie
-// 2. file
-// 3. memory
-// 4. redis
-// 5. mysql
-// json config:
-// 1. is https default false
-// 2. hashfunc default sha1
-// 3. hashkey default beegosessionkey
-// 4. maxage default is none
-func NewManager(provideName string, cf *ManagerConfig) (*Manager, error) {
- provider, ok := provides[provideName]
- if !ok {
- return nil, fmt.Errorf("session: unknown provide %q (forgotten import?)", provideName)
- }
-
- if cf.Maxlifetime == 0 {
- cf.Maxlifetime = cf.Gclifetime
- }
-
- if cf.EnableSidInHTTPHeader {
- if cf.SessionNameInHTTPHeader == "" {
- panic(errors.New("SessionNameInHTTPHeader is empty"))
- }
-
- strMimeHeader := textproto.CanonicalMIMEHeaderKey(cf.SessionNameInHTTPHeader)
- if cf.SessionNameInHTTPHeader != strMimeHeader {
- strErrMsg := "SessionNameInHTTPHeader (" + cf.SessionNameInHTTPHeader + ") has the wrong format, it should be like this : " + strMimeHeader
- panic(errors.New(strErrMsg))
- }
- }
-
- err := provider.SessionInit(cf.Maxlifetime, cf.ProviderConfig)
- if err != nil {
- return nil, err
- }
-
- if cf.SessionIDLength == 0 {
- cf.SessionIDLength = 16
- }
-
- return &Manager{
- provider,
- cf,
- }, nil
-}
-
-// GetProvider return current manager's provider
-func (manager *Manager) GetProvider() Provider {
- return manager.provider
-}
-
-// getSid retrieves the session identifier from the HTTP request.
-// It first tries to read the id from the cookie (the session cookie name is configurable);
-// if that is not present, it falls back to the query parameters.
-//
-// error is not nil when anything goes wrong.
-// sid is empty when a new session id needs to be generated,
-// otherwise a valid session id is returned.
-func (manager *Manager) getSid(r *http.Request) (string, error) {
- cookie, errs := r.Cookie(manager.config.CookieName)
- if errs != nil || cookie.Value == "" {
- var sid string
- if manager.config.EnableSidInURLQuery {
- errs := r.ParseForm()
- if errs != nil {
- return "", errs
- }
-
- sid = r.FormValue(manager.config.CookieName)
- }
-
- // if not found in Cookie / param, then read it from request headers
- if manager.config.EnableSidInHTTPHeader && sid == "" {
- sids, isFound := r.Header[manager.config.SessionNameInHTTPHeader]
- if isFound && len(sids) != 0 {
- return sids[0], nil
- }
- }
-
- return sid, nil
- }
-
- // HTTP Request contains cookie for sessionid info.
- return url.QueryUnescape(cookie.Value)
-}
-
-// SessionStart generates or reads the session id from the http request.
-// If the session id exists, it returns the SessionStore with this id.
-func (manager *Manager) SessionStart(w http.ResponseWriter, r *http.Request) (session Store, err error) {
- sid, errs := manager.getSid(r)
- if errs != nil {
- return nil, errs
- }
-
- if sid != "" && manager.provider.SessionExist(sid) {
- return manager.provider.SessionRead(sid)
- }
-
- // Generate a new session
- sid, errs = manager.sessionID()
- if errs != nil {
- return nil, errs
- }
-
- session, err = manager.provider.SessionRead(sid)
- if err != nil {
- return nil, err
- }
- cookie := &http.Cookie{
- Name: manager.config.CookieName,
- Value: url.QueryEscape(sid),
- Path: "/",
- HttpOnly: !manager.config.DisableHTTPOnly,
- Secure: manager.isSecure(r),
- Domain: manager.config.Domain,
- }
- if manager.config.CookieLifeTime > 0 {
- cookie.MaxAge = manager.config.CookieLifeTime
- cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second)
- }
- if manager.config.EnableSetCookie {
- http.SetCookie(w, cookie)
- }
- r.AddCookie(cookie)
-
- if manager.config.EnableSidInHTTPHeader {
- r.Header.Set(manager.config.SessionNameInHTTPHeader, sid)
- w.Header().Set(manager.config.SessionNameInHTTPHeader, sid)
- }
-
- return
-}
-
-// SessionDestroy destroys the session whose id is carried in the http request cookie.
-func (manager *Manager) SessionDestroy(w http.ResponseWriter, r *http.Request) {
- if manager.config.EnableSidInHTTPHeader {
- r.Header.Del(manager.config.SessionNameInHTTPHeader)
- w.Header().Del(manager.config.SessionNameInHTTPHeader)
- }
-
- cookie, err := r.Cookie(manager.config.CookieName)
- if err != nil || cookie.Value == "" {
- return
- }
-
- sid, _ := url.QueryUnescape(cookie.Value)
- manager.provider.SessionDestroy(sid)
- if manager.config.EnableSetCookie {
- expiration := time.Now()
- cookie = &http.Cookie{Name: manager.config.CookieName,
- Path: "/",
- HttpOnly: !manager.config.DisableHTTPOnly,
- Expires: expiration,
- MaxAge: -1,
- Domain: manager.config.Domain}
-
- http.SetCookie(w, cookie)
- }
-}
-
-// GetSessionStore Get SessionStore by its id.
-func (manager *Manager) GetSessionStore(sid string) (sessions Store, err error) {
- sessions, err = manager.provider.SessionRead(sid)
- return
-}
-
-// GC starts the session gc process.
-// It re-schedules itself to run again after every gc lifetime interval.
-func (manager *Manager) GC() {
- manager.provider.SessionGC()
- time.AfterFunc(time.Duration(manager.config.Gclifetime)*time.Second, func() { manager.GC() })
-}
-
-// SessionRegenerateID regenerates the session id for the SessionStore whose id is carried in the http request.
-func (manager *Manager) SessionRegenerateID(w http.ResponseWriter, r *http.Request) (session Store) {
- sid, err := manager.sessionID()
- if err != nil {
- return
- }
- cookie, err := r.Cookie(manager.config.CookieName)
- if err != nil || cookie.Value == "" {
-		//no valid old cookie: start a fresh session and build a new cookie
- session, _ = manager.provider.SessionRead(sid)
- cookie = &http.Cookie{Name: manager.config.CookieName,
- Value: url.QueryEscape(sid),
- Path: "/",
- HttpOnly: !manager.config.DisableHTTPOnly,
- Secure: manager.isSecure(r),
- Domain: manager.config.Domain,
- }
- } else {
- oldsid, _ := url.QueryUnescape(cookie.Value)
- session, _ = manager.provider.SessionRegenerate(oldsid, sid)
- cookie.Value = url.QueryEscape(sid)
- cookie.HttpOnly = true
- cookie.Path = "/"
- }
- if manager.config.CookieLifeTime > 0 {
- cookie.MaxAge = manager.config.CookieLifeTime
- cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second)
- }
- if manager.config.EnableSetCookie {
- http.SetCookie(w, cookie)
- }
- r.AddCookie(cookie)
-
- if manager.config.EnableSidInHTTPHeader {
- r.Header.Set(manager.config.SessionNameInHTTPHeader, sid)
- w.Header().Set(manager.config.SessionNameInHTTPHeader, sid)
- }
-
- return
-}
-
-// GetActiveSession returns the count of all active sessions.
-func (manager *Manager) GetActiveSession() int {
- return manager.provider.SessionAll()
-}
-
-// SetSecure sets whether the session cookie should only be sent over https.
-func (manager *Manager) SetSecure(secure bool) {
- manager.config.Secure = secure
-}
-
-func (manager *Manager) sessionID() (string, error) {
- b := make([]byte, manager.config.SessionIDLength)
- n, err := rand.Read(b)
- if n != len(b) || err != nil {
- return "", fmt.Errorf("Could not successfully read from the system CSPRNG")
- }
- return manager.config.SessionIDPrefix + hex.EncodeToString(b), nil
-}
-
-// isSecure reports whether the session cookie should be marked secure for this request.
-func (manager *Manager) isSecure(req *http.Request) bool {
- if !manager.config.Secure {
- return false
- }
- if req.URL.Scheme != "" {
- return req.URL.Scheme == "https"
- }
- if req.TLS == nil {
- return false
- }
- return true
-}
-
-// Log implement the log.Logger
-type Log struct {
- *log.Logger
-}
-
-// NewSessionLog set io.Writer to create a Logger for session.
-func NewSessionLog(out io.Writer) *Log {
- sl := new(Log)
- sl.Logger = log.New(out, "[SESSION]", 1e9)
- return sl
-}
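
Building on the Manager and Store methods defined above, a hedged sketch of login/logout handlers that rotate and destroy session ids; the handler names, the "uid" key, and the "memory" provider choice are illustrative and not part of the original code.

    package main

    import (
        "net/http"

        "github.com/astaxie/beego/session"
    )

    // loginHandler rotates the session id after authentication; the "uid" key is
    // illustrative. SessionRegenerateID carries old data over when a session exists.
    func loginHandler(manager *session.Manager) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            store := manager.SessionRegenerateID(w, r)
            defer store.SessionRelease(w)
            _ = store.Set("uid", r.FormValue("uid"))
        }
    }

    // logoutHandler drops the server-side session and expires the cookie.
    func logoutHandler(manager *session.Manager) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            manager.SessionDestroy(w, r)
            w.WriteHeader(http.StatusNoContent)
        }
    }

    func main() {
        conf := &session.ManagerConfig{CookieName: "gosessionid", Gclifetime: 3600, EnableSetCookie: true}
        manager, _ := session.NewManager("memory", conf)
        go manager.GC()
        http.Handle("/login", loginHandler(manager))
        http.Handle("/logout", logoutHandler(manager))
        http.ListenAndServe(":8080", nil)
    }
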
diff --git a/session/ssdb/sess_ssdb.go b/session/ssdb/sess_ssdb.go
deleted file mode 100644
index de0c6360..00000000
--- a/session/ssdb/sess_ssdb.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package ssdb
-
-import (
- "errors"
- "net/http"
- "strconv"
- "strings"
- "sync"
-
- "github.com/astaxie/beego/session"
- "github.com/ssdb/gossdb/ssdb"
-)
-
-var ssdbProvider = &Provider{}
-
-// Provider holds ssdb client and configs
-type Provider struct {
- client *ssdb.Client
- host string
- port int
- maxLifetime int64
-}
-
-func (p *Provider) connectInit() error {
- var err error
- if p.host == "" || p.port == 0 {
- return errors.New("SessionInit First")
- }
- p.client, err = ssdb.Connect(p.host, p.port)
- return err
-}
-
-// SessionInit init the ssdb with the config
-func (p *Provider) SessionInit(maxLifetime int64, savePath string) error {
- p.maxLifetime = maxLifetime
- address := strings.Split(savePath, ":")
- p.host = address[0]
-
- var err error
- if p.port, err = strconv.Atoi(address[1]); err != nil {
- return err
- }
- return p.connectInit()
-}
-
-// SessionRead return a ssdb client session Store
-func (p *Provider) SessionRead(sid string) (session.Store, error) {
- if p.client == nil {
- if err := p.connectInit(); err != nil {
- return nil, err
- }
- }
- var kv map[interface{}]interface{}
- value, err := p.client.Get(sid)
- if err != nil {
- return nil, err
- }
- if value == nil || len(value.(string)) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob([]byte(value.(string)))
- if err != nil {
- return nil, err
- }
- }
- rs := &SessionStore{sid: sid, values: kv, maxLifetime: p.maxLifetime, client: p.client}
- return rs, nil
-}
-
-// SessionExist reports whether the sid exists in the session store
-func (p *Provider) SessionExist(sid string) bool {
- if p.client == nil {
- if err := p.connectInit(); err != nil {
- panic(err)
- }
- }
- value, err := p.client.Get(sid)
- if err != nil {
- panic(err)
- }
- if value == nil || len(value.(string)) == 0 {
- return false
- }
- return true
-}
-
-// SessionRegenerate regenerates the session with a new sid and deletes the old one
-func (p *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
- //conn.Do("setx", key, v, ttl)
- if p.client == nil {
- if err := p.connectInit(); err != nil {
- return nil, err
- }
- }
- value, err := p.client.Get(oldsid)
- if err != nil {
- return nil, err
- }
- var kv map[interface{}]interface{}
- if value == nil || len(value.(string)) == 0 {
- kv = make(map[interface{}]interface{})
- } else {
- kv, err = session.DecodeGob([]byte(value.(string)))
- if err != nil {
- return nil, err
- }
- _, err = p.client.Del(oldsid)
- if err != nil {
- return nil, err
- }
- }
- _, e := p.client.Do("setx", sid, value, p.maxLifetime)
- if e != nil {
- return nil, e
- }
- rs := &SessionStore{sid: sid, values: kv, maxLifetime: p.maxLifetime, client: p.client}
- return rs, nil
-}
-
-// SessionDestroy destroy the sid
-func (p *Provider) SessionDestroy(sid string) error {
- if p.client == nil {
- if err := p.connectInit(); err != nil {
- return err
- }
- }
- _, err := p.client.Del(sid)
- return err
-}
-
-// SessionGC not implemented
-func (p *Provider) SessionGC() {
-}
-
-// SessionAll not implemented
-func (p *Provider) SessionAll() int {
- return 0
-}
-
-// SessionStore holds the session information which stored in ssdb
-type SessionStore struct {
- sid string
- lock sync.RWMutex
- values map[interface{}]interface{}
- maxLifetime int64
- client *ssdb.Client
-}
-
-// Set the key and value
-func (s *SessionStore) Set(key, value interface{}) error {
- s.lock.Lock()
- defer s.lock.Unlock()
- s.values[key] = value
- return nil
-}
-
-// Get return the value by the key
-func (s *SessionStore) Get(key interface{}) interface{} {
- s.lock.Lock()
- defer s.lock.Unlock()
- if value, ok := s.values[key]; ok {
- return value
- }
- return nil
-}
-
-// Delete the key in session store
-func (s *SessionStore) Delete(key interface{}) error {
- s.lock.Lock()
- defer s.lock.Unlock()
- delete(s.values, key)
- return nil
-}
-
-// Flush delete all keys and values
-func (s *SessionStore) Flush() error {
- s.lock.Lock()
- defer s.lock.Unlock()
- s.values = make(map[interface{}]interface{})
- return nil
-}
-
-// SessionID return the sessionID
-func (s *SessionStore) SessionID() string {
- return s.sid
-}
-
-// SessionRelease stores the key/value pairs into ssdb
-func (s *SessionStore) SessionRelease(w http.ResponseWriter) {
- b, err := session.EncodeGob(s.values)
- if err != nil {
- return
- }
- s.client.Do("setx", s.sid, string(b), s.maxLifetime)
-}
-
-func init() {
- session.Register("ssdb", ssdbProvider)
-}
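
The ssdb provider registers itself under the name "ssdb" and expects its save path in host:port form (SessionInit splits it on ":"). A brief wiring sketch, assuming the import path follows the file location in this diff and that an ssdb server is reachable at the illustrative address.

    package main

    import (
        "github.com/astaxie/beego/session"
        _ "github.com/astaxie/beego/session/ssdb" // blank import runs init(), registering "ssdb"
    )

    func main() {
        conf := &session.ManagerConfig{
            CookieName:     "gosessionid",
            Gclifetime:     3600,
            ProviderConfig: "127.0.0.1:8888", // host:port, split on ":" in SessionInit
        }
        // NewManager calls SessionInit, which connects to ssdb immediately.
        if _, err := session.NewManager("ssdb", conf); err != nil {
            panic(err)
        }
    }
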
diff --git a/staticfile.go b/staticfile.go
deleted file mode 100644
index e26776c5..00000000
--- a/staticfile.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "bytes"
- "errors"
- "net/http"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/astaxie/beego/context"
- "github.com/astaxie/beego/logs"
- "github.com/hashicorp/golang-lru"
-)
-
-var errNotStaticRequest = errors.New("request not a static file request")
-
-func serverStaticRouter(ctx *context.Context) {
- if ctx.Input.Method() != "GET" && ctx.Input.Method() != "HEAD" {
- return
- }
-
- forbidden, filePath, fileInfo, err := lookupFile(ctx)
- if err == errNotStaticRequest {
- return
- }
-
- if forbidden {
- exception("403", ctx)
- return
- }
-
- if filePath == "" || fileInfo == nil {
- if BConfig.RunMode == DEV {
- logs.Warn("Can't find/open the file:", filePath, err)
- }
- http.NotFound(ctx.ResponseWriter, ctx.Request)
- return
- }
- if fileInfo.IsDir() {
- requestURL := ctx.Input.URL()
- if requestURL[len(requestURL)-1] != '/' {
- redirectURL := requestURL + "/"
- if ctx.Request.URL.RawQuery != "" {
- redirectURL = redirectURL + "?" + ctx.Request.URL.RawQuery
- }
- ctx.Redirect(302, redirectURL)
- } else {
- //serveFile will list dir
- http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath)
- }
- return
- } else if fileInfo.Size() > int64(BConfig.WebConfig.StaticCacheFileSize) {
- //over size file serve with http module
- http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath)
- return
- }
-
- var enableCompress = BConfig.EnableGzip && isStaticCompress(filePath)
- var acceptEncoding string
- if enableCompress {
- acceptEncoding = context.ParseEncoding(ctx.Request)
- }
- b, n, sch, reader, err := openFile(filePath, fileInfo, acceptEncoding)
- if err != nil {
- if BConfig.RunMode == DEV {
- logs.Warn("Can't compress the file:", filePath, err)
- }
- http.NotFound(ctx.ResponseWriter, ctx.Request)
- return
- }
-
- if b {
- ctx.Output.Header("Content-Encoding", n)
- } else {
- ctx.Output.Header("Content-Length", strconv.FormatInt(sch.size, 10))
- }
-
- http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, reader)
-}
-
-type serveContentHolder struct {
- data []byte
- modTime time.Time
- size int64
- originSize int64 //original file size:to judge file changed
- encoding string
-}
-
-type serveContentReader struct {
- *bytes.Reader
-}
-
-var (
- staticFileLruCache *lru.Cache
- lruLock sync.RWMutex
-)
-
-func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, *serveContentReader, error) {
- if staticFileLruCache == nil {
- //avoid lru cache error
- if BConfig.WebConfig.StaticCacheFileNum >= 1 {
- staticFileLruCache, _ = lru.New(BConfig.WebConfig.StaticCacheFileNum)
- } else {
- staticFileLruCache, _ = lru.New(1)
- }
- }
- mapKey := acceptEncoding + ":" + filePath
- lruLock.RLock()
- var mapFile *serveContentHolder
- if cacheItem, ok := staticFileLruCache.Get(mapKey); ok {
- mapFile = cacheItem.(*serveContentHolder)
- }
- lruLock.RUnlock()
- if isOk(mapFile, fi) {
- reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
- return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil
- }
- lruLock.Lock()
- defer lruLock.Unlock()
- if cacheItem, ok := staticFileLruCache.Get(mapKey); ok {
- mapFile = cacheItem.(*serveContentHolder)
- }
- if !isOk(mapFile, fi) {
- file, err := os.Open(filePath)
- if err != nil {
- return false, "", nil, nil, err
- }
- defer file.Close()
- var bufferWriter bytes.Buffer
- _, n, err := context.WriteFile(acceptEncoding, &bufferWriter, file)
- if err != nil {
- return false, "", nil, nil, err
- }
- mapFile = &serveContentHolder{data: bufferWriter.Bytes(), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), originSize: fi.Size(), encoding: n}
- if isOk(mapFile, fi) {
- staticFileLruCache.Add(mapKey, mapFile)
- }
- }
-
- reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
- return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil
-}
-
-func isOk(s *serveContentHolder, fi os.FileInfo) bool {
- if s == nil {
- return false
- } else if s.size > int64(BConfig.WebConfig.StaticCacheFileSize) {
- return false
- }
- return s.modTime == fi.ModTime() && s.originSize == fi.Size()
-}
-
-// isStaticCompress reports whether the static file should be served compressed, based on its extension
-func isStaticCompress(filePath string) bool {
- for _, statExtension := range BConfig.WebConfig.StaticExtensionsToGzip {
- if strings.HasSuffix(strings.ToLower(filePath), strings.ToLower(statExtension)) {
- return true
- }
- }
- return false
-}
-
-// searchFile searches for the file by url path.
-// If no static file prefix matches, it returns errNotStaticRequest.
-func searchFile(ctx *context.Context) (string, os.FileInfo, error) {
- requestPath := filepath.ToSlash(filepath.Clean(ctx.Request.URL.Path))
- // special processing : favicon.ico/robots.txt can be in any static dir
- if requestPath == "/favicon.ico" || requestPath == "/robots.txt" {
- file := path.Join(".", requestPath)
- if fi, _ := os.Stat(file); fi != nil {
- return file, fi, nil
- }
- for _, staticDir := range BConfig.WebConfig.StaticDir {
- filePath := path.Join(staticDir, requestPath)
- if fi, _ := os.Stat(filePath); fi != nil {
- return filePath, fi, nil
- }
- }
- return "", nil, errNotStaticRequest
- }
-
- for prefix, staticDir := range BConfig.WebConfig.StaticDir {
- if !strings.Contains(requestPath, prefix) {
- continue
- }
- if prefix != "/" && len(requestPath) > len(prefix) && requestPath[len(prefix)] != '/' {
- continue
- }
- filePath := path.Join(staticDir, requestPath[len(prefix):])
- if fi, err := os.Stat(filePath); fi != nil {
- return filePath, fi, err
- }
- }
- return "", nil, errNotStaticRequest
-}
-
-// lookupFile finds the file to serve.
-// If the file is a directory, it looks for index.html as the default file (which must not be a directory either).
-// If index.html does not exist or is a directory, a forbidden response is given depending on DirectoryIndex.
-func lookupFile(ctx *context.Context) (bool, string, os.FileInfo, error) {
- fp, fi, err := searchFile(ctx)
- if fp == "" || fi == nil {
- return false, "", nil, err
- }
- if !fi.IsDir() {
- return false, fp, fi, err
- }
- if requestURL := ctx.Input.URL(); requestURL[len(requestURL)-1] == '/' {
- ifp := filepath.Join(fp, "index.html")
- if ifi, _ := os.Stat(ifp); ifi != nil && ifi.Mode().IsRegular() {
- return false, ifp, ifi, err
- }
- }
- return !BConfig.WebConfig.DirectoryIndex, fp, fi, err
-}
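
The static file router above is driven entirely by BConfig. A small configuration sketch, assuming the default beego config leaves StaticDir initialized as a map and that beego.Run starts the router; the prefix, directory, and extensions are illustrative.

    package main

    import "github.com/astaxie/beego"

    func main() {
        // Map a URL prefix onto a directory; serverStaticRouter matches the prefix
        // and joins the rest of the request path onto the directory.
        beego.BConfig.WebConfig.StaticDir["/assets"] = "static_assets"

        // Small files are cached (and gzipped when the extension is listed and
        // EnableGzip is on); files above StaticCacheFileSize go straight to
        // http.ServeFile.
        beego.BConfig.EnableGzip = true
        beego.BConfig.WebConfig.StaticExtensionsToGzip = []string{".css", ".js"}

        beego.Run()
    }
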
diff --git a/staticfile_test.go b/staticfile_test.go
deleted file mode 100644
index e46c13ec..00000000
--- a/staticfile_test.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package beego
-
-import (
- "bytes"
- "compress/gzip"
- "compress/zlib"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "testing"
-)
-
-var currentWorkDir, _ = os.Getwd()
-var licenseFile = filepath.Join(currentWorkDir, "LICENSE")
-
-func testOpenFile(encoding string, content []byte, t *testing.T) {
- fi, _ := os.Stat(licenseFile)
- b, n, sch, reader, err := openFile(licenseFile, fi, encoding)
- if err != nil {
- t.Log(err)
- t.Fail()
- }
-
- t.Log("open static file encoding "+n, b)
-
- assetOpenFileAndContent(sch, reader, content, t)
-}
-func TestOpenStaticFile_1(t *testing.T) {
- file, _ := os.Open(licenseFile)
- content, _ := ioutil.ReadAll(file)
- testOpenFile("", content, t)
-}
-
-func TestOpenStaticFileGzip_1(t *testing.T) {
- file, _ := os.Open(licenseFile)
- var zipBuf bytes.Buffer
- fileWriter, _ := gzip.NewWriterLevel(&zipBuf, gzip.BestCompression)
- io.Copy(fileWriter, file)
- fileWriter.Close()
- content, _ := ioutil.ReadAll(&zipBuf)
-
- testOpenFile("gzip", content, t)
-}
-func TestOpenStaticFileDeflate_1(t *testing.T) {
- file, _ := os.Open(licenseFile)
- var zipBuf bytes.Buffer
- fileWriter, _ := zlib.NewWriterLevel(&zipBuf, zlib.BestCompression)
- io.Copy(fileWriter, file)
- fileWriter.Close()
- content, _ := ioutil.ReadAll(&zipBuf)
-
- testOpenFile("deflate", content, t)
-}
-
-func TestStaticCacheWork(t *testing.T) {
- encodings := []string{"", "gzip", "deflate"}
-
- fi, _ := os.Stat(licenseFile)
- for _, encoding := range encodings {
- _, _, first, _, err := openFile(licenseFile, fi, encoding)
- if err != nil {
- t.Error(err)
- continue
- }
-
- _, _, second, _, err := openFile(licenseFile, fi, encoding)
- if err != nil {
- t.Error(err)
- continue
- }
-
- address1 := fmt.Sprintf("%p", first)
- address2 := fmt.Sprintf("%p", second)
- if address1 != address2 {
- t.Errorf("encoding '%v' can not hit cache", encoding)
- }
- }
-}
-
-func assetOpenFileAndContent(sch *serveContentHolder, reader *serveContentReader, content []byte, t *testing.T) {
- t.Log(sch.size, len(content))
- if sch.size != int64(len(content)) {
- t.Log("static content file size not same")
- t.Fail()
- }
- bs, _ := ioutil.ReadAll(reader)
- for i, v := range content {
- if v != bs[i] {
- t.Log("content not same")
- t.Fail()
- }
- }
- if staticFileLruCache.Len() == 0 {
-		t.Log("mem map is empty")
- t.Fail()
- }
-}
diff --git a/swagger/swagger.go b/swagger/swagger.go
deleted file mode 100644
index a55676cd..00000000
--- a/swagger/swagger.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// Swagger™ is a project used to describe and document RESTful APIs.
-//
-// The Swagger specification defines a set of files required to describe such an API. These files can then be used by the Swagger-UI project to display the API and Swagger-Codegen to generate clients in various languages. Additional utilities can also take advantage of the resulting files, such as testing tools.
-// Now in version 2.0, Swagger is more enabling than ever. And it's 100% open source software.
-
-// Package swagger struct definition
-package swagger
-
-// Swagger list the resource
-type Swagger struct {
- SwaggerVersion string `json:"swagger,omitempty" yaml:"swagger,omitempty"`
- Infos Information `json:"info" yaml:"info"`
- Host string `json:"host,omitempty" yaml:"host,omitempty"`
- BasePath string `json:"basePath,omitempty" yaml:"basePath,omitempty"`
- Schemes []string `json:"schemes,omitempty" yaml:"schemes,omitempty"`
- Consumes []string `json:"consumes,omitempty" yaml:"consumes,omitempty"`
- Produces []string `json:"produces,omitempty" yaml:"produces,omitempty"`
- Paths map[string]*Item `json:"paths" yaml:"paths"`
- Definitions map[string]Schema `json:"definitions,omitempty" yaml:"definitions,omitempty"`
- SecurityDefinitions map[string]Security `json:"securityDefinitions,omitempty" yaml:"securityDefinitions,omitempty"`
- Security []map[string][]string `json:"security,omitempty" yaml:"security,omitempty"`
- Tags []Tag `json:"tags,omitempty" yaml:"tags,omitempty"`
- ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"`
-}
-
-// Information Provides metadata about the API. The metadata can be used by the clients if needed.
-type Information struct {
- Title string `json:"title,omitempty" yaml:"title,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- Version string `json:"version,omitempty" yaml:"version,omitempty"`
- TermsOfService string `json:"termsOfService,omitempty" yaml:"termsOfService,omitempty"`
-
- Contact Contact `json:"contact,omitempty" yaml:"contact,omitempty"`
- License *License `json:"license,omitempty" yaml:"license,omitempty"`
-}
-
-// Contact information for the exposed API.
-type Contact struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- URL string `json:"url,omitempty" yaml:"url,omitempty"`
- EMail string `json:"email,omitempty" yaml:"email,omitempty"`
-}
-
-// License information for the exposed API.
-type License struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- URL string `json:"url,omitempty" yaml:"url,omitempty"`
-}
-
-// Item Describes the operations available on a single path.
-type Item struct {
- Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"`
- Get *Operation `json:"get,omitempty" yaml:"get,omitempty"`
- Put *Operation `json:"put,omitempty" yaml:"put,omitempty"`
- Post *Operation `json:"post,omitempty" yaml:"post,omitempty"`
- Delete *Operation `json:"delete,omitempty" yaml:"delete,omitempty"`
- Options *Operation `json:"options,omitempty" yaml:"options,omitempty"`
- Head *Operation `json:"head,omitempty" yaml:"head,omitempty"`
- Patch *Operation `json:"patch,omitempty" yaml:"patch,omitempty"`
-}
-
-// Operation Describes a single API operation on a path.
-type Operation struct {
- Tags []string `json:"tags,omitempty" yaml:"tags,omitempty"`
- Summary string `json:"summary,omitempty" yaml:"summary,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- OperationID string `json:"operationId,omitempty" yaml:"operationId,omitempty"`
- Consumes []string `json:"consumes,omitempty" yaml:"consumes,omitempty"`
- Produces []string `json:"produces,omitempty" yaml:"produces,omitempty"`
- Schemes []string `json:"schemes,omitempty" yaml:"schemes,omitempty"`
- Parameters []Parameter `json:"parameters,omitempty" yaml:"parameters,omitempty"`
- Responses map[string]Response `json:"responses,omitempty" yaml:"responses,omitempty"`
- Security []map[string][]string `json:"security,omitempty" yaml:"security,omitempty"`
- Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"`
-}
-
-// Parameter Describes a single operation parameter.
-type Parameter struct {
- In string `json:"in,omitempty" yaml:"in,omitempty"`
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- Required bool `json:"required,omitempty" yaml:"required,omitempty"`
- Schema *Schema `json:"schema,omitempty" yaml:"schema,omitempty"`
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Format string `json:"format,omitempty" yaml:"format,omitempty"`
- Items *ParameterItems `json:"items,omitempty" yaml:"items,omitempty"`
- Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
-}
-
-// ParameterItems A limited subset of JSON-Schema's items object. It is used by parameter definitions that are not located in "body".
-// http://swagger.io/specification/#itemsObject
-type ParameterItems struct {
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Format string `json:"format,omitempty" yaml:"format,omitempty"`
- Items []*ParameterItems `json:"items,omitempty" yaml:"items,omitempty"` //Required if type is "array". Describes the type of items in the array.
- CollectionFormat string `json:"collectionFormat,omitempty" yaml:"collectionFormat,omitempty"`
- Default string `json:"default,omitempty" yaml:"default,omitempty"`
-}
-
-// Schema Object allows the definition of input and output data types.
-type Schema struct {
- Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"`
- Title string `json:"title,omitempty" yaml:"title,omitempty"`
- Format string `json:"format,omitempty" yaml:"format,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- Required []string `json:"required,omitempty" yaml:"required,omitempty"`
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Items *Schema `json:"items,omitempty" yaml:"items,omitempty"`
- Properties map[string]Propertie `json:"properties,omitempty" yaml:"properties,omitempty"`
- Enum []interface{} `json:"enum,omitempty" yaml:"enum,omitempty"`
- Example interface{} `json:"example,omitempty" yaml:"example,omitempty"`
-}
-
-// Propertie fields are taken from the JSON Schema definition, but their definitions were adjusted to the Swagger Specification.
-type Propertie struct {
- Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"`
- Title string `json:"title,omitempty" yaml:"title,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
- Type string `json:"type,omitempty" yaml:"type,omitempty"`
- Example interface{} `json:"example,omitempty" yaml:"example,omitempty"`
- Required []string `json:"required,omitempty" yaml:"required,omitempty"`
- Format string `json:"format,omitempty" yaml:"format,omitempty"`
- ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`
- Properties map[string]Propertie `json:"properties,omitempty" yaml:"properties,omitempty"`
- Items *Propertie `json:"items,omitempty" yaml:"items,omitempty"`
- AdditionalProperties *Propertie `json:"additionalProperties,omitempty" yaml:"additionalProperties,omitempty"`
-}
-
-// Response as they are returned from executing this operation.
-type Response struct {
- Description string `json:"description" yaml:"description"`
- Schema *Schema `json:"schema,omitempty" yaml:"schema,omitempty"`
- Ref string `json:"$ref,omitempty" yaml:"$ref,omitempty"`
-}
-
-// Security Allows the definition of a security scheme that can be used by the operations
-type Security struct {
- Type string `json:"type,omitempty" yaml:"type,omitempty"` // Valid values are "basic", "apiKey" or "oauth2".
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- In string `json:"in,omitempty" yaml:"in,omitempty"` // Valid values are "query" or "header".
- Flow string `json:"flow,omitempty" yaml:"flow,omitempty"` // Valid values are "implicit", "password", "application" or "accessCode".
- AuthorizationURL string `json:"authorizationUrl,omitempty" yaml:"authorizationUrl,omitempty"`
- TokenURL string `json:"tokenUrl,omitempty" yaml:"tokenUrl,omitempty"`
- Scopes map[string]string `json:"scopes,omitempty" yaml:"scopes,omitempty"` // The available scopes for the OAuth2 security scheme.
-}
-
-// Tag Allows adding metadata to a single tag that is used by the Operation Object
-type Tag struct {
- Name string `json:"name,omitempty" yaml:"name,omitempty"`
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"`
-}
-
-// ExternalDocs includes additional external documentation.
-type ExternalDocs struct {
- Description string `json:"description,omitempty" yaml:"description,omitempty"`
- URL string `json:"url,omitempty" yaml:"url,omitempty"`
-}
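For orientation, the definition types above compose in the usual Swagger 2.0 shape. A minimal, hypothetical sketch follows; only the struct and field names come from the source, while the `swagger` package name/import path and all example values are assumptions:

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/astaxie/beego/swagger" // assumed import path for the removed package
	)

	func main() {
		info := swagger.Information{
			Title:   "Example API",
			Version: "1.0.0",
			Contact: swagger.Contact{Name: "API team", EMail: "api@example.com"},
		}
		item := swagger.Item{
			Get: &swagger.Operation{
				OperationID: "listWidgets",
				Produces:    []string{"application/json"},
				Responses: map[string]swagger.Response{
					"200": {Description: "widget list", Schema: &swagger.Schema{Type: "array", Items: &swagger.Schema{Ref: "#/definitions/Widget"}}},
				},
			},
		}
		// Every field carries both json and yaml tags, so marshalling yields the matching spec fragment.
		out, _ := json.MarshalIndent(map[string]interface{}{"info": info, "paths": map[string]swagger.Item{"/widgets": item}}, "", "  ")
		fmt.Println(string(out))
	}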
diff --git a/template.go b/template.go
deleted file mode 100644
index 69b178ca..00000000
--- a/template.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "errors"
- "fmt"
- "html/template"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "regexp"
- "strings"
- "sync"
-
- "github.com/astaxie/beego/logs"
- "github.com/astaxie/beego/utils"
-)
-
-var (
- beegoTplFuncMap = make(template.FuncMap)
- beeViewPathTemplateLocked = false
- // beeViewPathTemplates caching map and supported template file extensions per view
- beeViewPathTemplates = make(map[string]map[string]*template.Template)
- templatesLock sync.RWMutex
- // beeTemplateExt stores the template extension which will build
- beeTemplateExt = []string{"tpl", "html", "gohtml"}
- // beeTemplatePreprocessors stores associations of extension -> preprocessor handler
- beeTemplateEngines = map[string]templatePreProcessor{}
- beeTemplateFS = defaultFSFunc
-)
-
-// ExecuteTemplate applies the template with name to the specified data object,
-// writing the output to wr.
-// A template will be executed safely in parallel.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
- return ExecuteViewPathTemplate(wr, name, BConfig.WebConfig.ViewsPath, data)
-}
-
-// ExecuteViewPathTemplate applies the template with the given name from the specified viewPath to the specified data object,
-// writing the output to wr.
-// A template will be executed safely in parallel.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func ExecuteViewPathTemplate(wr io.Writer, name string, viewPath string, data interface{}) error {
- if BConfig.RunMode == DEV {
- templatesLock.RLock()
- defer templatesLock.RUnlock()
- }
- if beeTemplates, ok := beeViewPathTemplates[viewPath]; ok {
- if t, ok := beeTemplates[name]; ok {
- var err error
- if t.Lookup(name) != nil {
- err = t.ExecuteTemplate(wr, name, data)
- } else {
- err = t.Execute(wr, data)
- }
- if err != nil {
- logs.Trace("template Execute err:", err)
- }
- return err
- }
- panic("can't find templatefile in the path:" + viewPath + "/" + name)
- }
- panic("Unknown view path:" + viewPath)
-}
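// A minimal usage sketch for the two execute helpers above (the template name,
// view path and data are illustrative; the template must already have been
// built for that view path, e.g. via AddViewPath or BuildTemplate):
//
//	var buf bytes.Buffer
//	err := ExecuteViewPathTemplate(&buf, "index.tpl", "views", map[string]interface{}{"Name": "beego"})
//	// ExecuteTemplate(&buf, "index.tpl", data) does the same against BConfig.WebConfig.ViewsPath.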
-
-func init() {
- beegoTplFuncMap["dateformat"] = DateFormat
- beegoTplFuncMap["date"] = Date
- beegoTplFuncMap["compare"] = Compare
- beegoTplFuncMap["compare_not"] = CompareNot
- beegoTplFuncMap["not_nil"] = NotNil
- beegoTplFuncMap["not_null"] = NotNil
- beegoTplFuncMap["substr"] = Substr
- beegoTplFuncMap["html2str"] = HTML2str
- beegoTplFuncMap["str2html"] = Str2html
- beegoTplFuncMap["htmlquote"] = Htmlquote
- beegoTplFuncMap["htmlunquote"] = Htmlunquote
- beegoTplFuncMap["renderform"] = RenderForm
- beegoTplFuncMap["assets_js"] = AssetsJs
- beegoTplFuncMap["assets_css"] = AssetsCSS
- beegoTplFuncMap["config"] = GetConfig
- beegoTplFuncMap["map_get"] = MapGet
-
- // Comparisons
- beegoTplFuncMap["eq"] = eq // ==
- beegoTplFuncMap["ge"] = ge // >=
- beegoTplFuncMap["gt"] = gt // >
- beegoTplFuncMap["le"] = le // <=
- beegoTplFuncMap["lt"] = lt // <
- beegoTplFuncMap["ne"] = ne // !=
-
- beegoTplFuncMap["urlfor"] = URLFor // build a URL to match a Controller and it's method
-}
-
-// AddFuncMap lets the user register a func for use in templates.
-func AddFuncMap(key string, fn interface{}) error {
- beegoTplFuncMap[key] = fn
- return nil
-}
-
-type templatePreProcessor func(root, path string, funcs template.FuncMap) (*template.Template, error)
-
-type templateFile struct {
- root string
- files map[string][]string
-}
-
-// visit splits a path into two parts relative to tf.root: the subDir and the file path (both without tf.root).
-// For example, if tf.root = "views" and
-// paths is "views/errors/404.html", the subDir will be "errors" and the file will be "errors/404.html";
-// paths is "views/admin/errors/404.html", the subDir will be "admin/errors" and the file will be "admin/errors/404.html".
-func (tf *templateFile) visit(paths string, f os.FileInfo, err error) error {
- if f == nil {
- return err
- }
- if f.IsDir() || (f.Mode()&os.ModeSymlink) > 0 {
- return nil
- }
- if !HasTemplateExt(paths) {
- return nil
- }
-
- replace := strings.NewReplacer("\\", "/")
- file := strings.TrimLeft(replace.Replace(paths[len(tf.root):]), "/")
- subDir := filepath.Dir(file)
-
- tf.files[subDir] = append(tf.files[subDir], file)
- return nil
-}
-
-// HasTemplateExt reports whether the path has a template extension supported by beego.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func HasTemplateExt(paths string) bool {
- for _, v := range beeTemplateExt {
- if strings.HasSuffix(paths, "."+v) {
- return true
- }
- }
- return false
-}
-
-// AddTemplateExt adds a new template file extension.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func AddTemplateExt(ext string) {
- for _, v := range beeTemplateExt {
- if v == ext {
- return
- }
- }
- beeTemplateExt = append(beeTemplateExt, ext)
-}
-
-// AddViewPath adds a new path to the supported view paths.
-// It can later be used by setting a controller's ViewPath to this folder.
-// It panics if called after beego.Run().
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func AddViewPath(viewPath string) error {
- if beeViewPathTemplateLocked {
- if _, exist := beeViewPathTemplates[viewPath]; exist {
- return nil //Ignore if viewpath already exists
- }
- panic("Can not add new view paths after beego.Run()")
- }
- beeViewPathTemplates[viewPath] = make(map[string]*template.Template)
- return BuildTemplate(viewPath)
-}
-
-func lockViewPaths() {
- beeViewPathTemplateLocked = true
-}
-
-// BuildTemplate will build all template files in a directory,
-// so that beego can render any template file in the view directory.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func BuildTemplate(dir string, files ...string) error {
- var err error
- fs := beeTemplateFS()
- f, err := fs.Open(dir)
- if err != nil {
- if os.IsNotExist(err) {
- return nil
- }
- return errors.New("dir open err")
- }
- defer f.Close()
-
- beeTemplates, ok := beeViewPathTemplates[dir]
- if !ok {
- panic("Unknown view path: " + dir)
- }
- self := &templateFile{
- root: dir,
- files: make(map[string][]string),
- }
- err = Walk(fs, dir, func(path string, f os.FileInfo, err error) error {
- return self.visit(path, f, err)
- })
- if err != nil {
- fmt.Printf("Walk() returned %v\n", err)
- return err
- }
- buildAllFiles := len(files) == 0
- for _, v := range self.files {
- for _, file := range v {
- if buildAllFiles || utils.InSlice(file, files) {
- templatesLock.Lock()
- ext := filepath.Ext(file)
- var t *template.Template
- if len(ext) == 0 {
- t, err = getTemplate(self.root, fs, file, v...)
- } else if fn, ok := beeTemplateEngines[ext[1:]]; ok {
- t, err = fn(self.root, file, beegoTplFuncMap)
- } else {
- t, err = getTemplate(self.root, fs, file, v...)
- }
- if err != nil {
- logs.Error("parse template err:", file, err)
- templatesLock.Unlock()
- return err
- }
- beeTemplates[file] = t
- templatesLock.Unlock()
- }
- }
- }
- return nil
-}
-
-func getTplDeep(root string, fs http.FileSystem, file string, parent string, t *template.Template) (*template.Template, [][]string, error) {
- var fileAbsPath string
- var rParent string
- var err error
- if strings.HasPrefix(file, "../") {
- rParent = filepath.Join(filepath.Dir(parent), file)
- fileAbsPath = filepath.Join(root, filepath.Dir(parent), file)
- } else {
- rParent = file
- fileAbsPath = filepath.Join(root, file)
- }
- f, err := fs.Open(fileAbsPath)
- if err != nil {
- panic("can't find template file:" + file)
- }
- defer f.Close()
- data, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, [][]string{}, err
- }
- t, err = t.New(file).Parse(string(data))
- if err != nil {
- return nil, [][]string{}, err
- }
- reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*template[ ]+\"([^\"]+)\"")
- allSub := reg.FindAllStringSubmatch(string(data), -1)
- for _, m := range allSub {
- if len(m) == 2 {
- tl := t.Lookup(m[1])
- if tl != nil {
- continue
- }
- if !HasTemplateExt(m[1]) {
- continue
- }
- _, _, err = getTplDeep(root, fs, m[1], rParent, t)
- if err != nil {
- return nil, [][]string{}, err
- }
- }
- }
- return t, allSub, nil
-}
-
-func getTemplate(root string, fs http.FileSystem, file string, others ...string) (t *template.Template, err error) {
- t = template.New(file).Delims(BConfig.WebConfig.TemplateLeft, BConfig.WebConfig.TemplateRight).Funcs(beegoTplFuncMap)
- var subMods [][]string
- t, subMods, err = getTplDeep(root, fs, file, "", t)
- if err != nil {
- return nil, err
- }
- t, err = _getTemplate(t, root, fs, subMods, others...)
-
- if err != nil {
- return nil, err
- }
- return
-}
-
-func _getTemplate(t0 *template.Template, root string, fs http.FileSystem, subMods [][]string, others ...string) (t *template.Template, err error) {
- t = t0
- for _, m := range subMods {
- if len(m) == 2 {
- tpl := t.Lookup(m[1])
- if tpl != nil {
- continue
- }
- //first check filename
- for _, otherFile := range others {
- if otherFile == m[1] {
- var subMods1 [][]string
- t, subMods1, err = getTplDeep(root, fs, otherFile, "", t)
- if err != nil {
- logs.Trace("template parse file err:", err)
- } else if len(subMods1) > 0 {
- t, err = _getTemplate(t, root, fs, subMods1, others...)
- }
- break
- }
- }
- //second check define
- for _, otherFile := range others {
- var data []byte
- fileAbsPath := filepath.Join(root, otherFile)
- f, err := fs.Open(fileAbsPath)
- if err != nil {
- f.Close()
- logs.Trace("template file parse error, not success open file:", err)
- continue
- }
- data, err = ioutil.ReadAll(f)
- f.Close()
- if err != nil {
- logs.Trace("template file parse error, not success read file:", err)
- continue
- }
- reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*define[ ]+\"([^\"]+)\"")
- allSub := reg.FindAllStringSubmatch(string(data), -1)
- for _, sub := range allSub {
- if len(sub) == 2 && sub[1] == m[1] {
- var subMods1 [][]string
- t, subMods1, err = getTplDeep(root, fs, otherFile, "", t)
- if err != nil {
- logs.Trace("template parse file err:", err)
- } else if len(subMods1) > 0 {
- t, err = _getTemplate(t, root, fs, subMods1, others...)
- if err != nil {
- logs.Trace("template parse file err:", err)
- }
- }
- break
- }
- }
- }
- }
-
- }
- return
-}
-
-type templateFSFunc func() http.FileSystem
-
-func defaultFSFunc() http.FileSystem {
- return FileSystem{}
-}
-
-// SetTemplateFSFunc sets the default filesystem function.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func SetTemplateFSFunc(fnt templateFSFunc) {
- beeTemplateFS = fnt
-}
-
-// SetViewsPath sets view directory path in beego application.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func SetViewsPath(path string) *App {
- BConfig.WebConfig.ViewsPath = path
- return BeeApp
-}
-
-// SetStaticPath sets static directory path and proper url pattern in beego application.
-// if beego.SetStaticPath("static","public"), visit /static/* to load static file in folder "public".
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func SetStaticPath(url string, path string) *App {
- if !strings.HasPrefix(url, "/") {
- url = "/" + url
- }
- if url != "/" {
- url = strings.TrimRight(url, "/")
- }
- BConfig.WebConfig.StaticDir[url] = path
- return BeeApp
-}
-
-// DelStaticPath removes the static folder setting for the given url pattern in the beego application.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func DelStaticPath(url string) *App {
- if !strings.HasPrefix(url, "/") {
- url = "/" + url
- }
- if url != "/" {
- url = strings.TrimRight(url, "/")
- }
- delete(BConfig.WebConfig.StaticDir, url)
- return BeeApp
-}
-
-// AddTemplateEngine adds a new templatePreProcessor which supports the given extension.
-// Deprecated: using pkg/, we will delete this in v2.1.0
-func AddTemplateEngine(extension string, fn templatePreProcessor) *App {
- AddTemplateExt(extension)
- beeTemplateEngines[extension] = fn
- return BeeApp
-}
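Taken together, the helpers in the deleted template.go were typically wired up before beego.Run(). A rough, hypothetical sketch against the removed (deprecated) API; the directory names, URL pattern and custom function are illustrative only:

	package main

	import (
		"strings"

		"github.com/astaxie/beego"
	)

	func main() {
		// Register an extra view directory; this panics if called after beego.Run().
		if err := beego.AddViewPath("custom_views"); err != nil {
			panic(err)
		}
		// Expose strings.ToUpper to all templates, e.g. {{upper .Title}}.
		beego.AddFuncMap("upper", strings.ToUpper)
		// Serve files from ./assets under the /static/* URL pattern.
		beego.SetStaticPath("/static", "assets")
		beego.Run()
	}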
diff --git a/template_test.go b/template_test.go
deleted file mode 100644
index bde9c100..00000000
--- a/template_test.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Copyright 2014 beego Author. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package beego
-
-import (
- "bytes"
- "github.com/astaxie/beego/test"
- "github.com/elazarl/go-bindata-assetfs"
- "net/http"
- "os"
- "path/filepath"
- "testing"
-)
-
-var header = `{{define "header"}}
-