mirror of https://github.com/astaxie/beego.git synced 2025-07-12 04:11:02 +00:00

189 Commits

Author SHA1 Message Date
d96289a81b Merge pull request #2771 from astaxie/develop
v1.9.0
2017-07-19 00:56:48 +08:00
4fc95b0d69 gofmt and golint 2017-07-19 00:52:27 +08:00
aa3d6c5363 fix the gosimple 2017-07-19 00:37:42 +08:00
5ac0cb929c v1.9.0 2017-07-18 23:58:22 +08:00
b27ab53017 fix issue for runMethod and runRouter from context 2017-07-18 23:41:50 +08:00
1aba294405 Merge pull request #2740 from xlwcom/master
fix the bug in the "ParseBool" function in config.go
2017-07-18 13:39:45 +08:00
657e55ed59 Merge pull request #2692 from jerson/master
added statusCode and pattern to FilterMonitorFunc
2017-07-17 11:06:09 +08:00
621c25396e Merge pull request #2766 from yangsf5/master
sort ControllerComments
2017-07-17 11:02:28 +08:00
715ba918f0 Merge pull request #2744 from gnanakeethan/feature/database-migration
[Proposal] Database Migrations;
2017-07-17 10:54:44 +08:00
8bb0a70847 Update: Fix in SQL Generation
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-16 08:48:44 +05:30
94e79eddcf Update: removing remnant of revert commit ( a call to function
m.DDLSpec() )

Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-16 08:37:27 +05:30
749a4028b4 Revert "Update: removing the need to call DDLSpec in the migration file"
The odds of getting this working perfectly are not good.

This reverts commit d58ad2ee36.
2017-07-16 08:11:10 +05:30
fc55c2b57c Update: missed to call DDLSpec in Down migration
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-16 07:30:53 +05:30
d58ad2ee36 Update: removing the need to call DDLSpec in the migration file
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-16 07:24:58 +05:30
cb38ab4f85 Update: fixing a SQL generation code
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-16 07:10:09 +05:30
fc86f6422d sort ControllerComments 2017-07-15 17:26:20 +08:00
d453242e48 Update: moving package to bottom
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-14 14:42:56 +05:30
7c2ec075a4 Update: fixing some methods and adding documentation
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-13 21:03:30 +05:30
c903de41e4 updated sample for FilterMonitorFunc
added pattern to sample
2017-07-12 09:51:37 -05:00
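The admin module diff further down widens FilterMonitorFunc from `func(string, string, time.Duration) bool` to `func(string, string, time.Duration, string, int) bool`, passing the matched router pattern and the response status code. A minimal sketch of a filter using the new signature; the filtering rules themselves are illustrative, not from the change:

```go
package main

import (
	"time"

	"github.com/astaxie/beego"
)

func init() {
	// New signature: method, requestPath, duration, matched pattern, status code.
	beego.FilterMonitorFunc = func(method, requestPath string, t time.Duration, pattern string, statusCode int) bool {
		if method == "POST" {
			return false // skip recording POST requests, as in the admin module usage comment
		}
		if statusCode >= 500 {
			return false // example rule: don't record server errors
		}
		return true // record everything else in the admin monitor
	}
}
```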
e8c8366308 Merge pull request #2754 from imiskolee/develop
support gzip when req.Header has `Content-Encoding: gzip`
2017-07-12 19:57:29 +08:00
4901567bba Merge pull request #2749 from satng/patch-4
Oracle insert placeholder
2017-07-12 19:49:32 +08:00
29bcd31b27 support gzip when req.Header has Content-Encoding: gzip 2017-07-10 21:27:54 +08:00
83a563c0ab Oracle insert placeholder 2017-07-09 12:25:51 +08:00
e888fee4e0 Update: Foreign Key & Comments
Summary: Foreign Key functions are now available

Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-06 20:26:37 +05:30
c1ba11f531 Fixing typo
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-06 14:58:40 +05:30
ed558a0e70 Fix: typo due to find and replace migration renamed to m
Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-06 07:45:07 +05:30
6b9c3f4824 [Proposal] Database Migrations;
Summary: The database migrations now can be created using the methods on
the migration struct. it does not break any existing migration features.
it upgrades the migration struct and adds few more struct types so that
the migrations can be efficiently generated for create, alter, reverse,
drop.

Current Features:
* Supports creation of columns
   * `m.NewCol("name").SetDataType("VARCHAR(10)").SetNullable("true")`
   * **NOTE** `SetNullable` & `SetDefault` methods should not be called on
   same column for consistency
* Supports addition of primary keys
   * `m.PriCol("id").SetDataType("INT(10)").SetNullable("true")`
   * **NOTE** `setAuto(true)` can be only called on Primary keys
* Supports addition of unique keys
   * `m.UniCol("unique_index","column_name").SetDataType("VARCHAR(23)").SetNullable("true")`
   * **NOTE** `UniCol` can be called again with the same index name to
   add column to the index
* Supports rename of columns
   * `m.RenameColumn("from_name","to_name")`
   * Allows standard column methods and methods such that, `SetOldDefault` allows
   reversibility of renames

* TODO:
   * ForeignKey

Signed-off-by: Gnanakeethan Balasubramaniam <gnanakeethan@gmail.com>
2017-07-06 07:44:48 +05:30
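The following is a hypothetical sketch built only from the builder calls quoted in the proposal above; the migration struct name, timestamp, and the surrounding registration/DDLSpec plumbing are assumptions and are not shown:

```go
package main

import "github.com/astaxie/beego/migration"

// AddUsers_20170716_000000 is an assumed migration name; only the column
// builder calls below come from the proposal text.
type AddUsers_20170716_000000 struct {
	migration.Migration
}

func (m *AddUsers_20170716_000000) Up() {
	// Primary key; setAuto(true) may only be called on primary keys.
	m.PriCol("id").SetDataType("INT(10)").SetNullable("true")
	// Plain column; SetNullable and SetDefault should not be combined on one column.
	m.NewCol("name").SetDataType("VARCHAR(10)").SetNullable("true")
	// Unique index; calling UniCol again with "unique_index" adds more columns to it.
	m.UniCol("unique_index", "column_name").SetDataType("VARCHAR(23)").SetNullable("true")
}

func (m *AddUsers_20170716_000000) Down() {
	// Renames are reversible when SetOldDefault is used alongside the standard column methods.
	m.RenameColumn("from_name", "to_name")
}
```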
7ec819deed fix #2725 big form 2017-07-04 21:16:59 +08:00
4cfb3678f8 Merge pull request #2741 from miraclesu/validation
validation: support required option for some struct tag valids
2017-07-04 15:49:36 +08:00
3c17e2a7e6 remove the comments 2017-07-04 11:03:49 +08:00
e72b02b7cc validation: support required option for some struct tag valids 2017-07-03 16:26:23 +08:00
82586c70e9 Merge pull request #2683 from jialijelly/master
Provide permission to access old files to everyone
2017-07-03 11:23:49 +08:00
cb86bcc9e8 Merge pull request #2728 from miraclesu/validation
validation: support int64 int32 int16 and int8 type on 64-bit platform
2017-07-01 15:35:48 +08:00
234708062a fix the bug in the "ParseBool" function in config.go 2017-06-29 13:32:40 +08:00
6e34f43721 Fix break API change
support int64 on 64-bit platform
2017-06-28 16:56:37 +08:00
3249ec8ebf Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-27 10:49:10 +08:00
31b2b21dbc Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-27 10:48:52 +08:00
d0c1936922 Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-22 19:18:49 +08:00
338a23a12b Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-22 19:18:34 +08:00
932def1ed2 Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-22 18:55:37 +08:00
16b5a11484 Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-22 18:55:25 +08:00
547fbce86c Merge branch 'master' of https://github.com/jialijelly/beego 2017-06-22 18:53:29 +08:00
5a2eea07cb Provide permission to access old log files to everyone 2017-06-22 18:51:52 +08:00
2231841d74 validation: support int64 int32 int16 and int8 type 2017-06-21 14:13:30 +08:00
805a674825 Merge pull request #2712 from eyalpost/develop
incorrect error rendering (wrong status)
2017-06-16 13:34:28 +08:00
f2925978f1 Merge pull request #2717 from huwenbo/master
fix panic sync: negative WaitGroup counter
2017-06-16 13:26:36 +08:00
fe3a224a23 Merge pull request #2724 from JessonChan/develop
AddAPPStartHook func modify
2017-06-16 08:20:45 +08:00
2754edc849 Merge pull request #2726 from moqiancong/develop
fix cache/memory fatal error: concurrent map iteration and map write
2017-06-16 08:20:03 +08:00
79f60274a0 fix cache/memory fatal error: concurrent map iteration and map write 2017-06-16 01:26:55 +08:00
a87c1c5e8e AddAPPStartHook func modify 2017-06-15 17:36:37 +08:00
2b00b7d66d fix panic: sync: negative WaitGroup counter 2017-06-13 20:15:43 +08:00
3d9286f089 fix panic: sync: negative WaitGroup counter 2017-06-13 15:34:57 +08:00
55e6c15073 fix panic: sync: negative WaitGroup counter 2017-06-13 15:19:51 +08:00
8b504e7d51 incorrect error rendering (wrong status) 2017-06-12 21:05:40 +03:00
d1c3bd8416 Merge pull request #2701 from eyalpost/develop
correctly handle multiple params with same type
2017-06-09 15:33:37 +08:00
0240e182c6 correctly handle multiple params with same type 2017-06-09 10:15:36 +03:00
7f2e3feb3c added pattern to FilterMonitorFunc 2017-06-05 18:21:31 -05:00
d15dd2795c added statusCode in FilterMonitorFunc 2017-06-03 15:24:45 -05:00
0ea34fff27 Provide permission to access old log files to everyone 2017-06-02 10:56:55 +08:00
5e8312bc23 Merge pull request #2654 from casbin/master
Fix the new repo address for casbin.
2017-05-19 23:34:58 +08:00
88d07058a5 Fix the new repo address for casbin. 2017-05-19 22:19:31 +08:00
cab8458c1c Merge pull request #2651 from astaxie/develop
v1.8.3
2017-05-19 21:19:18 +08:00
f0b95c552b Merge pull request #2315 from sch00lb0y/master
issue no:#2261 fix for xsrf panic error
2017-05-19 18:50:56 +08:00
ce677202e5 issue no:#2261 fix for xsrf panic error 2017-05-19 14:02:14 +05:30
720c323e20 Merge pull request #2652 from gouyang/gouyang/dev
Support timeformat "2006-01-02T15:04:05"
2017-05-19 09:48:35 +08:00
47e351e11d Support timeformat "2006-01-02T15:04:05"
Fixes #2649

Signed-off-by: Guohua Ouyang <guohuaouyang@gmail.com>
2017-05-19 09:22:27 +08:00
248beab557 v1.8.3 2017-05-18 22:55:10 +08:00
388a5610fa Merge pull request #2365 from chesedo/RequiredValidationCatchSpaces
[WIP]Have Required validator trim strings to fix #2361
2017-05-18 22:44:15 +08:00
41498758fe Merge pull request #2620 from hsluoyz/authz
Add an authorization plugin that supports ACL, RBAC based on casbin.
2017-05-18 18:18:12 +08:00
655484b4df Merge pull request #2586 from eyalpost/develop
Automatic Parameter Router
2017-05-18 18:14:49 +08:00
11b4bf8aaa move to context 2017-05-18 10:38:12 +03:00
2513bcf584 remove Redirect to avoid confusion 2017-05-18 10:32:51 +03:00
3e51823c0f move response 2017-05-18 09:05:49 +03:00
e32a18203b fix gosimple 2017-05-17 21:27:32 +03:00
ee1d8bc30e fix gosimple 2017-05-17 20:50:41 +03:00
828cbbdf5d Refactor a bit to consolidate packages 2017-05-17 20:38:59 +03:00
d54cd4fa5f Merge remote-tracking branch 'upstream/develop' into develop 2017-05-17 20:02:40 +03:00
7747e9ec8b Merge branch 'develop' of https://github.com/astaxie/beego into develop 2017-05-17 20:52:53 +08:00
9765519f38 Merge pull request #2637 from alexsunxl/develop
allow o.Raw(sql).QueryRows(&container) pass nested struct
2017-05-17 16:45:14 +08:00
69f0b94745 fix gosimple 2017-05-16 22:21:43 +08:00
3c9b6c99b7 Merge pull request #2643 from rbw0/master
Spelling fixes
2017-05-16 11:26:16 +08:00
b5c6eb54d2 Missing PK error spelling fix 2017-05-16 00:58:20 +02:00
e1c90bfc09 Table not found spelling fixes 2017-05-16 00:27:57 +02:00
91400f10b0 Merge pull request #2640 from franzwilhelm/master
Moved Security to Operation struct to support swagger API security auth
2017-05-14 19:15:20 +08:00
c814893d65 add support for global security 2017-05-14 12:13:35 +02:00
2325090101 add test case that used nested struct test QueryRows 2017-05-14 12:03:34 +08:00
40bc52b844 fix security struct placement and formatting 2017-05-14 00:42:09 +02:00
589f3755f0 Allow the container passed to o.Raw(sql).QueryRows(&container) to contain nested structs 2017-05-12 18:11:42 +08:00
1004678005 popular status codes 2017-05-12 09:57:56 +03:00
0ac2e47162 location=>paramType 2017-05-12 09:28:46 +03:00
b6a35a8944 more tests 2017-05-12 09:25:12 +03:00
74dc3c7500 tests 2017-05-11 19:32:44 +03:00
cb4f252a06 defValue -> defaultValue 2017-05-11 17:58:25 +03:00
bceefc9075 Merge pull request #2636 from guanly/master
ISSUE2630 with SQLite, ORM delete after Filter does not delete the rows
2017-05-11 22:04:27 +08:00
10cd1070f4 With SQLite, ORM delete after Filter does not delete the rows
https://github.com/astaxie/beego/issues/2630
2017-05-11 21:45:38 +08:00
9b01b1c63d ISSUE2630 with SQLite, ORM delete after Filter does not delete the rows
https://github.com/astaxie/beego/issues/2630
2017-05-11 14:49:01 +08:00
b2e7720fcd Add an authorization plugin that supports ACL, RBAC based on casbin. It requires the built-in HTTP basic authentication by default. 2017-05-04 14:02:21 +08:00
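A hedged sketch of wiring the authorization plugin in as a filter, assuming it wraps a casbin Enforcer built from a model and policy file as the casbin examples do; the import path, the NewAuthorizer name, and the file names should all be treated as assumptions:

```go
package main

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/plugins/authz"
	"github.com/casbin/casbin"
)

func main() {
	// Authorization decisions come from the ACL/RBAC model and policy loaded by casbin;
	// the built-in HTTP basic auth supplies the username being checked.
	enforcer := casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")
	beego.InsertFilter("*", beego.BeforeRouter, authz.NewAuthorizer(enforcer))
	beego.Run()
}
```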
83814a76cc hotfix: err nil 2017-05-02 12:47:15 +08:00
d3a16dca85 Redirect should returns error 2017-05-01 08:57:57 +03:00
7452151bee Merge pull request #2611 from astaxie/develop
1.8.2
2017-05-01 13:01:59 +08:00
1b8f05cef1 golint fixes 2017-04-30 19:28:26 +03:00
cfb2f68dd6 Merge remote-tracking branch 'upstream/develop' into develop 2017-04-30 18:59:50 +03:00
e76423e6dc revert #2518, fix #2605 2017-04-30 23:59:38 +08:00
947980b5eb beego 1.8.2 2017-04-30 23:43:46 +08:00
44bdf1df63 ignore NilErr 2017-04-30 23:38:48 +08:00
79b66ef053 fix the beego ORM test 2017-04-30 22:55:39 +08:00
a91e2e9950 add golint check and fix all golints 2017-04-30 22:41:23 +08:00
ea3d0690cf golint 2017-04-29 09:13:28 +08:00
1c32c011a1 fix misspell 2017-04-28 23:37:40 +08:00
64b475d7d6 fix ReadOrCreate test case 2017-04-28 22:58:17 +08:00
aa8f7bc146 fix ineffectual 2017-04-28 22:36:28 +08:00
3e29078f68 add check ineffect and gofmt 2017-04-28 21:38:08 +08:00
a1bc94e648 don't generate comment if router not found 2017-04-26 01:00:25 +03:00
4cba78afd9 small fixes 2017-04-25 23:42:35 +03:00
f311ae9ebe feature: Export function printTree (#2597)
This exports the PrintTree function, which makes it possible to build Role Based Access Control, collect our own request statistics, etc.
2017-04-25 18:12:03 +02:00
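A short sketch of consuming the newly exported PrintTree; the map keys and value types follow the admin module diff further down, while the helper name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego"
)

func dumpRouters() {
	// content["Methods"] lists HTTP methods; content["Data"] maps each method
	// to its registered routes (pattern, methods, controller).
	content := beego.PrintTree()
	methods, _ := content["Methods"].([]string)
	data, _ := content["Data"].(map[string]interface{})
	for _, method := range methods {
		if rows, ok := data[method].(*[][]string); ok {
			for _, row := range *rows {
				fmt.Println(method, row)
			}
		}
	}
}
```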
cbd831042a move under context 2017-04-25 18:39:42 +03:00
522b3a4a70 Merge pull request #2596 from astaxie/develop
beego 1.8.1
2017-04-25 22:51:23 +08:00
cd67f13bf9 add template layout test #2481 2017-04-25 22:21:27 +08:00
0f554d9b1a update travis to latest go version 2017-04-25 22:21:08 +08:00
9b79437778 all types working + controller comments generation 2017-04-25 16:00:49 +03:00
3742d1178c httplib support delete params fix #2593 2017-04-25 20:42:43 +08:00
3b29a9c12a Merge remote-tracking branch 'upstream/develop' into develop 2017-04-24 18:23:58 +03:00
d03285a0ee Merge pull request #2555 from Liaodd/master
Update ini.go: change the key to lowercase when set a new key for ini configer
2017-04-24 22:37:25 +08:00
e810f2e930 add more oracle alias 2017-04-24 21:36:07 +08:00
41aac79ac0 Merge pull request #2590 from amrfaissal/fix-2587
Fix warnings raised by gometalinter and gosimple
2017-04-24 21:14:00 +08:00
52f916a28a support Go1.8 default GOPATH 2017-04-24 21:10:03 +08:00
864693d2f8 small fixes 2017-04-24 02:35:04 +03:00
08ea9b3339 Merge remote-tracking branch 'upstream/develop' into develop 2017-04-23 22:07:46 +03:00
19f4a6ac0b slice support 2017-04-23 21:37:09 +03:00
bf6bd6b292 Fixes #2587
Fixes warnings and errors raised by gometalinter and gosimple.
2017-04-23 19:19:05 +02:00
89e01d125c all types implemented 2017-04-23 01:33:50 +03:00
3bb4ca5adc Merge pull request #2583 from OlegFX/develop
Fixed InsertOrUpdate bug
2017-04-22 11:19:15 +08:00
712df81c99 Fixed InsertOrUpdate bug 2017-04-21 19:57:04 +03:00
3d7ef599cc Merge pull request #2 from astaxie/develop
Update
2017-04-21 19:50:33 +03:00
9aedb4d05a phase #1 2017-04-21 15:26:41 +03:00
453691728a gofmt simplify 2017-04-20 10:56:09 +08:00
e7e3ca77ad Merge pull request #2491 from chendx79/master
Fix routing bug for splat matching
2017-04-19 21:20:30 +08:00
d3f3956def Merge pull request #2574 from extrlibs/master
Fix the following template reference
2017-04-19 21:18:26 +08:00
7206214105 Merge pull request #2575 from PaulChen2016/master
When timed tasks are added or removed while Beego is running, `now` needs to be refreshed, otherwise the wait time is wrong
2017-04-19 21:17:38 +08:00
b08ace7532 Merge pull request #2577 from ggicci/develop
Fix ini parsing error for multiple users on one machine.
2017-04-19 20:17:45 +08:00
d1a2583972 Fix ini parsing error for multiple users on one machine.
If there were multiple users working on one machine, it's common that
"/tmp/beego" will be owned by one of them, and the others won't be able
to access to it. So, it's better to add an "id-like" postfix to the
temporary directory.
2017-04-19 19:50:11 +08:00
0cb8de4218 When timed tasks are added or removed while Beego is running, `now` needs to be refreshed, otherwise the wait time is wrong
At startup, beego calls toolbox.StartTask().
When a task is added dynamically afterwards, `now` is no longer the time StartTask captured, so it has to be refreshed:
     case <-changed:
			now = time.Now().Local()
			continue
2017-04-19 16:22:58 +08:00
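A minimal sketch of the scenario this fixes, using the toolbox task API; the task name, cron spec, and task body are placeholders:

```go
package main

import "github.com/astaxie/beego/toolbox"

func main() {
	// The scheduler starts with whatever tasks exist at this point.
	toolbox.StartTask()
	defer toolbox.StopTask()

	// A task added later; before the fix the scheduler kept waiting against the
	// stale `now` captured when StartTask was called.
	tk := toolbox.NewTask("cleanup", "0 0 * * * *", func() error {
		// periodic work goes here
		return nil
	})
	toolbox.AddTask("cleanup", tk)

	select {} // keep the process alive so the task can run
}
```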
0cd31a247f fix Template nesting problem 2017-04-19 07:47:05 +08:00
405c170d45 Merge pull request #2556 from zjjott/master
fix: log mode should be 0440, not 440
2017-04-10 21:14:34 +08:00
932019770d fix: log mode 0440 should not be 440 2017-04-10 17:37:55 +08:00
d5c03f5b8f Update ini.go
change the key to lowercase when set a new key for ini configer
2017-04-10 11:30:23 +08:00
3d20c0b8f4 Merge pull request #2543 from Bobochka/fix_route_hander_docs
Fix example for Handler func
2017-04-07 23:04:38 +08:00
a4fb4c6a03 Merge pull request #2547 from TrueFurby/patch-1
Update README.md
2017-04-03 20:25:41 +02:00
a9941b6edc Merge branch 'develop' into patch-1 2017-04-02 13:46:06 +02:00
6a32b048bd Update README.md 2017-04-02 11:04:08 +02:00
fb04d3cff1 Fix example for Handler func 2017-03-30 12:53:12 +03:00
5c7673e73d update to 1.8.1 2017-03-29 10:22:27 +08:00
54b05377d9 Merge pull request #2466 from gouyang/gouyang/develop
Parse form time by its length
2017-03-29 10:01:03 +08:00
f49f3f92ec Merge pull request #2533 from cnxh/redis-session-poolsize
redis poolsize can be set to zero
2017-03-29 10:00:28 +08:00
b18b94f03b Merge pull request #2539 from astaxie/revert-2421-develop
Revert "close mysql connection"
2017-03-29 09:57:54 +08:00
bf469f0b55 Revert "close mysql connection" 2017-03-29 09:57:18 +08:00
c12709dbc9 Merge pull request #2536 from ChristophPech/master
Fix for IndexExists in SQLite driver
2017-03-29 09:53:55 +08:00
2808a13f07 Fix for IndexExists in SQLite driver, they added the "origin" and "partial" columns to the index_list pragma.
see: https://www.sqlite.org/src/info/2743846cdba572f6
2017-03-28 12:38:27 +02:00
a8d48aa5ea Merge pull request #2535 from ketanhwr/patch-1
Update README.md
2017-03-28 10:49:52 +02:00
fdb2660a2a Update README.md 2017-03-28 11:29:20 +05:30
7c3a997735 poolsize can be set to zero
sometimes we may want to disable the redis pool
2017-03-27 23:34:58 +08:00
1760f0ceca Merge pull request #2532 from vbalien/patch-1
Fix markdown formatting
2017-03-27 13:34:19 +08:00
c387aeeb36 fix markdown formatting 2017-03-27 14:30:42 +09:00
83d4385f1f Support time.RFC3339
Signed-off-by: Guohua Ouyang <guohuaouyang@gmail.com>
2017-03-25 07:52:43 +08:00
ae0a75c464 console debug uses a different color 2017-03-21 23:55:40 +08:00
a05e5a7c09 Merge branches 'master' and 'develop' of https://github.com/astaxie/beego into develop 2017-03-21 23:47:32 +08:00
75ca7b77b6 Merge pull request #2518 from sicojuy/develop
Fix set cookie bug, zero max age is not valid.
2017-03-21 09:44:37 +08:00
b0e2012a17 Fix set cookie bug, zero max age is not valid. 2017-03-21 09:15:30 +08:00
12dff072fa Merge pull request #2509 from sergeylanzman/add-gosimple
add go simple support
2017-03-18 13:37:31 +08:00
d956444965 Merge branch 'develop' into add-gosimple 2017-03-18 11:20:30 +08:00
8dbf9eb0bf Merge pull request #2512 from sergeylanzman/add-unconvert
add unconvert support
2017-03-18 11:05:28 +08:00
faa981fb6a Merge pull request #2511 from sergeylanzman/drop-go1.4-1.5-travis
drop support go1.4/5 in travis yml
2017-03-18 11:02:39 +08:00
1ea3c13ff5 Merge branch 'develop' into drop-go1.4-1.5-travis 2017-03-18 11:02:15 +08:00
46d8fef0ad Merge pull request #2510 from sergeylanzman/travis-add-1.7-1.8
add support go1.7/8 to travis yml
2017-03-18 11:01:27 +08:00
37c1ffc57a add go simple support 2017-03-17 20:22:20 +02:00
856fde28dc add unconvert support 2017-03-17 19:45:30 +02:00
3204d7631b drop support go1.4/5 in travis yml 2017-03-17 19:33:54 +02:00
f5fc2edfd3 add support go1.7/8 to travis yml 2017-03-17 19:32:31 +02:00
21d1267c14 Merge pull request #2501 from sergeylanzman/change-template-error-log
template parse errors were written to the log as trace; change to error
2017-03-14 20:04:42 +08:00
3e37b97549 template parse errors were written to the log as trace; change to error 2017-03-14 12:55:40 +02:00
206f736819 Merge pull request #2496 from eyalpost/develop
Don't panic during AddViewPath if adding the same path twice
2017-03-14 12:54:48 +08:00
24d4a27842 Don't panic during AddViewPath if adding the same path twice 2017-03-13 08:46:57 +02:00
46f3ea4f43 Merge pull request #2494 from miraclesu/develop
validation: fix email valid
2017-03-12 22:26:25 +08:00
c9cc642d37 validation: amend email test case 2017-03-12 20:27:09 +08:00
b34853f8cc validation: add test case for email valid 2017-03-12 19:30:23 +08:00
d41f4c0a3a validation: fix email valid 2017-03-12 19:23:46 +08:00
8e46decc8e fix routing bug for splat 2017-03-10 09:28:25 +08:00
49fffe3ebe Parse form time by its length
Fix #2451

Signed-off-by: Guohua Ouyang <guohuaouyang@gmail.com>
2017-02-27 14:43:16 +08:00
a8a2dffc59 Have Required validator trim strings to fix #2361
This will cause the Required validator not to consider fields that has
only spaces or new lines to be regarded as valid. This is done by
checking if the trimmed version of the string is valid.
2017-01-06 10:12:22 +02:00
118 changed files with 2710 additions and 883 deletions

4
.gosimpleignore Normal file
View File

@ -0,0 +1,4 @@
github.com/astaxie/beego/*/*:S1012
github.com/astaxie/beego/*:S1012
github.com/astaxie/beego/*/*:S1007
github.com/astaxie/beego/*:S1007

View File

@ -1,9 +1,9 @@
language: go language: go
go: go:
- 1.6 - 1.6.4
- 1.5.3 - 1.7.5
- 1.4.3 - 1.8.1
services: services:
- redis-server - redis-server
- mysql - mysql
@ -33,6 +33,12 @@ install:
- go get github.com/ssdb/gossdb/ssdb - go get github.com/ssdb/gossdb/ssdb
- go get github.com/cloudflare/golz4 - go get github.com/cloudflare/golz4
- go get github.com/gogo/protobuf/proto - go get github.com/gogo/protobuf/proto
- go get github.com/Knetic/govaluate
- go get github.com/casbin/casbin
- go get -u honnef.co/go/tools/cmd/gosimple
- go get -u github.com/mdempsky/unconvert
- go get -u github.com/gordonklaus/ineffassign
- go get -u github.com/golang/lint/golint
before_script: before_script:
- psql --version - psql --version
- sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi" - sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi"
@ -47,5 +53,10 @@ after_script:
- rm -rf ./res/var/* - rm -rf ./res/var/*
script: script:
- go test -v ./... - go test -v ./...
- gosimple -ignore "$(cat .gosimpleignore)" $(go list ./... | grep -v /vendor/)
- unconvert $(go list ./... | grep -v /vendor/)
- ineffassign .
- find . ! \( -path './vendor' -prune \) -type f -name '*.go' -print0 | xargs -0 gofmt -l -s
- golint ./...
addons: addons:
postgresql: "9.4" postgresql: "9.4"

View File

@ -1,20 +1,17 @@
## Beego # Beego [![Build Status](https://travis-ci.org/astaxie/beego.svg?branch=master)](https://travis-ci.org/astaxie/beego) [![GoDoc](http://godoc.org/github.com/astaxie/beego?status.svg)](http://godoc.org/github.com/astaxie/beego) [![Foundation](https://img.shields.io/badge/Golang-Foundation-green.svg)](http://golangfoundation.org)
[![Build Status](https://travis-ci.org/astaxie/beego.svg?branch=master)](https://travis-ci.org/astaxie/beego)
[![GoDoc](http://godoc.org/github.com/astaxie/beego?status.svg)](http://godoc.org/github.com/astaxie/beego)
[![Foundation](https://img.shields.io/badge/Golang-Foundation-green.svg)](http://golangfoundation.org)
beego is used for rapid development of RESTful APIs, web apps and backend services in Go. beego is used for rapid development of RESTful APIs, web apps and backend services in Go.
It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding. It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding.
More info [beego.me](http://beego.me) ###### More info at [beego.me](http://beego.me).
## Quick Start ## Quick Start
######Download and install
#### Download and install
go get github.com/astaxie/beego go get github.com/astaxie/beego
######Create file `hello.go` #### Create file `hello.go`
```go ```go
package main package main
@ -24,15 +21,16 @@ func main(){
beego.Run() beego.Run()
} }
``` ```
######Build and run #### Build and run
```bash
go build hello.go go build hello.go
./hello ./hello
```
######Congratulations! #### Go to [http://localhost:8080](http://localhost:8080)
You just built your first beego app.
Open your browser and visit `http://localhost:8080`. Congratulations! You've just built your first **beego** app.
Please see [Documentation](http://beego.me/docs) for more.
###### Please see [Documentation](http://beego.me/docs) for more.
## Features ## Features
@ -56,7 +54,7 @@ Please see [Documentation](http://beego.me/docs) for more.
* [http://beego.me/community](http://beego.me/community) * [http://beego.me/community](http://beego.me/community)
* Welcome to join us in Slack: [https://beego.slack.com](https://beego.slack.com), you can get invited from [here](https://github.com/beego/beedoc/issues/232) * Welcome to join us in Slack: [https://beego.slack.com](https://beego.slack.com), you can get invited from [here](https://github.com/beego/beedoc/issues/232)
## LICENSE ## License
beego source code is licensed under the Apache Licence, Version 2.0 beego source code is licensed under the Apache Licence, Version 2.0
(http://www.apache.org/licenses/LICENSE-2.0.html). (http://www.apache.org/licenses/LICENSE-2.0.html).

View File

@ -37,7 +37,7 @@ var beeAdminApp *adminApp
// FilterMonitorFunc is default monitor filter when admin module is enable. // FilterMonitorFunc is default monitor filter when admin module is enable.
// if this func returns, admin module records qbs for this request by condition of this function logic. // if this func returns, admin module records qbs for this request by condition of this function logic.
// usage: // usage:
// func MyFilterMonitor(method, requestPath string, t time.Duration) bool { // func MyFilterMonitor(method, requestPath string, t time.Duration, pattern string, statusCode int) bool {
// if method == "POST" { // if method == "POST" {
// return false // return false
// } // }
@ -50,7 +50,7 @@ var beeAdminApp *adminApp
// return true // return true
// } // }
// beego.FilterMonitorFunc = MyFilterMonitor. // beego.FilterMonitorFunc = MyFilterMonitor.
var FilterMonitorFunc func(string, string, time.Duration) bool var FilterMonitorFunc func(string, string, time.Duration, string, int) bool
func init() { func init() {
beeAdminApp = &adminApp{ beeAdminApp = &adminApp{
@ -62,7 +62,7 @@ func init() {
beeAdminApp.Route("/healthcheck", healthcheck) beeAdminApp.Route("/healthcheck", healthcheck)
beeAdminApp.Route("/task", taskStatus) beeAdminApp.Route("/task", taskStatus)
beeAdminApp.Route("/listconf", listConf) beeAdminApp.Route("/listconf", listConf)
FilterMonitorFunc = func(string, string, time.Duration) bool { return true } FilterMonitorFunc = func(string, string, time.Duration, string, int) bool { return true }
} }
// AdminIndex is the default http.Handler for admin module. // AdminIndex is the default http.Handler for admin module.
@ -105,29 +105,12 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
tmpl.Execute(rw, data) tmpl.Execute(rw, data)
case "router": case "router":
var ( content := PrintTree()
content = map[string]interface{}{ content["Fields"] = []string{
"Fields": []string{
"Router Pattern", "Router Pattern",
"Methods", "Methods",
"Controller", "Controller",
},
} }
methods = []string{}
methodsData = make(map[string]interface{})
)
for method, t := range BeeApp.Handlers.routers {
resultList := new([][]string)
printTree(resultList, t)
methods = append(methods, method)
methodsData[method] = resultList
}
content["Data"] = methodsData
content["Methods"] = methods
data["Content"] = content data["Content"] = content
data["Title"] = "Routers" data["Title"] = "Routers"
execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl) execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl)
@ -157,8 +140,8 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
resultList := new([][]string) resultList := new([][]string)
for _, f := range bf { for _, f := range bf {
var result = []string{ var result = []string{
fmt.Sprintf("%s", f.pattern), f.pattern,
fmt.Sprintf("%s", utils.GetFuncName(f.filterFunc)), utils.GetFuncName(f.filterFunc),
} }
*resultList = append(*resultList, result) *resultList = append(*resultList, result)
} }
@ -200,6 +183,28 @@ func list(root string, p interface{}, m map[string]interface{}) {
} }
} }
// PrintTree prints all registered routers.
func PrintTree() map[string]interface{} {
var (
content = map[string]interface{}{}
methods = []string{}
methodsData = make(map[string]interface{})
)
for method, t := range BeeApp.Handlers.routers {
resultList := new([][]string)
printTree(resultList, t)
methods = append(methods, method)
methodsData[method] = resultList
}
content["Data"] = methodsData
content["Methods"] = methods
return content
}
func printTree(resultList *[][]string, t *Tree) { func printTree(resultList *[][]string, t *Tree) {
for _, tr := range t.fixrouters { for _, tr := range t.fixrouters {
printTree(resultList, tr) printTree(resultList, tr)
@ -208,12 +213,12 @@ func printTree(resultList *[][]string, t *Tree) {
printTree(resultList, t.wildcard) printTree(resultList, t.wildcard)
} }
for _, l := range t.leaves { for _, l := range t.leaves {
if v, ok := l.runObject.(*controllerInfo); ok { if v, ok := l.runObject.(*ControllerInfo); ok {
if v.routerType == routerTypeBeego { if v.routerType == routerTypeBeego {
var result = []string{ var result = []string{
v.pattern, v.pattern,
fmt.Sprintf("%s", v.methods), fmt.Sprintf("%s", v.methods),
fmt.Sprintf("%s", v.controllerType), v.controllerType.String(),
} }
*resultList = append(*resultList, result) *resultList = append(*resultList, result)
} else if v.routerType == routerTypeRESTFul { } else if v.routerType == routerTypeRESTFul {
@ -276,8 +281,8 @@ func profIndex(rw http.ResponseWriter, r *http.Request) {
// it's in "/healthcheck" pattern in admin module. // it's in "/healthcheck" pattern in admin module.
func healthcheck(rw http.ResponseWriter, req *http.Request) { func healthcheck(rw http.ResponseWriter, req *http.Request) {
var ( var (
result []string
data = make(map[interface{}]interface{}) data = make(map[interface{}]interface{})
result = []string{}
resultList = new([][]string) resultList = new([][]string)
content = map[string]interface{}{ content = map[string]interface{}{
"Fields": []string{"Name", "Message", "Status"}, "Fields": []string{"Name", "Message", "Status"},
@ -287,21 +292,20 @@ func healthcheck(rw http.ResponseWriter, req *http.Request) {
for name, h := range toolbox.AdminCheckList { for name, h := range toolbox.AdminCheckList {
if err := h.Check(); err != nil { if err := h.Check(); err != nil {
result = []string{ result = []string{
fmt.Sprintf("error"), "error",
fmt.Sprintf("%s", name), name,
fmt.Sprintf("%s", err.Error()), err.Error(),
} }
} else { } else {
result = []string{ result = []string{
fmt.Sprintf("success"), "success",
fmt.Sprintf("%s", name), name,
fmt.Sprintf("OK"), "OK",
} }
} }
*resultList = append(*resultList, result) *resultList = append(*resultList, result)
} }
content["Data"] = resultList content["Data"] = resultList
data["Content"] = content data["Content"] = content
data["Title"] = "Health Check" data["Title"] = "Health Check"
@ -330,7 +334,6 @@ func taskStatus(rw http.ResponseWriter, req *http.Request) {
// List Tasks // List Tasks
content := make(map[string]interface{}) content := make(map[string]interface{})
resultList := new([][]string) resultList := new([][]string)
var result = []string{}
var fields = []string{ var fields = []string{
"Task Name", "Task Name",
"Task Spec", "Task Spec",
@ -339,10 +342,10 @@ func taskStatus(rw http.ResponseWriter, req *http.Request) {
"", "",
} }
for tname, tk := range toolbox.AdminTaskList { for tname, tk := range toolbox.AdminTaskList {
result = []string{ result := []string{
tname, tname,
fmt.Sprintf("%s", tk.GetSpec()), tk.GetSpec(),
fmt.Sprintf("%s", tk.GetStatus()), tk.GetStatus(),
tk.GetPrev().String(), tk.GetPrev().String(),
} }
*resultList = append(*resultList, result) *resultList = append(*resultList, result)

6
app.go
View File

@ -348,9 +348,9 @@ func Any(rootpath string, f FilterFunc) *App {
 // Handler used to register a Handler router
 // usage:
-// beego.Handler("/api", func(ctx *context.Context){
-//    ctx.Output.Body("hello world")
-// })
+// beego.Handler("/api", http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+//    fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+// }))
 func Handler(rootpath string, h http.Handler, options ...interface{}) *App {
 	BeeApp.Handlers.Handler(rootpath, h, options...)
 	return BeeApp

View File

@ -23,7 +23,7 @@ import (
 const (
 	// VERSION represent beego web framework version.
-	VERSION = "1.8.0"
+	VERSION = "1.9.0"
 	// DEV is for develop
 	DEV = "dev"
@ -40,9 +40,9 @@ var (
 // AddAPPStartHook is used to register the hookfunc
 // The hookfuncs will run in beego.Run()
-// such as sessionInit, middlerware start, buildtemplate, admin start
-func AddAPPStartHook(hf hookfunc) {
-	hooks = append(hooks, hf)
+// such as initiating session, starting middleware, building template, starting admin control and so on.
+func AddAPPStartHook(hf ...hookfunc) {
+	hooks = append(hooks, hf...)
 }
 // Run beego application.
@ -69,12 +69,14 @@ func Run(params ...string) {
 func initBeforeHTTPRun() {
 	//init hooks
-	AddAPPStartHook(registerMime)
-	AddAPPStartHook(registerDefaultErrorHandler)
-	AddAPPStartHook(registerSession)
-	AddAPPStartHook(registerTemplate)
-	AddAPPStartHook(registerAdmin)
-	AddAPPStartHook(registerGzip)
+	AddAPPStartHook(
+		registerMime,
+		registerDefaultErrorHandler,
+		registerSession,
+		registerTemplate,
+		registerAdmin,
+		registerGzip,
+	)
 	for _, hk := range hooks {
 		if err := hk(); err != nil {
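With the variadic AddAPPStartHook above, application code can register several startup hooks in one call; a minimal sketch, with the hook bodies as placeholders:

```go
package main

import (
	"log"

	"github.com/astaxie/beego"
)

func main() {
	// Each hook is a func() error and runs during beego.Run().
	beego.AddAPPStartHook(
		func() error { log.Println("warm caches"); return nil },
		func() error { log.Println("register metrics"); return nil },
	)
	beego.Run()
}
```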

2
cache/conv.go vendored
View File

@ -28,7 +28,7 @@ func GetString(v interface{}) string {
return string(result) return string(result)
default: default:
if v != nil { if v != nil {
return fmt.Sprintf("%v", result) return fmt.Sprint(result)
} }
} }
return "" return ""

6
cache/conv_test.go vendored
View File

@ -118,14 +118,14 @@ func TestGetFloat64(t *testing.T) {
func TestGetBool(t *testing.T) { func TestGetBool(t *testing.T) {
var t1 = true var t1 = true
if true != GetBool(t1) { if !GetBool(t1) {
t.Error("get bool from bool error") t.Error("get bool from bool error")
} }
var t2 = "true" var t2 = "true"
if true != GetBool(t2) { if !GetBool(t2) {
t.Error("get bool from string error") t.Error("get bool from string error")
} }
if false != GetBool(nil) { if GetBool(nil) {
t.Error("get bool from nil error") t.Error("get bool from nil error")
} }
} }

View File

@ -146,10 +146,7 @@ func (rc *Cache) IsExist(key string) bool {
} }
} }
_, err := rc.conn.Get(key) _, err := rc.conn.Get(key)
if err != nil { return !(err != nil)
return false
}
return true
} }
// ClearAll clear all cached in memcache. // ClearAll clear all cached in memcache.

31
cache/memory.go vendored
View File

@ -217,26 +217,31 @@ func (bc *MemoryCache) vaccuum() {
if bc.items == nil { if bc.items == nil {
return return
} }
for name := range bc.items { if keys := bc.expiredKeys(); len(keys) != 0 {
bc.itemExpired(name) bc.clearItems(keys)
} }
} }
} }
// itemExpired returns true if an item is expired. // expiredKeys returns key list which are expired.
func (bc *MemoryCache) itemExpired(name string) bool { func (bc *MemoryCache) expiredKeys() (keys []string) {
bc.RLock()
defer bc.RUnlock()
for key, itm := range bc.items {
if itm.isExpire() {
keys = append(keys, key)
}
}
return
}
// clearItems removes all the items which key in keys.
func (bc *MemoryCache) clearItems(keys []string) {
bc.Lock() bc.Lock()
defer bc.Unlock() defer bc.Unlock()
for _, key := range keys {
itm, ok := bc.items[name] delete(bc.items, key)
if !ok {
return true
} }
if itm.isExpire() {
delete(bc.items, name)
return true
}
return false
} }
func init() { func init() {

View File

@ -137,7 +137,7 @@ func (rc *Cache) IsExist(key string) bool {
if err != nil { if err != nil {
return false return false
} }
if v == false { if !v {
if _, err = rc.do("HDEL", rc.key, key); err != nil { if _, err = rc.do("HDEL", rc.key, key); err != nil {
return false return false
} }

13
cache/ssdb/ssdb.go vendored
View File

@ -53,7 +53,7 @@ func (rc *Cache) GetMulti(keys []string) []interface{} {
resSize := len(res) resSize := len(res)
if err == nil { if err == nil {
for i := 1; i < resSize; i += 2 { for i := 1; i < resSize; i += 2 {
values = append(values, string(res[i+1])) values = append(values, res[i+1])
} }
return values return values
} }
@ -71,11 +71,8 @@ func (rc *Cache) DelMulti(keys []string) error {
} }
} }
_, err := rc.conn.Do("multi_del", keys) _, err := rc.conn.Do("multi_del", keys)
if err != nil {
return err return err
} }
return nil
}
// Put put value to memcache. only support string. // Put put value to memcache. only support string.
func (rc *Cache) Put(key string, value interface{}, timeout time.Duration) error { func (rc *Cache) Put(key string, value interface{}, timeout time.Duration) error {
@ -113,11 +110,8 @@ func (rc *Cache) Delete(key string) error {
} }
} }
_, err := rc.conn.Del(key) _, err := rc.conn.Del(key)
if err != nil {
return err return err
} }
return nil
}
// Incr increase counter. // Incr increase counter.
func (rc *Cache) Incr(key string) error { func (rc *Cache) Incr(key string) error {
@ -175,7 +169,7 @@ func (rc *Cache) ClearAll() error {
} }
keys := []string{} keys := []string{}
for i := 1; i < size; i += 2 { for i := 1; i < size; i += 2 {
keys = append(keys, string(resp[i])) keys = append(keys, resp[i])
} }
_, e := rc.conn.Do("multi_del", keys) _, e := rc.conn.Do("multi_del", keys)
if e != nil { if e != nil {
@ -229,11 +223,8 @@ func (rc *Cache) connectInit() error {
} }
var err error var err error
rc.conn, err = ssdb.Connect(host, port) rc.conn, err = ssdb.Connect(host, port)
if err != nil {
return err return err
} }
return nil
}
func init() { func init() {
cache.Register("ssdb", NewSsdbCache) cache.Register("ssdb", NewSsdbCache)

View File

@ -345,7 +345,7 @@ func assignSingleConfig(p interface{}, ac config.Configer) {
case reflect.String: case reflect.String:
pf.SetString(ac.DefaultString(name, pf.String())) pf.SetString(ac.DefaultString(name, pf.String()))
case reflect.Int, reflect.Int64: case reflect.Int, reflect.Int64:
pf.SetInt(int64(ac.DefaultInt64(name, pf.Int()))) pf.SetInt(ac.DefaultInt64(name, pf.Int()))
case reflect.Bool: case reflect.Bool:
pf.SetBool(ac.DefaultBool(name, pf.Bool())) pf.SetBool(ac.DefaultBool(name, pf.Bool()))
case reflect.Struct: case reflect.Struct:

View File

@ -189,16 +189,16 @@ func ParseBool(val interface{}) (value bool, err error) {
 			return false, nil
 		}
 	case int8, int32, int64:
-		strV := fmt.Sprintf("%s", v)
+		strV := fmt.Sprintf("%d", v)
 		if strV == "1" {
 			return true, nil
 		} else if strV == "0" {
 			return false, nil
 		}
 	case float64:
-		if v == 1 {
+		if v == 1.0 {
 			return true, nil
-		} else if v == 0 {
+		} else if v == 0.0 {
 			return false, nil
 		}
 	}
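For callers, the effect of the %d fix above is that integer inputs parse as booleans again; a small sketch using the exported config.ParseBool, with illustrative values:

```go
package main

import (
	"fmt"

	"github.com/astaxie/beego/config"
)

func main() {
	// Previously fmt.Sprintf("%s", int64(1)) produced "%!s(int64=1)", so neither
	// the "1" nor the "0" branch matched and ParseBool returned an error.
	v, err := config.ParseBool(int64(1))
	fmt.Println(v, err) // true <nil>

	v, err = config.ParseBool(float64(0))
	fmt.Println(v, err) // false <nil>
}
```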

2
config/env/env.go vendored
View File

@ -12,6 +12,8 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// Package env is used to parse environment.
package env package env
import ( import (

View File

@ -21,6 +21,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"os/user"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
@ -184,10 +185,17 @@ func (ini *IniConfig) parseData(dir string, data []byte) (*IniConfigContainer, e
// ParseData parse ini the data // ParseData parse ini the data
// When include other.conf,other.conf is either absolute directory // When include other.conf,other.conf is either absolute directory
// or under beego in default temporary directory(/tmp/beego). // or under beego in default temporary directory(/tmp/beego[-username]).
func (ini *IniConfig) ParseData(data []byte) (Configer, error) { func (ini *IniConfig) ParseData(data []byte) (Configer, error) {
dir := filepath.Join(os.TempDir(), "beego") dir := "beego"
os.MkdirAll(dir, os.ModePerm) currentUser, err := user.Current()
if err == nil {
dir = "beego-" + currentUser.Username
}
dir = filepath.Join(os.TempDir(), dir)
if err = os.MkdirAll(dir, os.ModePerm); err != nil {
return nil, err
}
return ini.parseData(dir, data) return ini.parseData(dir, data)
} }
@ -317,7 +325,10 @@ func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) {
// Get section or key comments. Fixed #1607 // Get section or key comments. Fixed #1607
getCommentStr := func(section, key string) string { getCommentStr := func(section, key string) string {
comment, ok := "", false var (
comment string
ok bool
)
if len(key) == 0 { if len(key) == 0 {
comment, ok = c.sectionComment[section] comment, ok = c.sectionComment[section]
} else { } else {
@ -397,12 +408,9 @@ func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) {
} }
} }
} }
_, err = buf.WriteTo(f)
if _, err = buf.WriteTo(f); err != nil {
return err return err
} }
return nil
}
// Set writes a new value for key. // Set writes a new value for key.
// if write to one section, the key need be "section::key". // if write to one section, the key need be "section::key".
@ -416,7 +424,7 @@ func (c *IniConfigContainer) Set(key, value string) error {
var ( var (
section, k string section, k string
sectionKey = strings.Split(key, "::") sectionKey = strings.Split(strings.ToLower(key), "::")
) )
if len(sectionKey) >= 2 { if len(sectionKey) >= 2 {

View File

@ -181,7 +181,7 @@ name=mysql
cfgData := string(data) cfgData := string(data)
datas := strings.Split(saveResult, "\n") datas := strings.Split(saveResult, "\n")
for _, line := range datas { for _, line := range datas {
if strings.Contains(cfgData, line+"\n") == false { if !strings.Contains(cfgData, line+"\n") {
t.Fatalf("different after save ini config file. need contains %q", line) t.Fatalf("different after save ini config file. need contains %q", line)
} }
} }

View File

@ -39,6 +39,7 @@ var (
getMethodOnly bool getMethodOnly bool
) )
// InitGzip init the gzipcompress
func InitGzip(minLength, compressLevel int, methods []string) { func InitGzip(minLength, compressLevel int, methods []string) {
if minLength >= 0 { if minLength >= 0 {
gzipMinLength = minLength gzipMinLength = minLength

View File

@ -171,6 +171,22 @@ func (ctx *Context) CheckXSRFCookie() bool {
return true return true
} }
// RenderMethodResult renders the return value of a controller method to the output
func (ctx *Context) RenderMethodResult(result interface{}) {
if result != nil {
renderer, ok := result.(Renderer)
if !ok {
err, ok := result.(error)
if ok {
renderer = errorRenderer(err)
} else {
renderer = jsonRenderer(result)
}
}
renderer.Render(ctx)
}
}
//Response is a wrapper for the http.ResponseWriter //Response is a wrapper for the http.ResponseWriter
//started set to true if response was written to then don't execute other handler //started set to true if response was written to then don't execute other handler
type Response struct { type Response struct {

View File

@ -16,9 +16,11 @@ package context
import ( import (
"bytes" "bytes"
"compress/gzip"
"errors" "errors"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http"
"net/url" "net/url"
"reflect" "reflect"
"regexp" "regexp"
@ -349,11 +351,22 @@ func (input *BeegoInput) CopyBody(MaxMemory int64) []byte {
if input.Context.Request.Body == nil { if input.Context.Request.Body == nil {
return []byte{} return []byte{}
} }
var requestbody []byte
safe := &io.LimitedReader{R: input.Context.Request.Body, N: MaxMemory} safe := &io.LimitedReader{R: input.Context.Request.Body, N: MaxMemory}
requestbody, _ := ioutil.ReadAll(safe) if input.Header("Content-Encoding") == "gzip" {
reader, err := gzip.NewReader(safe)
if err != nil {
return nil
}
requestbody, _ = ioutil.ReadAll(reader)
} else {
requestbody, _ = ioutil.ReadAll(safe)
}
input.Context.Request.Body.Close() input.Context.Request.Body.Close()
bf := bytes.NewBuffer(requestbody) bf := bytes.NewBuffer(requestbody)
input.Context.Request.Body = ioutil.NopCloser(bf) input.Context.Request.Body = http.MaxBytesReader(input.Context.ResponseWriter, ioutil.NopCloser(bf), MaxMemory)
input.RequestBody = requestbody input.RequestBody = requestbody
return requestbody return requestbody
} }
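A client-side sketch of the request shape that the CopyBody change above now handles: when Content-Encoding is gzip, the request body is decompressed transparently. The URL and payload are placeholders.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"net/http"
)

func main() {
	// Compress the JSON payload before sending it.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte(`{"name":"beego"}`))
	zw.Close()

	req, _ := http.NewRequest("POST", "http://localhost:8080/api", &buf)
	req.Header.Set("Content-Type", "application/json")
	// This header is what CopyBody checks before wrapping the body in a gzip.Reader.
	req.Header.Set("Content-Encoding", "gzip")
	http.DefaultClient.Do(req)
}
```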

View File

@ -73,8 +73,8 @@ func TestBind(t *testing.T) {
{"/?human.ID=888&human.Nick=astaxie&human.Ms=true&human[Pwd]=pass", []testItem{{"human", Human{}, Human{ID: 888, Nick: "astaxie", Ms: true, Pwd: "pass"}}}}, {"/?human.ID=888&human.Nick=astaxie&human.Ms=true&human[Pwd]=pass", []testItem{{"human", Human{}, Human{ID: 888, Nick: "astaxie", Ms: true, Pwd: "pass"}}}},
{"/?human[0].ID=888&human[0].Nick=astaxie&human[0].Ms=true&human[0][Pwd]=pass01&human[1].ID=999&human[1].Nick=ysqi&human[1].Ms=On&human[1].Pwd=pass02", {"/?human[0].ID=888&human[0].Nick=astaxie&human[0].Ms=true&human[0][Pwd]=pass01&human[1].ID=999&human[1].Nick=ysqi&human[1].Ms=On&human[1].Pwd=pass02",
[]testItem{{"human", []Human{}, []Human{ []testItem{{"human", []Human{}, []Human{
Human{ID: 888, Nick: "astaxie", Ms: true, Pwd: "pass01"}, {ID: 888, Nick: "astaxie", Ms: true, Pwd: "pass01"},
Human{ID: 999, Nick: "ysqi", Ms: true, Pwd: "pass02"}, {ID: 999, Nick: "ysqi", Ms: true, Pwd: "pass02"},
}}}}, }}}},
{ {

View File

@ -168,6 +168,19 @@ func sanitizeValue(v string) string {
return cookieValueSanitizer.Replace(v) return cookieValueSanitizer.Replace(v)
} }
func jsonRenderer(value interface{}) Renderer {
return rendererFunc(func(ctx *Context) {
ctx.Output.JSON(value, false, false)
})
}
func errorRenderer(err error) Renderer {
return rendererFunc(func(ctx *Context) {
ctx.Output.SetStatus(500)
ctx.Output.Body([]byte(err.Error()))
})
}
// JSON writes json to response body. // JSON writes json to response body.
// if coding is true, it converts utf-8 to \u0000 type. // if coding is true, it converts utf-8 to \u0000 type.
func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, coding bool) error { func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, coding bool) error {
@ -330,9 +343,8 @@ func (output *BeegoOutput) IsServerError() bool {
} }
func stringsToJSON(str string) string { func stringsToJSON(str string) string {
rs := []rune(str)
var jsons bytes.Buffer var jsons bytes.Buffer
for _, r := range rs { for _, r := range str {
rint := int(r) rint := int(r)
if rint < 128 { if rint < 128 {
jsons.WriteRune(r) jsons.WriteRune(r)

78
context/param/conv.go Normal file
View File

@ -0,0 +1,78 @@
package param
import (
"fmt"
"reflect"
beecontext "github.com/astaxie/beego/context"
"github.com/astaxie/beego/logs"
)
// ConvertParams converts http method params to values that will be passed to the method controller as arguments
func ConvertParams(methodParams []*MethodParam, methodType reflect.Type, ctx *beecontext.Context) (result []reflect.Value) {
result = make([]reflect.Value, 0, len(methodParams))
for i := 0; i < len(methodParams); i++ {
reflectValue := convertParam(methodParams[i], methodType.In(i), ctx)
result = append(result, reflectValue)
}
return
}
func convertParam(param *MethodParam, paramType reflect.Type, ctx *beecontext.Context) (result reflect.Value) {
paramValue := getParamValue(param, ctx)
if paramValue == "" {
if param.required {
ctx.Abort(400, fmt.Sprintf("Missing parameter %s", param.name))
} else {
paramValue = param.defaultValue
}
}
reflectValue, err := parseValue(param, paramValue, paramType)
if err != nil {
logs.Debug(fmt.Sprintf("Error converting param %s to type %s. Value: %v, Error: %s", param.name, paramType, paramValue, err))
ctx.Abort(400, fmt.Sprintf("Invalid parameter %s. Can not convert %v to type %s", param.name, paramValue, paramType))
}
return reflectValue
}
func getParamValue(param *MethodParam, ctx *beecontext.Context) string {
switch param.in {
case body:
return string(ctx.Input.RequestBody)
case header:
return ctx.Input.Header(param.name)
case path:
return ctx.Input.Query(":" + param.name)
default:
return ctx.Input.Query(param.name)
}
}
func parseValue(param *MethodParam, paramValue string, paramType reflect.Type) (result reflect.Value, err error) {
if paramValue == "" {
return reflect.Zero(paramType), nil
}
parser := getParser(param, paramType)
value, err := parser.parse(paramValue, paramType)
if err != nil {
return result, err
}
return safeConvert(reflect.ValueOf(value), paramType)
}
func safeConvert(value reflect.Value, t reflect.Type) (result reflect.Value, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
err, ok = r.(error)
if !ok {
err = fmt.Errorf("%v", r)
}
}
}()
result = value.Convert(t)
return
}

View File

@ -0,0 +1,69 @@
package param
import (
"fmt"
"strings"
)
//MethodParam keeps param information to be auto passed to controller methods
type MethodParam struct {
name string
in paramType
required bool
defaultValue string
}
type paramType byte
const (
param paramType = iota
path
body
header
)
//New creates a new MethodParam with name and specific options
func New(name string, opts ...MethodParamOption) *MethodParam {
return newParam(name, nil, opts)
}
func newParam(name string, parser paramParser, opts []MethodParamOption) (param *MethodParam) {
param = &MethodParam{name: name}
for _, option := range opts {
option(param)
}
return
}
//Make creates an array of MethodParmas or an empty array
func Make(list ...*MethodParam) []*MethodParam {
if len(list) > 0 {
return list
}
return nil
}
func (mp *MethodParam) String() string {
options := []string{}
result := "param.New(\"" + mp.name + "\""
if mp.required {
options = append(options, "param.IsRequired")
}
switch mp.in {
case path:
options = append(options, "param.InPath")
case body:
options = append(options, "param.InBody")
case header:
options = append(options, "param.InHeader")
}
if mp.defaultValue != "" {
options = append(options, fmt.Sprintf(`param.Default("%s")`, mp.defaultValue))
}
if len(options) > 0 {
result += ", "
}
result += strings.Join(options, ", ")
result += ")"
return result
}

37
context/param/options.go Normal file
View File

@ -0,0 +1,37 @@
package param
import (
"fmt"
)
// MethodParamOption defines a func which apply options on a MethodParam
type MethodParamOption func(*MethodParam)
// IsRequired indicates that this param is required and can not be ommited from the http request
var IsRequired MethodParamOption = func(p *MethodParam) {
p.required = true
}
// InHeader indicates that this param is passed via an http header
var InHeader MethodParamOption = func(p *MethodParam) {
p.in = header
}
// InPath indicates that this param is part of the URL path
var InPath MethodParamOption = func(p *MethodParam) {
p.in = path
}
// InBody indicates that this param is passed as an http request body
var InBody MethodParamOption = func(p *MethodParam) {
p.in = body
}
// Default provides a default value for the http param
func Default(defaultValue interface{}) MethodParamOption {
return func(p *MethodParam) {
if defaultValue != nil {
p.defaultValue = fmt.Sprint(defaultValue)
}
}
}
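A minimal sketch of declaring method parameters with the helpers above; the parameter names mirror a hypothetical controller method and are not from the diff. They would be attached through ControllerComments.MethodParams (see the controller.go change further down).

```go
package main

import "github.com/astaxie/beego/context/param"

// Hypothetical declarations for a controller method like
// GetUser(id int, limit int, payload UserPayload).
var getUserParams = param.Make(
	param.New("id", param.InPath, param.IsRequired),
	param.New("limit", param.Default(20)),
	param.New("payload", param.InBody),
)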

149
context/param/parsers.go Normal file
View File

@ -0,0 +1,149 @@
package param
import (
"encoding/json"
"reflect"
"strconv"
"strings"
"time"
)
type paramParser interface {
parse(value string, toType reflect.Type) (interface{}, error)
}
func getParser(param *MethodParam, t reflect.Type) paramParser {
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return intParser{}
case reflect.Slice:
if t.Elem().Kind() == reflect.Uint8 { //treat []byte as string
return stringParser{}
}
if param.in == body {
return jsonParser{}
}
elemParser := getParser(param, t.Elem())
if elemParser == (jsonParser{}) {
return elemParser
}
return sliceParser(elemParser)
case reflect.Bool:
return boolParser{}
case reflect.String:
return stringParser{}
case reflect.Float32, reflect.Float64:
return floatParser{}
case reflect.Ptr:
elemParser := getParser(param, t.Elem())
if elemParser == (jsonParser{}) {
return elemParser
}
return ptrParser(elemParser)
default:
if t.PkgPath() == "time" && t.Name() == "Time" {
return timeParser{}
}
return jsonParser{}
}
}
type parserFunc func(value string, toType reflect.Type) (interface{}, error)
func (f parserFunc) parse(value string, toType reflect.Type) (interface{}, error) {
return f(value, toType)
}
type boolParser struct {
}
func (p boolParser) parse(value string, toType reflect.Type) (interface{}, error) {
return strconv.ParseBool(value)
}
type stringParser struct {
}
func (p stringParser) parse(value string, toType reflect.Type) (interface{}, error) {
return value, nil
}
type intParser struct {
}
func (p intParser) parse(value string, toType reflect.Type) (interface{}, error) {
return strconv.Atoi(value)
}
type floatParser struct {
}
func (p floatParser) parse(value string, toType reflect.Type) (interface{}, error) {
if toType.Kind() == reflect.Float32 {
res, err := strconv.ParseFloat(value, 32)
if err != nil {
return nil, err
}
return float32(res), nil
}
return strconv.ParseFloat(value, 64)
}
type timeParser struct {
}
func (p timeParser) parse(value string, toType reflect.Type) (result interface{}, err error) {
result, err = time.Parse(time.RFC3339, value)
if err != nil {
result, err = time.Parse("2006-01-02", value)
}
return
}
type jsonParser struct {
}
func (p jsonParser) parse(value string, toType reflect.Type) (interface{}, error) {
pResult := reflect.New(toType)
v := pResult.Interface()
err := json.Unmarshal([]byte(value), v)
if err != nil {
return nil, err
}
return pResult.Elem().Interface(), nil
}
func sliceParser(elemParser paramParser) paramParser {
return parserFunc(func(value string, toType reflect.Type) (interface{}, error) {
values := strings.Split(value, ",")
result := reflect.MakeSlice(toType, 0, len(values))
elemType := toType.Elem()
for _, v := range values {
parsedValue, err := elemParser.parse(v, elemType)
if err != nil {
return nil, err
}
result = reflect.Append(result, reflect.ValueOf(parsedValue))
}
return result.Interface(), nil
})
}
func ptrParser(elemParser paramParser) paramParser {
return parserFunc(func(value string, toType reflect.Type) (interface{}, error) {
parsedValue, err := elemParser.parse(value, toType.Elem())
if err != nil {
return nil, err
}
newValPtr := reflect.New(toType.Elem())
newVal := reflect.Indirect(newValPtr)
convertedVal, err := safeConvert(reflect.ValueOf(parsedValue), toType.Elem())
if err != nil {
return nil, err
}
newVal.Set(convertedVal)
return newValPtr.Interface(), nil
})
}

View File

@ -0,0 +1,84 @@
package param
import "testing"
import "reflect"
import "time"
type testDefinition struct {
strValue string
expectedValue interface{}
expectedParser paramParser
}
func Test_Parsers(t *testing.T) {
//ints
checkParser(testDefinition{"1", 1, intParser{}}, t)
checkParser(testDefinition{"-1", int64(-1), intParser{}}, t)
checkParser(testDefinition{"1", uint64(1), intParser{}}, t)
//floats
checkParser(testDefinition{"1.0", float32(1.0), floatParser{}}, t)
checkParser(testDefinition{"-1.0", float64(-1.0), floatParser{}}, t)
//strings
checkParser(testDefinition{"AB", "AB", stringParser{}}, t)
checkParser(testDefinition{"AB", []byte{65, 66}, stringParser{}}, t)
//bools
checkParser(testDefinition{"true", true, boolParser{}}, t)
checkParser(testDefinition{"0", false, boolParser{}}, t)
//timeParser
checkParser(testDefinition{"2017-05-30T13:54:53Z", time.Date(2017, 5, 30, 13, 54, 53, 0, time.UTC), timeParser{}}, t)
checkParser(testDefinition{"2017-05-30", time.Date(2017, 5, 30, 0, 0, 0, 0, time.UTC), timeParser{}}, t)
//json
checkParser(testDefinition{`{"X": 5, "Y":"Z"}`, struct {
X int
Y string
}{5, "Z"}, jsonParser{}}, t)
//slice in query is parsed as comma delimited
checkParser(testDefinition{`1,2`, []int{1, 2}, sliceParser(intParser{})}, t)
//slice in body is parsed as json
checkParser(testDefinition{`["a","b"]`, []string{"a", "b"}, jsonParser{}}, t, MethodParam{in: body})
//pointers
var someInt = 1
checkParser(testDefinition{`1`, &someInt, ptrParser(intParser{})}, t)
var someStruct = struct{ X int }{5}
checkParser(testDefinition{`{"X": 5}`, &someStruct, jsonParser{}}, t)
}
func checkParser(def testDefinition, t *testing.T, methodParam ...MethodParam) {
toType := reflect.TypeOf(def.expectedValue)
var mp MethodParam
if len(methodParam) == 0 {
mp = MethodParam{}
} else {
mp = methodParam[0]
}
parser := getParser(&mp, toType)
if reflect.TypeOf(parser) != reflect.TypeOf(def.expectedParser) {
t.Errorf("Invalid parser for value %v. Expected: %v, actual: %v", def.strValue, reflect.TypeOf(def.expectedParser).Name(), reflect.TypeOf(parser).Name())
return
}
result, err := parser.parse(def.strValue, toType)
if err != nil {
t.Errorf("Parsing error for value %v. Expected result: %v, error: %v", def.strValue, def.expectedValue, err)
return
}
convResult, err := safeConvert(reflect.ValueOf(result), toType)
if err != nil {
t.Errorf("Convertion error for %v. from value: %v, toType: %v, error: %v", def.strValue, result, toType, err)
return
}
if !reflect.DeepEqual(convResult.Interface(), def.expectedValue) {
t.Errorf("Parsing error for value %v. Expected result: %v, actual: %v", def.strValue, def.expectedValue, result)
}
}

context/renderer.go

@ -0,0 +1,12 @@
package context
// Renderer defines an http response renderer
type Renderer interface {
Render(ctx *Context)
}
type rendererFunc func(ctx *Context)
func (f rendererFunc) Render(ctx *Context) {
f(ctx)
}
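A brief sketch of the adapter in use: any func(*Context) can serve as a Renderer by wrapping it in rendererFunc. The handler body below is illustrative and assumes the Output.Body helper available on the beego context.
// illustrative: adapt a plain function into a Renderer
func helloRenderer() Renderer {
	return rendererFunc(func(ctx *Context) {
		ctx.Output.Body([]byte("hello"))
	})
}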

context/response.go

@ -0,0 +1,27 @@
package context
import (
"strconv"
"net/http"
)
const (
//BadRequest indicates http error 400
BadRequest StatusCode = http.StatusBadRequest
//NotFound indicates http error 404
NotFound StatusCode = http.StatusNotFound
)
// StatusCode sets the http response status code
type StatusCode int
func (s StatusCode) Error() string {
return strconv.Itoa(int(s))
}
// Render sets the http status code
func (s StatusCode) Render(ctx *Context) {
ctx.Output.SetStatus(int(s))
}
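Because StatusCode satisfies both error and Renderer, a bare status code can be rendered onto a request context; a minimal sketch:
// minimal sketch: respond with 404 by rendering the NotFound status code
func renderNotFound(ctx *Context) {
	NotFound.Render(ctx) // equivalent to ctx.Output.SetStatus(404)
}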


@ -28,6 +28,7 @@ import (
"strings" "strings"
"github.com/astaxie/beego/context" "github.com/astaxie/beego/context"
"github.com/astaxie/beego/context/param"
"github.com/astaxie/beego/session" "github.com/astaxie/beego/session"
) )
@ -51,8 +52,16 @@ type ControllerComments struct {
Router string Router string
AllowHTTPMethods []string AllowHTTPMethods []string
Params []map[string]string Params []map[string]string
MethodParams []*param.MethodParam
} }
// ControllerCommentsSlice implements the sort interface
type ControllerCommentsSlice []ControllerComments
func (p ControllerCommentsSlice) Len() int { return len(p) }
func (p ControllerCommentsSlice) Less(i, j int) bool { return p[i].Router < p[j].Router }
func (p ControllerCommentsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Controller defines some basic http request handler operations, such as // Controller defines some basic http request handler operations, such as
// http context, template and view, session and xsrf. // http context, template and view, session and xsrf.
type Controller struct { type Controller struct {
@ -314,7 +323,7 @@ func (c *Controller) ServeJSON(encoding ...bool) {
if BConfig.RunMode == PROD { if BConfig.RunMode == PROD {
hasIndent = false hasIndent = false
} }
if len(encoding) > 0 && encoding[0] == true { if len(encoding) > 0 && encoding[0] {
hasEncoding = true hasEncoding = true
} }
c.Ctx.Output.JSON(c.Data["json"], hasIndent, hasEncoding) c.Ctx.Output.JSON(c.Data["json"], hasIndent, hasEncoding)


@ -172,10 +172,10 @@ func TestAdditionalViewPaths(t *testing.T) {
t.Fatal("TestAdditionalViewPaths expected error") t.Fatal("TestAdditionalViewPaths expected error")
} }
}() }()
ctrl.RenderString(); ctrl.RenderString()
}() }()
ctrl.TplName = "file2.tpl" ctrl.TplName = "file2.tpl"
ctrl.ViewPath = dir2 ctrl.ViewPath = dir2
ctrl.RenderString(); ctrl.RenderString()
} }


@ -252,6 +252,30 @@ func forbidden(rw http.ResponseWriter, r *http.Request) {
) )
} }
// show 422 missing xsrf token
func missingxsrf(rw http.ResponseWriter, r *http.Request) {
responseError(rw, r,
422,
"<br>The page you have requested is forbidden."+
"<br>Perhaps you are here because:"+
"<br><br><ul>"+
"<br>'_xsrf' argument missing from POST"+
"</ul>",
)
}
// show 417 invalid xsrf token
func invalidxsrf(rw http.ResponseWriter, r *http.Request) {
responseError(rw, r,
417,
"<br>The page you have requested is forbidden."+
"<br>Perhaps you are here because:"+
"<br><br><ul>"+
"<br>expected XSRF not found"+
"</ul>",
)
}
// show 404 not found error. // show 404 not found error.
func notFound(rw http.ResponseWriter, r *http.Request) { func notFound(rw http.ResponseWriter, r *http.Request) {
responseError(rw, r, responseError(rw, r,


@ -52,7 +52,7 @@ func TestErrorCode_01(t *testing.T) {
if w.Code != code { if w.Code != code {
t.Fail() t.Fail()
} }
if !strings.Contains(string(w.Body.Bytes()), http.StatusText(code)) { if !strings.Contains(w.Body.String(), http.StatusText(code)) {
t.Fail() t.Fail()
} }
} }
@ -82,7 +82,7 @@ func TestErrorCode_03(t *testing.T) {
if w.Code != 200 { if w.Code != 200 {
t.Fail() t.Fail()
} }
if string(w.Body.Bytes()) != parseCodeError { if w.Body.String() != parseCodeError {
t.Fail() t.Fail()
} }
} }


@ -48,7 +48,7 @@ func TestFlashHeader(t *testing.T) {
// match for the expected header // match for the expected header
res := strings.Contains(sc, "BEEGO_FLASH=%00notice%23BEEGOFLASH%23TestFlashString%00") res := strings.Contains(sc, "BEEGO_FLASH=%00notice%23BEEGOFLASH%23TestFlashString%00")
// validate the assertion // validate the assertion
if res != true { if !res {
t.Errorf("TestFlashHeader() unable to validate flash message") t.Errorf("TestFlashHeader() unable to validate flash message")
} }
} }


@ -3,14 +3,17 @@ package grace
import ( import (
"errors" "errors"
"net" "net"
"sync"
) )
type graceConn struct { type graceConn struct {
net.Conn net.Conn
server *Server server *Server
m sync.Mutex
closed bool
} }
func (c graceConn) Close() (err error) { func (c *graceConn) Close() (err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
switch x := r.(type) { switch x := r.(type) {
@ -23,6 +26,14 @@ func (c graceConn) Close() (err error) {
} }
} }
}() }()
c.m.Lock()
if c.closed {
c.m.Unlock()
return
}
c.server.wg.Done() c.server.wg.Done()
c.closed = true
c.m.Unlock()
return c.Conn.Close() return c.Conn.Close()
} }


@ -21,7 +21,7 @@ func newGraceListener(l net.Listener, srv *Server) (el *graceListener) {
server: srv, server: srv,
} }
go func() { go func() {
_ = <-el.stop <-el.stop
el.stopped = true el.stopped = true
el.stop <- el.Listener.Close() el.stop <- el.Listener.Close()
}() }()
@ -37,7 +37,7 @@ func (gl *graceListener) Accept() (c net.Conn, err error) {
tc.SetKeepAlive(true) tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute) tc.SetKeepAlivePeriod(3 * time.Minute)
c = graceConn{ c = &graceConn{
Conn: tc, Conn: tc,
server: gl.server, server: gl.server,
} }


@ -196,7 +196,6 @@ func (srv *Server) signalHooks(ppFlag int, sig os.Signal) {
for _, f := range srv.SignalHooks[ppFlag][sig] { for _, f := range srv.SignalHooks[ppFlag][sig] {
f() f()
} }
return
} }
// shutdown closes the listener so that no new connections are accepted. it also // shutdown closes the listener so that no new connections are accepted. it also
@ -292,7 +291,7 @@ func (srv *Server) fork() (err error) {
// RegisterSignalHook registers a function to be run PreSignal or PostSignal for a given signal. // RegisterSignalHook registers a function to be run PreSignal or PostSignal for a given signal.
func (srv *Server) RegisterSignalHook(ppFlag int, sig os.Signal, f func()) (err error) { func (srv *Server) RegisterSignalHook(ppFlag int, sig os.Signal, f func()) (err error) {
if ppFlag != PreSignal && ppFlag != PostSignal { if ppFlag != PreSignal && ppFlag != PostSignal {
err = fmt.Errorf("Invalid ppFlag argument. Must be either grace.PreSignal or grace.PostSignal.") err = fmt.Errorf("Invalid ppFlag argument. Must be either grace.PreSignal or grace.PostSignal")
return return
} }
for _, s := range hookableSignals { for _, s := range hookableSignals {
@ -301,6 +300,6 @@ func (srv *Server) RegisterSignalHook(ppFlag int, sig os.Signal, f func()) (err
return return
} }
} }
err = fmt.Errorf("Signal '%v' is not supported.", sig) err = fmt.Errorf("Signal '%v' is not supported", sig)
return return
} }


@ -32,6 +32,8 @@ func registerDefaultErrorHandler() error {
"502": badGateway, "502": badGateway,
"503": serviceUnavailable, "503": serviceUnavailable,
"504": gatewayTimeout, "504": gatewayTimeout,
"417": invalidxsrf,
"422": missingxsrf,
} }
for e, h := range m { for e, h := range m {
if _, ok := ErrorMaps[e]; !ok { if _, ok := ErrorMaps[e]; !ok {
@ -55,9 +57,9 @@ func registerSession() error {
conf.ProviderConfig = filepath.ToSlash(BConfig.WebConfig.Session.SessionProviderConfig) conf.ProviderConfig = filepath.ToSlash(BConfig.WebConfig.Session.SessionProviderConfig)
conf.DisableHTTPOnly = BConfig.WebConfig.Session.SessionDisableHTTPOnly conf.DisableHTTPOnly = BConfig.WebConfig.Session.SessionDisableHTTPOnly
conf.Domain = BConfig.WebConfig.Session.SessionDomain conf.Domain = BConfig.WebConfig.Session.SessionDomain
conf.EnableSidInHttpHeader = BConfig.WebConfig.Session.SessionEnableSidInHTTPHeader conf.EnableSidInHTTPHeader = BConfig.WebConfig.Session.SessionEnableSidInHTTPHeader
conf.SessionNameInHttpHeader = BConfig.WebConfig.Session.SessionNameInHTTPHeader conf.SessionNameInHTTPHeader = BConfig.WebConfig.Session.SessionNameInHTTPHeader
conf.EnableSidInUrlQuery = BConfig.WebConfig.Session.SessionEnableSidInURLQuery conf.EnableSidInURLQuery = BConfig.WebConfig.Session.SessionEnableSidInURLQuery
} else { } else {
if err = json.Unmarshal([]byte(sessionConfig), conf); err != nil { if err = json.Unmarshal([]byte(sessionConfig), conf); err != nil {
return err return err


@ -32,7 +32,7 @@ The default timeout is `60` seconds, function prototype:
SetTimeout(connectTimeout, readWriteTimeout time.Duration) SetTimeout(connectTimeout, readWriteTimeout time.Duration)
Exmaple: Example:
// GET // GET
httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second)


@ -335,7 +335,7 @@ func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error)
func (b *BeegoHTTPRequest) buildURL(paramBody string) { func (b *BeegoHTTPRequest) buildURL(paramBody string) {
// build GET url with query string // build GET url with query string
if b.req.Method == "GET" && len(paramBody) > 0 { if b.req.Method == "GET" && len(paramBody) > 0 {
if strings.Index(b.url, "?") != -1 { if strings.Contains(b.url, "?") {
b.url += "&" + paramBody b.url += "&" + paramBody
} else { } else {
b.url = b.url + "?" + paramBody b.url = b.url + "?" + paramBody
@ -344,7 +344,7 @@ func (b *BeegoHTTPRequest) buildURL(paramBody string) {
} }
// build POST/PUT/PATCH url and body // build POST/PUT/PATCH url and body
if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH") && b.req.Body == nil { if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH" || b.req.Method == "DELETE") && b.req.Body == nil {
// with files // with files
if len(b.files) > 0 { if len(b.files) > 0 {
pr, pw := io.Pipe() pr, pw := io.Pipe()
@ -520,9 +520,9 @@ func (b *BeegoHTTPRequest) Bytes() ([]byte, error) {
return nil, err return nil, err
} }
b.body, err = ioutil.ReadAll(reader) b.body, err = ioutil.ReadAll(reader)
} else { return b.body, err
b.body, err = ioutil.ReadAll(resp.Body)
} }
b.body, err = ioutil.ReadAll(resp.Body)
return b.body, err return b.body, err
} }


@ -102,6 +102,14 @@ func TestSimpleDelete(t *testing.T) {
t.Log(str) t.Log(str)
} }
func TestSimpleDeleteParam(t *testing.T) {
str, err := Delete("http://httpbin.org/delete").Param("key", "val").String()
if err != nil {
t.Fatal(err)
}
t.Log(str)
}
func TestWithCookie(t *testing.T) { func TestWithCookie(t *testing.T) {
v := "smallfish" v := "smallfish"
str, err := Get("http://httpbin.org/cookies/set?k1=" + v).SetEnableCookie(true).String() str, err := Get("http://httpbin.org/cookies/set?k1=" + v).SetEnableCookie(true).String()


@ -2,19 +2,23 @@ package alils
import ( import (
"encoding/json" "encoding/json"
"github.com/astaxie/beego/logs"
"github.com/gogo/protobuf/proto"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/astaxie/beego/logs"
"github.com/gogo/protobuf/proto"
) )
const ( const (
// CacheSize set the flush size
CacheSize int = 64 CacheSize int = 64
// Delimiter define the topic delimiter
Delimiter string = "##" Delimiter string = "##"
) )
type AliLSConfig struct { // Config is the Config for Ali Log
type Config struct {
Project string `json:"project"` Project string `json:"project"`
Endpoint string `json:"endpoint"` Endpoint string `json:"endpoint"`
KeyID string `json:"key_id"` KeyID string `json:"key_id"`
@ -34,18 +38,17 @@ type aliLSWriter struct {
withMap bool withMap bool
groupMap map[string]*LogGroup groupMap map[string]*LogGroup
lock *sync.Mutex lock *sync.Mutex
AliLSConfig Config
} }
// 创建提供Logger接口的日志服务 // NewAliLS create a new Logger
func NewAliLS() logs.Logger { func NewAliLS() logs.Logger {
alils := new(aliLSWriter) alils := new(aliLSWriter)
alils.Level = logs.LevelTrace alils.Level = logs.LevelTrace
return alils return alils
} }
// 读取配置 // Init parse config and init struct
// 初始化必要的数据结构
func (c *aliLSWriter) Init(jsonConfig string) (err error) { func (c *aliLSWriter) Init(jsonConfig string) (err error) {
json.Unmarshal([]byte(jsonConfig), c) json.Unmarshal([]byte(jsonConfig), c)
@ -54,28 +57,26 @@ func (c *aliLSWriter) Init(jsonConfig string) (err error) {
c.FlushWhen = CacheSize c.FlushWhen = CacheSize
} }
// 初始化Project
prj := &LogProject{ prj := &LogProject{
Name: c.Project, Name: c.Project,
Endpoint: c.Endpoint, Endpoint: c.Endpoint,
AccessKeyId: c.KeyID, AccessKeyID: c.KeyID,
AccessKeySecret: c.KeySecret, AccessKeySecret: c.KeySecret,
} }
// 获取logstore
c.store, err = prj.GetLogStore(c.LogStore) c.store, err = prj.GetLogStore(c.LogStore)
if err != nil { if err != nil {
return err return err
} }
// 创建默认Log Group // Create default Log Group
c.group = append(c.group, &LogGroup{ c.group = append(c.group, &LogGroup{
Topic: proto.String(""), Topic: proto.String(""),
Source: proto.String(c.Source), Source: proto.String(c.Source),
Logs: make([]*Log, 0, c.FlushWhen), Logs: make([]*Log, 0, c.FlushWhen),
}) })
// 创建其它Log Group // Create other Log Group
c.groupMap = make(map[string]*LogGroup) c.groupMap = make(map[string]*LogGroup)
for _, topic := range c.Topics { for _, topic := range c.Topics {
@ -113,7 +114,7 @@ func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error
var lg *LogGroup var lg *LogGroup
if c.withMap { if c.withMap {
// 解析出Topic并匹配LogGroup // TopicLogGroup
strs := strings.SplitN(msg, Delimiter, 2) strs := strings.SplitN(msg, Delimiter, 2)
if len(strs) == 2 { if len(strs) == 2 {
pos := strings.LastIndex(strs[0], " ") pos := strings.LastIndex(strs[0], " ")
@ -122,27 +123,24 @@ func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error
lg = c.groupMap[topic] lg = c.groupMap[topic]
} }
// 默认发到空Topic // send to empty Topic
if lg == nil { if lg == nil {
topic = ""
content = msg content = msg
lg = c.group[0] lg = c.group[0]
} }
} else { } else {
topic = ""
content = msg content = msg
lg = c.group[0] lg = c.group[0]
} }
// 生成日志 c1 := &LogContent{
c1 := &Log_Content{
Key: proto.String("msg"), Key: proto.String("msg"),
Value: proto.String(content), Value: proto.String(content),
} }
l := &Log{ l := &Log{
Time: proto.Uint32(uint32(when.Unix())), // 填写日志时间 Time: proto.Uint32(uint32(when.Unix())),
Contents: []*Log_Content{ Contents: []*LogContent{
c1, c1,
}, },
} }
@ -151,7 +149,6 @@ func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error
lg.Logs = append(lg.Logs, l) lg.Logs = append(lg.Logs, l)
c.lock.Unlock() c.lock.Unlock()
// 满足条件则Flush
if len(lg.Logs) >= c.FlushWhen { if len(lg.Logs) >= c.FlushWhen {
c.flush(lg) c.flush(lg)
} }
@ -162,7 +159,7 @@ func (c *aliLSWriter) WriteMsg(when time.Time, msg string, level int) (err error
// Flush implementing method. empty. // Flush implementing method. empty.
func (c *aliLSWriter) Flush() { func (c *aliLSWriter) Flush() {
// flush所有group // flush all group
for _, lg := range c.group { for _, lg := range c.group {
c.flush(lg) c.flush(lg)
} }
@ -176,9 +173,6 @@ func (c *aliLSWriter) flush(lg *LogGroup) {
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
// 把以上的LogGroup推送到SLS服务器
// SLS服务器会根据该logstore的shard个数自动进行负载均衡。
err := c.store.PutLogs(lg) err := c.store.PutLogs(lg)
if err != nil { if err != nil {
return return


@ -1,30 +1,43 @@
package alils package alils
import "github.com/gogo/protobuf/proto" import (
import "fmt" "fmt"
import "math" "io"
"math"
// discarding unused import gogoproto "." "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" )
import "io"
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal var _ = proto.Marshal
var _ = fmt.Errorf var _ = fmt.Errorf
var _ = math.Inf var _ = math.Inf
var (
// ErrInvalidLengthLog invalid proto
ErrInvalidLengthLog = fmt.Errorf("proto: negative length found during unmarshaling")
// ErrIntOverflowLog overflow
ErrIntOverflowLog = fmt.Errorf("proto: integer overflow")
)
// Log define the proto Log
type Log struct { type Log struct {
Time *uint32 `protobuf:"varint,1,req,name=Time" json:"Time,omitempty"` Time *uint32 `protobuf:"varint,1,req,name=Time" json:"Time,omitempty"`
Contents []*Log_Content `protobuf:"bytes,2,rep,name=Contents" json:"Contents,omitempty"` Contents []*LogContent `protobuf:"bytes,2,rep,name=Contents" json:"Contents,omitempty"`
XXX_unrecognized []byte `json:"-"` XXXUnrecognized []byte `json:"-"`
} }
// Reset the Log
func (m *Log) Reset() { *m = Log{} } func (m *Log) Reset() { *m = Log{} }
// String return the Compact Log
func (m *Log) String() string { return proto.CompactTextString(m) } func (m *Log) String() string { return proto.CompactTextString(m) }
// ProtoMessage not implemented
func (*Log) ProtoMessage() {} func (*Log) ProtoMessage() {}
// GetTime return the Log's Time
func (m *Log) GetTime() uint32 { func (m *Log) GetTime() uint32 {
if m != nil && m.Time != nil { if m != nil && m.Time != nil {
return *m.Time return *m.Time
@ -32,49 +45,65 @@ func (m *Log) GetTime() uint32 {
return 0 return 0
} }
func (m *Log) GetContents() []*Log_Content { // GetContents return the Log's Contents
func (m *Log) GetContents() []*LogContent {
if m != nil { if m != nil {
return m.Contents return m.Contents
} }
return nil return nil
} }
type Log_Content struct { // LogContent define the Log content struct
type LogContent struct {
Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"`
Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"`
XXX_unrecognized []byte `json:"-"` XXXUnrecognized []byte `json:"-"`
} }
func (m *Log_Content) Reset() { *m = Log_Content{} } // Reset LogContent
func (m *Log_Content) String() string { return proto.CompactTextString(m) } func (m *LogContent) Reset() { *m = LogContent{} }
func (*Log_Content) ProtoMessage() {}
func (m *Log_Content) GetKey() string { // String return the compact text
func (m *LogContent) String() string { return proto.CompactTextString(m) }
// ProtoMessage not implemented
func (*LogContent) ProtoMessage() {}
// GetKey return the Key
func (m *LogContent) GetKey() string {
if m != nil && m.Key != nil { if m != nil && m.Key != nil {
return *m.Key return *m.Key
} }
return "" return ""
} }
func (m *Log_Content) GetValue() string { // GetValue return the Value
func (m *LogContent) GetValue() string {
if m != nil && m.Value != nil { if m != nil && m.Value != nil {
return *m.Value return *m.Value
} }
return "" return ""
} }
// LogGroup define the logs struct
type LogGroup struct { type LogGroup struct {
Logs []*Log `protobuf:"bytes,1,rep,name=Logs" json:"Logs,omitempty"` Logs []*Log `protobuf:"bytes,1,rep,name=Logs" json:"Logs,omitempty"`
Reserved *string `protobuf:"bytes,2,opt,name=Reserved" json:"Reserved,omitempty"` Reserved *string `protobuf:"bytes,2,opt,name=Reserved" json:"Reserved,omitempty"`
Topic *string `protobuf:"bytes,3,opt,name=Topic" json:"Topic,omitempty"` Topic *string `protobuf:"bytes,3,opt,name=Topic" json:"Topic,omitempty"`
Source *string `protobuf:"bytes,4,opt,name=Source" json:"Source,omitempty"` Source *string `protobuf:"bytes,4,opt,name=Source" json:"Source,omitempty"`
XXX_unrecognized []byte `json:"-"` XXXUnrecognized []byte `json:"-"`
} }
// Reset LogGroup
func (m *LogGroup) Reset() { *m = LogGroup{} } func (m *LogGroup) Reset() { *m = LogGroup{} }
// String return the compact text
func (m *LogGroup) String() string { return proto.CompactTextString(m) } func (m *LogGroup) String() string { return proto.CompactTextString(m) }
// ProtoMessage not implemented
func (*LogGroup) ProtoMessage() {} func (*LogGroup) ProtoMessage() {}
// GetLogs return the loggroup logs
func (m *LogGroup) GetLogs() []*Log { func (m *LogGroup) GetLogs() []*Log {
if m != nil { if m != nil {
return m.Logs return m.Logs
@ -82,6 +111,7 @@ func (m *LogGroup) GetLogs() []*Log {
return nil return nil
} }
// GetReserved return Reserved
func (m *LogGroup) GetReserved() string { func (m *LogGroup) GetReserved() string {
if m != nil && m.Reserved != nil { if m != nil && m.Reserved != nil {
return *m.Reserved return *m.Reserved
@ -89,6 +119,7 @@ func (m *LogGroup) GetReserved() string {
return "" return ""
} }
// GetTopic return Topic
func (m *LogGroup) GetTopic() string { func (m *LogGroup) GetTopic() string {
if m != nil && m.Topic != nil { if m != nil && m.Topic != nil {
return *m.Topic return *m.Topic
@ -96,6 +127,7 @@ func (m *LogGroup) GetTopic() string {
return "" return ""
} }
// GetSource return Source
func (m *LogGroup) GetSource() string { func (m *LogGroup) GetSource() string {
if m != nil && m.Source != nil { if m != nil && m.Source != nil {
return *m.Source return *m.Source
@ -103,15 +135,22 @@ func (m *LogGroup) GetSource() string {
return "" return ""
} }
// LogGroupList define the LogGroups
type LogGroupList struct { type LogGroupList struct {
LogGroups []*LogGroup `protobuf:"bytes,1,rep,name=logGroups" json:"logGroups,omitempty"` LogGroups []*LogGroup `protobuf:"bytes,1,rep,name=logGroups" json:"logGroups,omitempty"`
XXX_unrecognized []byte `json:"-"` XXXUnrecognized []byte `json:"-"`
} }
// Reset LogGroupList
func (m *LogGroupList) Reset() { *m = LogGroupList{} } func (m *LogGroupList) Reset() { *m = LogGroupList{} }
// String return compact text
func (m *LogGroupList) String() string { return proto.CompactTextString(m) } func (m *LogGroupList) String() string { return proto.CompactTextString(m) }
// ProtoMessage not implemented
func (*LogGroupList) ProtoMessage() {} func (*LogGroupList) ProtoMessage() {}
// GetLogGroups return the LogGroups
func (m *LogGroupList) GetLogGroups() []*LogGroup { func (m *LogGroupList) GetLogGroups() []*LogGroup {
if m != nil { if m != nil {
return m.LogGroups return m.LogGroups
@ -119,6 +158,7 @@ func (m *LogGroupList) GetLogGroups() []*LogGroup {
return nil return nil
} }
// Marshal the logs to byte slice
func (m *Log) Marshal() (data []byte, err error) { func (m *Log) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@ -129,6 +169,7 @@ func (m *Log) Marshal() (data []byte, err error) {
return data[:n], nil return data[:n], nil
} }
// MarshalTo data
func (m *Log) MarshalTo(data []byte) (int, error) { func (m *Log) MarshalTo(data []byte) (int, error) {
var i int var i int
_ = i _ = i
@ -136,11 +177,10 @@ func (m *Log) MarshalTo(data []byte) (int, error) {
_ = l _ = l
if m.Time == nil { if m.Time == nil {
return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time") return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Time")
} else { }
data[i] = 0x8 data[i] = 0x8
i++ i++
i = encodeVarintLog(data, i, uint64(*m.Time)) i = encodeVarintLog(data, i, uint64(*m.Time))
}
if len(m.Contents) > 0 { if len(m.Contents) > 0 {
for _, msg := range m.Contents { for _, msg := range m.Contents {
data[i] = 0x12 data[i] = 0x12
@ -153,13 +193,14 @@ func (m *Log) MarshalTo(data []byte) (int, error) {
i += n i += n
} }
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized) i += copy(data[i:], m.XXXUnrecognized)
} }
return i, nil return i, nil
} }
func (m *Log_Content) Marshal() (data []byte, err error) { // Marshal LogContent
func (m *LogContent) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
n, err := m.MarshalTo(data) n, err := m.MarshalTo(data)
@ -169,33 +210,34 @@ func (m *Log_Content) Marshal() (data []byte, err error) {
return data[:n], nil return data[:n], nil
} }
func (m *Log_Content) MarshalTo(data []byte) (int, error) { // MarshalTo logcontent to data
func (m *LogContent) MarshalTo(data []byte) (int, error) {
var i int var i int
_ = i _ = i
var l int var l int
_ = l _ = l
if m.Key == nil { if m.Key == nil {
return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key") return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Key")
} else { }
data[i] = 0xa data[i] = 0xa
i++ i++
i = encodeVarintLog(data, i, uint64(len(*m.Key))) i = encodeVarintLog(data, i, uint64(len(*m.Key)))
i += copy(data[i:], *m.Key) i += copy(data[i:], *m.Key)
}
if m.Value == nil { if m.Value == nil {
return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value") return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("Value")
} else { }
data[i] = 0x12 data[i] = 0x12
i++ i++
i = encodeVarintLog(data, i, uint64(len(*m.Value))) i = encodeVarintLog(data, i, uint64(len(*m.Value)))
i += copy(data[i:], *m.Value) i += copy(data[i:], *m.Value)
} if m.XXXUnrecognized != nil {
if m.XXX_unrecognized != nil { i += copy(data[i:], m.XXXUnrecognized)
i += copy(data[i:], m.XXX_unrecognized)
} }
return i, nil return i, nil
} }
// Marshal LogGroup
func (m *LogGroup) Marshal() (data []byte, err error) { func (m *LogGroup) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@ -206,6 +248,7 @@ func (m *LogGroup) Marshal() (data []byte, err error) {
return data[:n], nil return data[:n], nil
} }
// MarshalTo LogGroup to data
func (m *LogGroup) MarshalTo(data []byte) (int, error) { func (m *LogGroup) MarshalTo(data []byte) (int, error) {
var i int var i int
_ = i _ = i
@ -241,12 +284,13 @@ func (m *LogGroup) MarshalTo(data []byte) (int, error) {
i = encodeVarintLog(data, i, uint64(len(*m.Source))) i = encodeVarintLog(data, i, uint64(len(*m.Source)))
i += copy(data[i:], *m.Source) i += copy(data[i:], *m.Source)
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized) i += copy(data[i:], m.XXXUnrecognized)
} }
return i, nil return i, nil
} }
// Marshal LogGroupList
func (m *LogGroupList) Marshal() (data []byte, err error) { func (m *LogGroupList) Marshal() (data []byte, err error) {
size := m.Size() size := m.Size()
data = make([]byte, size) data = make([]byte, size)
@ -257,6 +301,7 @@ func (m *LogGroupList) Marshal() (data []byte, err error) {
return data[:n], nil return data[:n], nil
} }
// MarshalTo LogGroupList to data
func (m *LogGroupList) MarshalTo(data []byte) (int, error) { func (m *LogGroupList) MarshalTo(data []byte) (int, error) {
var i int var i int
_ = i _ = i
@ -274,8 +319,8 @@ func (m *LogGroupList) MarshalTo(data []byte) (int, error) {
i += n i += n
} }
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
i += copy(data[i:], m.XXX_unrecognized) i += copy(data[i:], m.XXXUnrecognized)
} }
return i, nil return i, nil
} }
@ -307,6 +352,8 @@ func encodeVarintLog(data []byte, offset int, v uint64) int {
data[offset] = uint8(v) data[offset] = uint8(v)
return offset + 1 return offset + 1
} }
// Size return the log's size
func (m *Log) Size() (n int) { func (m *Log) Size() (n int) {
var l int var l int
_ = l _ = l
@ -319,13 +366,14 @@ func (m *Log) Size() (n int) {
n += 1 + l + sovLog(uint64(l)) n += 1 + l + sovLog(uint64(l))
} }
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXXUnrecognized)
} }
return n return n
} }
func (m *Log_Content) Size() (n int) { // Size return LogContent size based on Key and Value
func (m *LogContent) Size() (n int) {
var l int var l int
_ = l _ = l
if m.Key != nil { if m.Key != nil {
@ -336,12 +384,13 @@ func (m *Log_Content) Size() (n int) {
l = len(*m.Value) l = len(*m.Value)
n += 1 + l + sovLog(uint64(l)) n += 1 + l + sovLog(uint64(l))
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXXUnrecognized)
} }
return n return n
} }
// Size return LogGroup size based on Logs
func (m *LogGroup) Size() (n int) { func (m *LogGroup) Size() (n int) {
var l int var l int
_ = l _ = l
@ -363,12 +412,13 @@ func (m *LogGroup) Size() (n int) {
l = len(*m.Source) l = len(*m.Source)
n += 1 + l + sovLog(uint64(l)) n += 1 + l + sovLog(uint64(l))
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXXUnrecognized)
} }
return n return n
} }
// Size return LogGroupList size
func (m *LogGroupList) Size() (n int) { func (m *LogGroupList) Size() (n int) {
var l int var l int
_ = l _ = l
@ -378,8 +428,8 @@ func (m *LogGroupList) Size() (n int) {
n += 1 + l + sovLog(uint64(l)) n += 1 + l + sovLog(uint64(l))
} }
} }
if m.XXX_unrecognized != nil { if m.XXXUnrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXXUnrecognized)
} }
return n return n
} }
@ -395,8 +445,10 @@ func sovLog(x uint64) (n int) {
return n return n
} }
func sozLog(x uint64) (n int) { func sozLog(x uint64) (n int) {
return sovLog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) return sovLog((x << 1) ^ (x >> 63))
} }
// Unmarshal data to log
func (m *Log) Unmarshal(data []byte) error { func (m *Log) Unmarshal(data []byte) error {
var hasFields [1]uint64 var hasFields [1]uint64
l := len(data) l := len(data)
@ -474,7 +526,7 @@ func (m *Log) Unmarshal(data []byte) error {
if postIndex > l { if postIndex > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.Contents = append(m.Contents, &Log_Content{}) m.Contents = append(m.Contents, &LogContent{})
if err := m.Contents[len(m.Contents)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { if err := m.Contents[len(m.Contents)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err return err
} }
@ -491,7 +543,7 @@ func (m *Log) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l { if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy iNdEx += skippy
} }
} }
@ -504,7 +556,9 @@ func (m *Log) Unmarshal(data []byte) error {
} }
return nil return nil
} }
func (m *Log_Content) Unmarshal(data []byte) error {
// Unmarshal data to LogContent
func (m *LogContent) Unmarshal(data []byte) error {
var hasFields [1]uint64 var hasFields [1]uint64
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
@ -608,7 +662,7 @@ func (m *Log_Content) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l { if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy iNdEx += skippy
} }
} }
@ -624,6 +678,8 @@ func (m *Log_Content) Unmarshal(data []byte) error {
} }
return nil return nil
} }
// Unmarshal data to LogGroup
func (m *LogGroup) Unmarshal(data []byte) error { func (m *LogGroup) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
@ -786,7 +842,7 @@ func (m *LogGroup) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l { if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy iNdEx += skippy
} }
} }
@ -796,6 +852,8 @@ func (m *LogGroup) Unmarshal(data []byte) error {
} }
return nil return nil
} }
// Unmarshal data to LogGroupList
func (m *LogGroupList) Unmarshal(data []byte) error { func (m *LogGroupList) Unmarshal(data []byte) error {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
@ -868,7 +926,7 @@ func (m *LogGroupList) Unmarshal(data []byte) error {
if (iNdEx + skippy) > l { if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) m.XXXUnrecognized = append(m.XXXUnrecognized, data[iNdEx:iNdEx+skippy]...)
iNdEx += skippy iNdEx += skippy
} }
} }
@ -878,6 +936,7 @@ func (m *LogGroupList) Unmarshal(data []byte) error {
} }
return nil return nil
} }
func skipLog(data []byte) (n int, err error) { func skipLog(data []byte) (n int, err error) {
l := len(data) l := len(data)
iNdEx := 0 iNdEx := 0
@ -940,7 +999,7 @@ func skipLog(data []byte) (n int, err error) {
case 3: case 3:
for { for {
var innerWire uint64 var innerWire uint64
var start int = iNdEx var start = iNdEx
for shift := uint(0); ; shift += 7 { for shift := uint(0); ; shift += 7 {
if shift >= 64 { if shift >= 64 {
return 0, ErrIntOverflowLog return 0, ErrIntOverflowLog
@ -977,8 +1036,3 @@ func skipLog(data []byte) (n int, err error) {
} }
panic("unreachable") panic("unreachable")
} }
var (
ErrInvalidLengthLog = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowLog = fmt.Errorf("proto: integer overflow")
)


@ -1,5 +1,6 @@
package alils package alils
// InputDetail define log detail
type InputDetail struct { type InputDetail struct {
LogType string `json:"logType"` LogType string `json:"logType"`
LogPath string `json:"logPath"` LogPath string `json:"logPath"`
@ -14,11 +15,13 @@ type InputDetail struct {
TopicFormat string `json:"topicFormat"` TopicFormat string `json:"topicFormat"`
} }
// OutputDetail define the output detail
type OutputDetail struct { type OutputDetail struct {
Endpoint string `json:"endpoint"` Endpoint string `json:"endpoint"`
LogStoreName string `json:"logstoreName"` LogStoreName string `json:"logstoreName"`
} }
// LogConfig define Log Config
type LogConfig struct { type LogConfig struct {
Name string `json:"configName"` Name string `json:"configName"`
InputType string `json:"inputType"` InputType string `json:"inputType"`


@ -1,5 +1,5 @@
/* /*
Package sls implements the SDK(v0.5.0) of Simple Log Service(abbr. SLS). Package alils implements the SDK(v0.5.0) of Simple Log Service(abbr. SLS).
For more description about SLS, please read this article: For more description about SLS, please read this article:
http://gitlab.alibaba-inc.com/sls/doc. http://gitlab.alibaba-inc.com/sls/doc.
@ -20,19 +20,20 @@ type errorMessage struct {
Message string `json:"errorMessage"` Message string `json:"errorMessage"`
} }
// LogProject Define the Ali Project detail
type LogProject struct { type LogProject struct {
Name string // Project name Name string // Project name
Endpoint string // IP or hostname of SLS endpoint Endpoint string // IP or hostname of SLS endpoint
AccessKeyId string AccessKeyID string
AccessKeySecret string AccessKeySecret string
} }
// NewLogProject creates a new SLS project. // NewLogProject creates a new SLS project.
func NewLogProject(name, endpoint, accessKeyId, accessKeySecret string) (p *LogProject, err error) { func NewLogProject(name, endpoint, AccessKeyID, accessKeySecret string) (p *LogProject, err error) {
p = &LogProject{ p = &LogProject{
Name: name, Name: name,
Endpoint: endpoint, Endpoint: endpoint,
AccessKeyId: accessKeyId, AccessKeyID: AccessKeyID,
AccessKeySecret: accessKeySecret, AccessKeySecret: accessKeySecret,
} }
return p, nil return p, nil


@ -12,6 +12,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
) )
// LogStore Store the logs
type LogStore struct { type LogStore struct {
Name string `json:"logstoreName"` Name string `json:"logstoreName"`
TTL int TTL int
@ -23,6 +24,7 @@ type LogStore struct {
project *LogProject project *LogProject
} }
// Shard define the Log Shard
type Shard struct { type Shard struct {
ShardID int `json:"shardID"` ShardID int `json:"shardID"`
} }
@ -116,16 +118,16 @@ func (s *LogStore) PutLogs(lg *LogGroup) (err error) {
return return
} }
// GetCursor gets log cursor of one shard specified by shardId. // GetCursor gets log cursor of one shard specified by shardID.
// The from can take one of three forms: a) unix timestamp in seconds, b) "begin", c) "end". // The from can take one of three forms: a) unix timestamp in seconds, b) "begin", c) "end".
// For more detail please read: http://gitlab.alibaba-inc.com/sls/doc/blob/master/api/shard.md#logstore // For more detail please read: http://gitlab.alibaba-inc.com/sls/doc/blob/master/api/shard.md#logstore
func (s *LogStore) GetCursor(shardId int, from string) (cursor string, err error) { func (s *LogStore) GetCursor(shardID int, from string) (cursor string, err error) {
h := map[string]string{ h := map[string]string{
"x-sls-bodyrawsize": "0", "x-sls-bodyrawsize": "0",
} }
uri := fmt.Sprintf("/logstores/%v/shards/%v?type=cursor&from=%v", uri := fmt.Sprintf("/logstores/%v/shards/%v?type=cursor&from=%v",
s.Name, shardId, from) s.Name, shardID, from)
r, err := request(s.project, "GET", uri, h, nil) r, err := request(s.project, "GET", uri, h, nil)
if err != nil { if err != nil {
@ -163,10 +165,10 @@ func (s *LogStore) GetCursor(shardId int, from string) (cursor string, err error
return return
} }
// GetLogsBytes gets logs binary data from shard specified by shardId according cursor. // GetLogsBytes gets logs binary data from shard specified by shardID according cursor.
// The logGroupMaxCount is the max number of logGroups that can be returned. // The logGroupMaxCount is the max number of logGroups that can be returned.
// The nextCursor is the next cursor that can be used to read logs next time. // The nextCursor is the next cursor that can be used to read logs next time.
func (s *LogStore) GetLogsBytes(shardId int, cursor string, func (s *LogStore) GetLogsBytes(shardID int, cursor string,
logGroupMaxCount int) (out []byte, nextCursor string, err error) { logGroupMaxCount int) (out []byte, nextCursor string, err error) {
h := map[string]string{ h := map[string]string{
@ -176,7 +178,7 @@ func (s *LogStore) GetLogsBytes(shardId int, cursor string,
} }
uri := fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&count=%v", uri := fmt.Sprintf("/logstores/%v/shards/%v?type=logs&cursor=%v&count=%v",
s.Name, shardId, cursor, logGroupMaxCount) s.Name, shardID, cursor, logGroupMaxCount)
r, err := request(s.project, "GET", uri, h, nil) r, err := request(s.project, "GET", uri, h, nil)
if err != nil { if err != nil {
@ -249,13 +251,13 @@ func LogsBytesDecode(data []byte) (gl *LogGroupList, err error) {
return return
} }
// GetLogs gets logs from shard specified by shardId according cursor. // GetLogs gets logs from shard specified by shardID according cursor.
// The logGroupMaxCount is the max number of logGroups that can be returned. // The logGroupMaxCount is the max number of logGroups that can be returned.
// The nextCursor is the next cursor that can be used to read logs next time. // The nextCursor is the next cursor that can be used to read logs next time.
func (s *LogStore) GetLogs(shardId int, cursor string, func (s *LogStore) GetLogs(shardID int, cursor string,
logGroupMaxCount int) (gl *LogGroupList, nextCursor string, err error) { logGroupMaxCount int) (gl *LogGroupList, nextCursor string, err error) {
out, nextCursor, err := s.GetLogsBytes(shardId, cursor, logGroupMaxCount) out, nextCursor, err := s.GetLogsBytes(shardID, cursor, logGroupMaxCount)
if err != nil { if err != nil {
return return
} }


@ -8,18 +8,20 @@ import (
"net/http/httputil" "net/http/httputil"
) )
type MachinGroupAttribute struct { // MachineGroupAttribute define the Attribute
type MachineGroupAttribute struct {
ExternalName string `json:"externalName"` ExternalName string `json:"externalName"`
TopicName string `json:"groupTopic"` TopicName string `json:"groupTopic"`
} }
// MachineGroup define the machine Group
type MachineGroup struct { type MachineGroup struct {
Name string `json:"groupName"` Name string `json:"groupName"`
Type string `json:"groupType"` Type string `json:"groupType"`
MachineIdType string `json:"machineIdentifyType"` MachineIDType string `json:"machineIdentifyType"`
MachineIdList []string `json:"machineList"` MachineIDList []string `json:"machineList"`
Attribute MachinGroupAttribute `json:"groupAttribute"` Attribute MachineGroupAttribute `json:"groupAttribute"`
CreateTime uint32 CreateTime uint32
LastModifyTime uint32 LastModifyTime uint32
@ -27,12 +29,14 @@ type MachineGroup struct {
project *LogProject project *LogProject
} }
// Machine define the Machine
type Machine struct { type Machine struct {
IP string IP string
UniqueId string `json:"machine-uniqueid"` UniqueID string `json:"machine-uniqueid"`
UserdefinedId string `json:"userdefined-id"` UserdefinedID string `json:"userdefined-id"`
} }
// MachineList define the Machine List
type MachineList struct { type MachineList struct {
Total int Total int
Machines []*Machine Machines []*Machine


@ -33,12 +33,12 @@ func request(project *LogProject, method, uri string, headers map[string]string,
} }
// Calc Authorization // Calc Authorization
// Authorization = "SLS <AccessKeyId>:<Signature>" // Authorization = "SLS <AccessKeyID>:<Signature>"
digest, err := signature(project, method, uri, headers) digest, err := signature(project, method, uri, headers)
if err != nil { if err != nil {
return return
} }
auth := fmt.Sprintf("SLS %v:%v", project.AccessKeyId, digest) auth := fmt.Sprintf("SLS %v:%v", project.AccessKeyID, digest)
headers["Authorization"] = auth headers["Authorization"] = auth
// Initialize http request // Initialize http request


@ -76,7 +76,7 @@ func signature(project *LogProject, method, uri string,
var keys sort.StringSlice var keys sort.StringSlice
vals := u.Query() vals := u.Query()
for k, _ := range vals { for k := range vals {
keys = append(keys, k) keys = append(keys, k)
} }
@ -109,4 +109,3 @@ func signature(project *LogProject, method, uri string,
digest = base64.StdEncoding.EncodeToString(mac.Sum(nil)) digest = base64.StdEncoding.EncodeToString(mac.Sum(nil))
return return
} }


@ -361,7 +361,7 @@ func isParameterChar(b byte) bool {
} }
func (cw *ansiColorWriter) Write(p []byte) (int, error) { func (cw *ansiColorWriter) Write(p []byte) (int, error) {
r, nw, first, last := 0, 0, 0, 0 var r, nw, first, last int
if cw.mode != DiscardNonColorEscSeq { if cw.mode != DiscardNonColorEscSeq {
cw.state = outsideCsiCode cw.state = outsideCsiCode
cw.resetBuffer() cw.resetBuffer()


@ -41,7 +41,7 @@ var colors = []brush{
newBrush("1;33"), // Warning yellow newBrush("1;33"), // Warning yellow
newBrush("1;32"), // Notice green newBrush("1;32"), // Notice green
newBrush("1;34"), // Informational blue newBrush("1;34"), // Informational blue
newBrush("1;34"), // Debug blue newBrush("1;44"), // Debug Background blue
} }
// consoleWriter implements LoggerInterface and writes messages to terminal. // consoleWriter implements LoggerInterface and writes messages to terminal.


@ -56,6 +56,8 @@ type fileLogWriter struct {
Perm string `json:"perm"` Perm string `json:"perm"`
RotatePerm string `json:"rotateperm"`
fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
} }
@ -65,6 +67,7 @@ func newFileWriter() Logger {
Daily: true, Daily: true,
MaxDays: 7, MaxDays: 7,
Rotate: true, Rotate: true,
RotatePerm: "0440",
Level: LevelTrace, Level: LevelTrace,
Perm: "0660", Perm: "0660",
} }
@ -170,7 +173,7 @@ func (w *fileLogWriter) initFd() error {
fd := w.fileWriter fd := w.fileWriter
fInfo, err := fd.Stat() fInfo, err := fd.Stat()
if err != nil { if err != nil {
return fmt.Errorf("get stat err: %s\n", err) return fmt.Errorf("get stat err: %s", err)
} }
w.maxSizeCurSize = int(fInfo.Size()) w.maxSizeCurSize = int(fInfo.Size())
w.dailyOpenTime = time.Now() w.dailyOpenTime = time.Now()
@ -193,8 +196,7 @@ func (w *fileLogWriter) dailyRotate(openTime time.Time) {
y, m, d := openTime.Add(24 * time.Hour).Date() y, m, d := openTime.Add(24 * time.Hour).Date()
nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location()) nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location())
tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100)) tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
select { <-tm.C
case <-tm.C:
w.Lock() w.Lock()
if w.needRotate(0, time.Now().Day()) { if w.needRotate(0, time.Now().Day()) {
if err := w.doRotate(time.Now()); err != nil { if err := w.doRotate(time.Now()); err != nil {
@ -203,7 +205,6 @@ func (w *fileLogWriter) dailyRotate(openTime time.Time) {
} }
w.Unlock() w.Unlock()
} }
}
func (w *fileLogWriter) lines() (int, error) { func (w *fileLogWriter) lines() (int, error) {
fd, err := os.Open(w.Filename) fd, err := os.Open(w.Filename)
@ -239,8 +240,12 @@ func (w *fileLogWriter) doRotate(logTime time.Time) error {
// Find the next available number // Find the next available number
num := 1 num := 1
fName := "" fName := ""
rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64)
if err != nil {
return err
}
_, err := os.Lstat(w.Filename) _, err = os.Lstat(w.Filename)
if err != nil { if err != nil {
//even if the file is not exist or other ,we should RESTART the logger //even if the file is not exist or other ,we should RESTART the logger
goto RESTART_LOGGER goto RESTART_LOGGER
@ -261,7 +266,7 @@ func (w *fileLogWriter) doRotate(logTime time.Time) error {
} }
// return error if the last file checked still existed // return error if the last file checked still existed
if err == nil { if err == nil {
return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename) return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename)
} }
// close fileWriter before rename // close fileWriter before rename
@ -270,21 +275,24 @@ func (w *fileLogWriter) doRotate(logTime time.Time) error {
// Rename the file to its new found name // Rename the file to its new found name
// even if occurs error,we MUST guarantee to restart new logger // even if occurs error,we MUST guarantee to restart new logger
err = os.Rename(w.Filename, fName) err = os.Rename(w.Filename, fName)
err = os.Chmod(fName, os.FileMode(440)) if err != nil {
// re-start logger goto RESTART_LOGGER
}
err = os.Chmod(fName, os.FileMode(rotatePerm))
RESTART_LOGGER: RESTART_LOGGER:
startLoggerErr := w.startLogger() startLoggerErr := w.startLogger()
go w.deleteOldLog() go w.deleteOldLog()
if startLoggerErr != nil { if startLoggerErr != nil {
return fmt.Errorf("Rotate StartLogger: %s\n", startLoggerErr) return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr)
} }
if err != nil { if err != nil {
return fmt.Errorf("Rotate: %s\n", err) return fmt.Errorf("Rotate: %s", err)
} }
return nil return nil
} }
func (w *fileLogWriter) deleteOldLog() { func (w *fileLogWriter) deleteOldLog() {
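With the new RotatePerm field (JSON key "rotateperm", default "0440"), the permission applied to rotated log files becomes configurable. A sketch of passing it through the file adapter config; the filename is illustrative and the logs import path is assumed to be github.com/astaxie/beego/logs.
// sketch: set a custom permission for rotated log files
func configureFileLogger() error {
	return logs.SetLogger("file", `{"filename":"app.log","perm":"0660","rotateperm":"0440"}`)
}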


@ -162,7 +162,27 @@ func TestFileRotate_05(t *testing.T) {
testFileDailyRotate(t, fn1, fn2) testFileDailyRotate(t, fn1, fn2)
os.Remove(fn) os.Remove(fn)
} }
func TestFileRotate_06(t *testing.T) { //test file mode
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`)
log.Debug("debug")
log.Info("info")
log.Notice("notice")
log.Warning("warning")
log.Error("error")
log.Alert("alert")
log.Critical("critical")
log.Emergency("emergency")
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1) + ".log"
s, _ := os.Lstat(rotateName)
if s.Mode() != 0440 {
os.Remove(rotateName)
os.Remove("test3.log")
t.Fatal("rotate file mode error")
}
os.Remove(rotateName)
os.Remove("test3.log")
}
func testFileRotate(t *testing.T, fn1, fn2 string) { func testFileRotate(t *testing.T, fn1, fn2 string) {
fw := &fileLogWriter{ fw := &fileLogWriter{
Daily: true, Daily: true,
@ -170,6 +190,7 @@ func testFileRotate(t *testing.T, fn1, fn2 string) {
Rotate: true, Rotate: true,
Level: LevelTrace, Level: LevelTrace,
Perm: "0660", Perm: "0660",
RotatePerm: "0440",
} }
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1)) fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour) fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)
@ -193,6 +214,7 @@ func testFileDailyRotate(t *testing.T, fn1, fn2 string) {
Rotate: true, Rotate: true,
Level: LevelTrace, Level: LevelTrace,
Perm: "0660", Perm: "0660",
RotatePerm: "0440",
} }
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1)) fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour) fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)


@ -25,11 +25,7 @@ func newJLWriter() Logger {
// Init JLWriter with json config string // Init JLWriter with json config string
func (s *JLWriter) Init(jsonconfig string) error { func (s *JLWriter) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), s) return json.Unmarshal([]byte(jsonconfig), s)
if err != nil {
return err
}
return nil
} }
// WriteMsg write message in smtp writer. // WriteMsg write message in smtp writer.
@ -65,12 +61,10 @@ func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error {
// Flush implementing method. empty. // Flush implementing method. empty.
func (s *JLWriter) Flush() { func (s *JLWriter) Flush() {
return
} }
// Destroy implementing method. empty. // Destroy implementing method. empty.
func (s *JLWriter) Destroy() { func (s *JLWriter) Destroy() {
return
} }
func init() { func init() {


@ -275,7 +275,7 @@ func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error
line = 0 line = 0
} }
_, filename := path.Split(file) _, filename := path.Split(file)
msg = "[" + filename + ":" + strconv.FormatInt(int64(line), 10) + "] " + msg msg = "[" + filename + ":" + strconv.Itoa(line) + "] " + msg
} }
//set level info in front of filename info //set level info in front of filename info
@ -492,9 +492,9 @@ func (bl *BeeLogger) flush() {
} }
// beeLogger references the used application logger. // beeLogger references the used application logger.
var beeLogger *BeeLogger = NewLogger() var beeLogger = NewLogger()
// GetLogger returns the default BeeLogger // GetBeeLogger returns the default BeeLogger
func GetBeeLogger() *BeeLogger { func GetBeeLogger() *BeeLogger {
return beeLogger return beeLogger
} }
@ -534,6 +534,7 @@ func Reset() {
beeLogger.Reset() beeLogger.Reset()
} }
// Async set the beelogger with Async mode and hold msglen messages
func Async(msgLen ...int64) *BeeLogger { func Async(msgLen ...int64) *BeeLogger {
return beeLogger.Async(msgLen...) return beeLogger.Async(msgLen...)
} }
@ -561,11 +562,7 @@ func SetLogFuncCallDepth(d int) {
// SetLogger sets a new logger. // SetLogger sets a new logger.
func SetLogger(adapter string, config ...string) error { func SetLogger(adapter string, config ...string) error {
err := beeLogger.SetLogger(adapter, config...) return beeLogger.SetLogger(adapter, config...)
if err != nil {
return err
}
return nil
} }
// Emergency logs a message at emergency level. // Emergency logs a message at emergency level.


@ -139,6 +139,11 @@ var (
reset = string([]byte{27, 91, 48, 109}) reset = string([]byte{27, 91, 48, 109})
) )
// ColorByStatus return color by http code
// 2xx return Green
// 3xx return White
// 4xx return Yellow
// 5xx return Red
func ColorByStatus(cond bool, code int) string { func ColorByStatus(cond bool, code int) string {
switch { switch {
case code >= 200 && code < 300: case code >= 200 && code < 300:
@ -152,6 +157,14 @@ func ColorByStatus(cond bool, code int) string {
} }
} }
// ColorByMethod return color by http code
// GET return Blue
// POST return Cyan
// PUT return Yellow
// DELETE return Red
// PATCH return Green
// HEAD return Magenta
// OPTIONS return WHITE
func ColorByMethod(cond bool, method string) string { func ColorByMethod(cond bool, method string) string {
switch method { switch method {
case "GET": case "GET":
@ -173,10 +186,10 @@ func ColorByMethod(cond bool, method string) string {
} }
} }
// Guard Mutex to guarantee atomicity of W32Debug(string) function // Guard Mutex to guarantee atomic of W32Debug(string) function
var mu sync.Mutex var mu sync.Mutex
// Helper method to output colored logs in Windows terminals // W32Debug Helper method to output colored logs in Windows terminals
func W32Debug(msg string) { func W32Debug(msg string) {
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()


@ -21,11 +21,7 @@ func newSLACKWriter() Logger {
// Init SLACKWriter with json config string // Init SLACKWriter with json config string
func (s *SLACKWriter) Init(jsonconfig string) error { func (s *SLACKWriter) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), s) return json.Unmarshal([]byte(jsonconfig), s)
if err != nil {
return err
}
return nil
} }
// WriteMsg write message in smtp writer. // WriteMsg write message in smtp writer.
@ -53,12 +49,10 @@ func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error {
// Flush implementing method. empty. // Flush implementing method. empty.
func (s *SLACKWriter) Flush() { func (s *SLACKWriter) Flush() {
return
} }
// Destroy implementing method. empty. // Destroy implementing method. empty.
func (s *SLACKWriter) Destroy() { func (s *SLACKWriter) Destroy() {
return
} }
func init() { func init() {


@ -52,11 +52,7 @@ func newSMTPWriter() Logger {
// "level":LevelError // "level":LevelError
// } // }
func (s *SMTPWriter) Init(jsonconfig string) error { func (s *SMTPWriter) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), s) return json.Unmarshal([]byte(jsonconfig), s)
if err != nil {
return err
}
return nil
} }
func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth { func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth {
@ -106,7 +102,7 @@ func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAd
if err != nil { if err != nil {
return err return err
} }
_, err = w.Write([]byte(msgContent)) _, err = w.Write(msgContent)
if err != nil { if err != nil {
return err return err
} }
@ -116,12 +112,7 @@ func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAd
return err return err
} }
err = client.Quit() return client.Quit()
if err != nil {
return err
}
return nil
} }
// WriteMsg write message in smtp writer. // WriteMsg write message in smtp writer.
@ -147,12 +138,10 @@ func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error {
// Flush implementing method. empty. // Flush implementing method. empty.
func (s *SMTPWriter) Flush() { func (s *SMTPWriter) Flush() {
return
} }
// Destroy implementing method. empty. // Destroy implementing method. empty.
func (s *SMTPWriter) Destroy() { func (s *SMTPWriter) Destroy() {
return
} }
func init() { func init() {


@ -14,40 +14,382 @@
package migration package migration
// Table store the tablename and Column import (
type Table struct { "fmt"
TableName string
"github.com/astaxie/beego"
)
// Index struct defines the structure of Index Columns
type Index struct {
Name string
}
// Unique struct defines a single unique key combination
type Unique struct {
Definition string
Columns []*Column Columns []*Column
} }
// Create return the create sql //Column struct defines a single column of a table
func (t *Table) Create() string {
return ""
}
// Drop return the drop sql
func (t *Table) Drop() string {
return ""
}
// Column define the columns name type and Default
type Column struct { type Column struct {
Name string Name string
Type string Inc string
Default interface{} Null string
Default string
Unsign string
DataType string
remove bool
Modify bool
} }
// Create return create sql with the provided tbname and columns // Foreign struct defines a single foreign relationship
func Create(tbname string, columns ...Column) string { type Foreign struct {
return "" ForeignTable string
ForeignColumn string
OnDelete string
OnUpdate string
Column
} }
// Drop return the drop sql with the provided tbname and columns // RenameColumn struct allows renaming of columns
func Drop(tbname string, columns ...Column) string { type RenameColumn struct {
return "" OldName string
OldNull string
OldDefault string
OldUnsign string
OldDataType string
NewName string
Column
} }
// TableDDL is still in think // CreateTable creates the table on system
func TableDDL(tbname string, columns ...Column) string { func (m *Migration) CreateTable(tablename, engine, charset string, p ...func()) {
return "" m.TableName = tablename
m.Engine = engine
m.Charset = charset
m.ModifyType = "create"
}
// AlterTable set the ModifyType to alter
func (m *Migration) AlterTable(tablename string) {
m.TableName = tablename
m.ModifyType = "alter"
}
// NewCol creates a new standard column and attaches it to m struct
func (m *Migration) NewCol(name string) *Column {
col := &Column{Name: name}
m.AddColumns(col)
return col
}
//PriCol creates a new primary column and attaches it to m struct
func (m *Migration) PriCol(name string) *Column {
col := &Column{Name: name}
m.AddColumns(col)
m.AddPrimary(col)
return col
}
//UniCol creates / appends columns to specified unique key and attaches it to m struct
func (m *Migration) UniCol(uni, name string) *Column {
col := &Column{Name: name}
m.AddColumns(col)
uniqueOriginal := &Unique{}
for _, unique := range m.Uniques {
if unique.Definition == uni {
unique.AddColumnsToUnique(col)
uniqueOriginal = unique
}
}
if uniqueOriginal.Definition == "" {
unique := &Unique{Definition: uni}
unique.AddColumnsToUnique(col)
m.AddUnique(unique)
}
return col
}
//ForeignCol creates a new foreign column and returns the instance of column
func (m *Migration) ForeignCol(colname, foreigncol, foreigntable string) (foreign *Foreign) {
foreign = &Foreign{ForeignColumn: foreigncol, ForeignTable: foreigntable}
foreign.Name = colname
m.AddForeign(foreign)
return foreign
}
//SetOnDelete sets the on delete of foreign
func (foreign *Foreign) SetOnDelete(del string) *Foreign {
foreign.OnDelete = "ON DELETE" + del
return foreign
}
//SetOnUpdate sets the on update of foreign
func (foreign *Foreign) SetOnUpdate(update string) *Foreign {
foreign.OnUpdate = "ON UPDATE" + update
return foreign
}
//Remove marks the columns to be removed.
//it allows reverse m to create the column.
func (c *Column) Remove() {
c.remove = true
}
//SetAuto enables auto_increment of column (can be used once)
func (c *Column) SetAuto(inc bool) *Column {
if inc {
c.Inc = "auto_increment"
}
return c
}
//SetNullable sets whether the column may be NULL
func (c *Column) SetNullable(null bool) *Column {
if null {
c.Null = ""
} else {
c.Null = "NOT NULL"
}
return c
}
//SetDefault sets the default value, prepend with "DEFAULT "
func (c *Column) SetDefault(def string) *Column {
c.Default = "DEFAULT " + def
return c
}
//SetUnsigned sets the column to be unsigned int
func (c *Column) SetUnsigned(unsign bool) *Column {
if unsign {
c.Unsign = "UNSIGNED"
}
return c
}
//SetDataType sets the dataType of the column
func (c *Column) SetDataType(dataType string) *Column {
c.DataType = dataType
return c
}
//SetOldNullable allows reverting to the previous nullable setting on reverse migrations
func (c *RenameColumn) SetOldNullable(null bool) *RenameColumn {
if null {
c.OldNull = ""
} else {
c.OldNull = "NOT NULL"
}
return c
}
//SetOldDefault allows reverting to the previous default on reverse migrations
func (c *RenameColumn) SetOldDefault(def string) *RenameColumn {
c.OldDefault = def
return c
}
//SetOldUnsigned allows reverting to the previous unsigned setting on reverse migrations
func (c *RenameColumn) SetOldUnsigned(unsign bool) *RenameColumn {
if unsign {
c.OldUnsign = "UNSIGNED"
}
return c
}
//SetOldDataType allows reverting to the previous datatype on reverse migrations
func (c *RenameColumn) SetOldDataType(dataType string) *RenameColumn {
c.OldDataType = dataType
return c
}
//SetPrimary adds the column to the primary key (can be called any number of times, but only within a single migration)
func (c *Column) SetPrimary(m *Migration) *Column {
m.Primary = append(m.Primary, c)
return c
}
//AddColumnsToUnique adds the columns to Unique Struct
func (unique *Unique) AddColumnsToUnique(columns ...*Column) *Unique {
unique.Columns = append(unique.Columns, columns...)
return unique
}
//AddColumns adds columns to m struct
func (m *Migration) AddColumns(columns ...*Column) *Migration {
m.Columns = append(m.Columns, columns...)
return m
}
//AddPrimary adds the column to primary in m struct
func (m *Migration) AddPrimary(primary *Column) *Migration {
m.Primary = append(m.Primary, primary)
return m
}
//AddUnique adds the column to unique in m struct
func (m *Migration) AddUnique(unique *Unique) *Migration {
m.Uniques = append(m.Uniques, unique)
return m
}
//AddForeign adds the column to foreign in m struct
func (m *Migration) AddForeign(foreign *Foreign) *Migration {
m.Foreigns = append(m.Foreigns, foreign)
return m
}
//AddIndex adds the column to index in m struct
func (m *Migration) AddIndex(index *Index) *Migration {
m.Indexes = append(m.Indexes, index)
return m
}
//RenameColumn allows renaming of columns
func (m *Migration) RenameColumn(from, to string) *RenameColumn {
rename := &RenameColumn{OldName: from, NewName: to}
m.Renames = append(m.Renames, rename)
return rename
}
//GetSQL returns the generated sql depending on ModifyType
func (m *Migration) GetSQL() (sql string) {
sql = ""
switch m.ModifyType {
case "create":
{
sql += fmt.Sprintf("CREATE TABLE `%s` (", m.TableName)
for index, column := range m.Columns {
sql += fmt.Sprintf("\n `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
if len(m.Columns) > index+1 {
sql += ","
}
}
if len(m.Primary) > 0 {
sql += fmt.Sprintf(",\n PRIMARY KEY( ")
}
for index, column := range m.Primary {
sql += fmt.Sprintf(" `%s`", column.Name)
if len(m.Primary) > index+1 {
sql += ","
}
}
if len(m.Primary) > 0 {
sql += fmt.Sprintf(")")
}
for _, unique := range m.Uniques {
sql += fmt.Sprintf(",\n UNIQUE KEY `%s`( ", unique.Definition)
for index, column := range unique.Columns {
sql += fmt.Sprintf(" `%s`", column.Name)
if len(unique.Columns) > index+1 {
sql += ","
}
}
sql += fmt.Sprintf(")")
}
for _, foreign := range m.Foreigns {
sql += fmt.Sprintf(",\n `%s` %s %s %s %s %s", foreign.Name, foreign.DataType, foreign.Unsign, foreign.Null, foreign.Inc, foreign.Default)
sql += fmt.Sprintf(",\n KEY `%s_%s_foreign`(`%s`),", m.TableName, foreign.Column.Name, foreign.Column.Name)
sql += fmt.Sprintf("\n CONSTRAINT `%s_%s_foreign` FOREIGN KEY (`%s`) REFERENCES `%s` (`%s`) %s %s", m.TableName, foreign.Column.Name, foreign.Column.Name, foreign.ForeignTable, foreign.ForeignColumn, foreign.OnDelete, foreign.OnUpdate)
}
sql += fmt.Sprintf(")ENGINE=%s DEFAULT CHARSET=%s;", m.Engine, m.Charset)
break
}
case "alter":
{
sql += fmt.Sprintf("ALTER TABLE `%s` ", m.TableName)
for index, column := range m.Columns {
if !column.remove {
beego.BeeLogger.Info("col")
sql += fmt.Sprintf("\n ADD `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
} else {
sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
}
if len(m.Columns) > index {
sql += ","
}
}
for index, column := range m.Renames {
sql += fmt.Sprintf("CHANGE COLUMN `%s` `%s` %s %s %s %s %s", column.OldName, column.NewName, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
if len(m.Renames) > index+1 {
sql += ","
}
}
for index, foreign := range m.Foreigns {
sql += fmt.Sprintf("ADD `%s` %s %s %s %s %s", foreign.Name, foreign.DataType, foreign.Unsign, foreign.Null, foreign.Inc, foreign.Default)
sql += fmt.Sprintf(",\n ADD KEY `%s_%s_foreign`(`%s`)", m.TableName, foreign.Column.Name, foreign.Column.Name)
sql += fmt.Sprintf(",\n ADD CONSTRAINT `%s_%s_foreign` FOREIGN KEY (`%s`) REFERENCES `%s` (`%s`) %s %s", m.TableName, foreign.Column.Name, foreign.Column.Name, foreign.ForeignTable, foreign.ForeignColumn, foreign.OnDelete, foreign.OnUpdate)
if len(m.Foreigns) > index+1 {
sql += ","
}
}
sql += ";"
break
}
case "reverse":
{
sql += fmt.Sprintf("ALTER TABLE `%s`", m.TableName)
for index, column := range m.Columns {
if column.remove {
sql += fmt.Sprintf("\n ADD `%s` %s %s %s %s %s", column.Name, column.DataType, column.Unsign, column.Null, column.Inc, column.Default)
} else {
sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
}
if len(m.Columns) > index {
sql += ","
}
}
if len(m.Primary) > 0 {
sql += fmt.Sprintf("\n DROP PRIMARY KEY,")
}
for index, unique := range m.Uniques {
sql += fmt.Sprintf("\n DROP KEY `%s`", unique.Definition)
if len(m.Uniques) > index {
sql += ","
}
}
for index, column := range m.Renames {
sql += fmt.Sprintf("\n CHANGE COLUMN `%s` `%s` %s %s %s %s", column.NewName, column.OldName, column.OldDataType, column.OldUnsign, column.OldNull, column.OldDefault)
if len(m.Renames) > index {
sql += ","
}
}
for _, foreign := range m.Foreigns {
sql += fmt.Sprintf("\n DROP KEY `%s_%s_foreign`", m.TableName, foreign.Column.Name)
sql += fmt.Sprintf(",\n DROP FOREIGN KEY `%s_%s_foreign`", m.TableName, foreign.Column.Name)
sql += fmt.Sprintf(",\n DROP COLUMN `%s`", foreign.Name)
}
sql += ";"
}
case "delete":
{
sql += fmt.Sprintf("DROP TABLE IF EXISTS `%s`;", m.TableName)
}
}
return
} }
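A minimal sketch (not part of the diff) of the builder API introduced above: compose a table in memory and print the statement GetSQL generates for it. The table and column names are illustrative.

package main

import (
	"fmt"

	"github.com/astaxie/beego/migration"
)

func main() {
	m := &migration.Migration{}
	// CreateTable sets ModifyType to "create"; GetSQL then renders the columns,
	// primary key and unique keys registered through the builder methods.
	m.CreateTable("user", "InnoDB", "utf8")
	m.PriCol("id").SetAuto(true).SetNullable(false).SetDataType("INT(10)").SetUnsigned(true)
	m.NewCol("name").SetDataType("VARCHAR(255)").SetNullable(false)
	m.UniCol("uk_name", "name")

	fmt.Println(m.GetSQL()) // CREATE TABLE `user` ( ... ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
}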

migration/doc.go Normal file

@ -0,0 +1,32 @@
// Package migration enables you to generate migrations back and forth. It generates both the forward and the reverse migration.
//
// //Creates a table
// m.CreateTable("tablename","InnoDB","utf8");
//
// //Alter a table
// m.AlterTable("tablename")
//
// Standard Column Methods
// * SetDataType
// * SetNullable
// * SetDefault
// * SetUnsigned (use only on integer types; other types produce an error)
//
// //Sets a primary column, multiple calls allowed, standard column methods available
// m.PriCol("id").SetAuto(true).SetNullable(false).SetDataType("INT(10)").SetUnsigned(true)
//
// //UniCol can be used multiple times and allows standard Column methods. Use the same "index" string to add columns to the same unique index
// m.UniCol("index","column")
//
// //Standard column initialisation; on an alter you can call .Remove() after NewCol("") to drop the column
// m.NewCol("name").SetDataType("VARCHAR(255) COLLATE utf8_unicode_ci").SetNullable(false)
// m.NewCol("value").SetDataType("DOUBLE(8,2)").SetNullable(false)
//
// //Rename Columns: only use with AlterTable, it doesn't work with Create; prefix standard column methods with "Old" to
// //create a true reversible migration, e.g. SetOldDataType("DOUBLE(12,3)")
// m.RenameColumn("from","to")...
//
// //Foreign Columns: only single columns are supported; SetOnDelete & SetOnUpdate are available, call as appropriate.
// //Supports standard column methods, automatic reverse.
// m.ForeignCol("local_col","foreign_col","foreign_table")
package migration
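Building on the doc comment above, a sketch of an alter-table migration using the rename and foreign-key helpers; the names and data types are illustrative only.

package main

import (
	"fmt"

	"github.com/astaxie/beego/migration"
)

func main() {
	m := &migration.Migration{}
	m.AlterTable("user")

	// Add a nullable column; calling Remove() on it instead would drop the column.
	m.NewCol("nickname").SetDataType("VARCHAR(64)").SetNullable(true)

	// Rename a column; the "Old" setters record the previous definition so the
	// reverse migration can restore it.
	rc := m.RenameColumn("name", "full_name")
	rc.SetDataType("VARCHAR(255)")
	rc.SetOldDataType("VARCHAR(255)")

	// Single-column foreign key; the leading space compensates for the direct
	// string concatenation inside SetOnDelete.
	m.ForeignCol("profile_id", "id", "profile").SetOnDelete(" CASCADE").SetDataType("INT(10)")

	fmt.Println(m.GetSQL()) // ALTER TABLE `user` ADD ..., CHANGE COLUMN ..., ADD CONSTRAINT ...;
}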


@ -52,6 +52,26 @@ type Migrationer interface {
GetCreated() int64 GetCreated() int64
} }
//Migration defines the migrations by either SQL or DDL
type Migration struct {
sqls []string
Created string
TableName string
Engine string
Charset string
ModifyType string
Columns []*Column
Indexes []*Index
Primary []*Column
Uniques []*Unique
Foreigns []*Foreign
Renames []*RenameColumn
RemoveColumns []*Column
RemoveIndexes []*Index
RemoveUniques []*Unique
RemoveForeigns []*Foreign
}
var ( var (
migrationMap map[string]Migrationer migrationMap map[string]Migrationer
) )
@ -60,20 +80,34 @@ func init() {
migrationMap = make(map[string]Migrationer) migrationMap = make(map[string]Migrationer)
} }
// Migration the basic type which will implement the basic type
type Migration struct {
sqls []string
Created string
}
// Up implement in the Inheritance struct for upgrade // Up implement in the Inheritance struct for upgrade
func (m *Migration) Up() { func (m *Migration) Up() {
switch m.ModifyType {
case "reverse":
m.ModifyType = "alter"
case "delete":
m.ModifyType = "create"
}
m.sqls = append(m.sqls, m.GetSQL())
} }
// Down implement in the Inheritance struct for down // Down implement in the Inheritance struct for down
func (m *Migration) Down() { func (m *Migration) Down() {
switch m.ModifyType {
case "alter":
m.ModifyType = "reverse"
case "create":
m.ModifyType = "delete"
}
m.sqls = append(m.sqls, m.GetSQL())
}
//Migrate adds the SQL to the execution list
func (m *Migration) Migrate(migrationType string) {
m.ModifyType = migrationType
m.sqls = append(m.sqls, m.GetSQL())
} }
// SQL add sql want to execute // SQL add sql want to execute
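A short sketch of how the reworked Up and Down cooperate with GetSQL: Down flips a "create" into a "delete" (and an "alter" into a "reverse"), so the same column definitions yield both directions. In a real migration only one of the two runs; both are shown here purely to illustrate the flip.

package main

import (
	"github.com/astaxie/beego/migration"
)

func main() {
	m := &migration.Migration{}
	m.CreateTable("tag", "InnoDB", "utf8")
	m.PriCol("id").SetAuto(true).SetNullable(false).SetDataType("INT(10)").SetUnsigned(true)

	m.Up()   // ModifyType stays "create": queues the CREATE TABLE statement
	m.Down() // ModifyType becomes "delete": queues DROP TABLE IF EXISTS `tag`;
}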


@ -267,13 +267,12 @@ func addPrefix(t *Tree, prefix string) {
addPrefix(t.wildcard, prefix) addPrefix(t.wildcard, prefix)
} }
for _, l := range t.leaves { for _, l := range t.leaves {
if c, ok := l.runObject.(*controllerInfo); ok { if c, ok := l.runObject.(*ControllerInfo); ok {
if !strings.HasPrefix(c.pattern, prefix) { if !strings.HasPrefix(c.pattern, prefix) {
c.pattern = prefix + c.pattern c.pattern = prefix + c.pattern
} }
} }
} }
} }
// NSCond is Namespace Condition // NSCond is Namespace Condition
@ -284,16 +283,16 @@ func NSCond(cond namespaceCond) LinkNamespace {
} }
// NSBefore Namespace BeforeRouter filter // NSBefore Namespace BeforeRouter filter
func NSBefore(filiterList ...FilterFunc) LinkNamespace { func NSBefore(filterList ...FilterFunc) LinkNamespace {
return func(ns *Namespace) { return func(ns *Namespace) {
ns.Filter("before", filiterList...) ns.Filter("before", filterList...)
} }
} }
// NSAfter add Namespace FinishRouter filter // NSAfter add Namespace FinishRouter filter
func NSAfter(filiterList ...FilterFunc) LinkNamespace { func NSAfter(filterList ...FilterFunc) LinkNamespace {
return func(ns *Namespace) { return func(ns *Namespace) {
ns.Filter("after", filiterList...) ns.Filter("after", filterList...)
} }
} }
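For illustration, a minimal namespace using the renamed filterList parameter: an assumed auth filter runs before routing, and a simple GET handler answers under the namespace prefix. The header name and routes are hypothetical.

package main

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/context"
)

func main() {
	// Hypothetical auth filter attached via NSBefore.
	auth := func(ctx *context.Context) {
		if ctx.Input.Header("X-Token") == "" {
			ctx.Output.SetStatus(401)
			ctx.Output.Body([]byte("unauthorized"))
		}
	}

	ns := beego.NewNamespace("/api/v1",
		beego.NSBefore(auth),
		beego.NSGet("/ping", func(ctx *context.Context) {
			ctx.Output.Body([]byte("pong"))
		}),
	)
	beego.AddNamespace(ns)
	beego.Run()
}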


@ -139,10 +139,7 @@ func TestNamespaceCond(t *testing.T) {
ns := NewNamespace("/v2") ns := NewNamespace("/v2")
ns.Cond(func(ctx *context.Context) bool { ns.Cond(func(ctx *context.Context) bool {
if ctx.Input.Domain() == "beego.me" { return ctx.Input.Domain() == "beego.me"
return true
}
return false
}). }).
AutoRouter(&TestController{}) AutoRouter(&TestController{})
AddNamespace(ns) AddNamespace(ns)


@ -150,7 +150,7 @@ func (d *commandSyncDb) Run() error {
} }
for _, fi := range mi.fields.fieldsDB { for _, fi := range mi.fields.fieldsDB {
if _, ok := columns[fi.column]; ok == false { if _, ok := columns[fi.column]; !ok {
fields = append(fields, fi) fields = append(fields, fi)
} }
} }
@ -175,7 +175,7 @@ func (d *commandSyncDb) Run() error {
} }
for _, idx := range indexes[mi.table] { for _, idx := range indexes[mi.table] {
if d.al.DbBaser.IndexExists(db, idx.Table, idx.Name) == false { if !d.al.DbBaser.IndexExists(db, idx.Table, idx.Name) {
if !d.noInfo { if !d.noInfo {
fmt.Printf("create index `%s` for table `%s`\n", idx.Name, idx.Table) fmt.Printf("create index `%s` for table `%s`\n", idx.Name, idx.Table)
} }


@ -89,7 +89,7 @@ checkColumn:
col = T["float64"] col = T["float64"]
case TypeDecimalField: case TypeDecimalField:
s := T["float64-decimal"] s := T["float64-decimal"]
if strings.Index(s, "%d") == -1 { if !strings.Contains(s, "%d") {
col = s col = s
} else { } else {
col = fmt.Sprintf(s, fi.digits, fi.decimals) col = fmt.Sprintf(s, fi.digits, fi.decimals)
@ -120,7 +120,7 @@ func getColumnAddQuery(al *alias, fi *fieldInfo) string {
Q := al.DbBaser.TableQuote() Q := al.DbBaser.TableQuote()
typ := getColumnTyp(al, fi) typ := getColumnTyp(al, fi)
if fi.null == false { if !fi.null {
typ += " " + "NOT NULL" typ += " " + "NOT NULL"
} }
@ -172,7 +172,7 @@ func getDbCreateSQL(al *alias) (sqls []string, tableIndexes map[string][]dbIndex
} else { } else {
column += col column += col
if fi.null == false { if !fi.null {
column += " " + "NOT NULL" column += " " + "NOT NULL"
} }
@ -192,7 +192,7 @@ func getDbCreateSQL(al *alias) (sqls []string, tableIndexes map[string][]dbIndex
} }
} }
if strings.Index(column, "%COL%") != -1 { if strings.Contains(column, "%COL%") {
column = strings.Replace(column, "%COL%", fi.column, -1) column = strings.Replace(column, "%COL%", fi.column, -1)
} }


@ -87,7 +87,7 @@ func (d *dbBase) collectValues(mi *modelInfo, ind reflect.Value, cols []string,
} else { } else {
panic(fmt.Errorf("wrong db field/column name `%s` for model `%s`", column, mi.fullName)) panic(fmt.Errorf("wrong db field/column name `%s` for model `%s`", column, mi.fullName))
} }
if fi.dbcol == false || fi.auto && skipAuto { if !fi.dbcol || fi.auto && skipAuto {
continue continue
} }
value, err := d.collectFieldValue(mi, fi, ind, insert, tz) value, err := d.collectFieldValue(mi, fi, ind, insert, tz)
@ -224,7 +224,7 @@ func (d *dbBase) collectFieldValue(mi *modelInfo, fi *fieldInfo, ind reflect.Val
value = nil value = nil
} }
} }
if fi.null == false && value == nil { if !fi.null && value == nil {
return nil, fmt.Errorf("field `%s` cannot be NULL", fi.fullName) return nil, fmt.Errorf("field `%s` cannot be NULL", fi.fullName)
} }
} }
@ -271,7 +271,7 @@ func (d *dbBase) PrepareInsert(q dbQuerier, mi *modelInfo) (stmtQuerier, string,
dbcols := make([]string, 0, len(mi.fields.dbcols)) dbcols := make([]string, 0, len(mi.fields.dbcols))
marks := make([]string, 0, len(mi.fields.dbcols)) marks := make([]string, 0, len(mi.fields.dbcols))
for _, fi := range mi.fields.fieldsDB { for _, fi := range mi.fields.fieldsDB {
if fi.auto == false { if !fi.auto {
dbcols = append(dbcols, fi.column) dbcols = append(dbcols, fi.column)
marks = append(marks, "?") marks = append(marks, "?")
} }
@ -326,7 +326,7 @@ func (d *dbBase) Read(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Lo
} else { } else {
// default use pk value as where condition. // default use pk value as where condition.
pkColumn, pkValue, ok := getExistPk(mi, ind) pkColumn, pkValue, ok := getExistPk(mi, ind)
if ok == false { if !ok {
return ErrMissPK return ErrMissPK
} }
whereCols = []string{pkColumn} whereCols = []string{pkColumn}
@ -507,10 +507,9 @@ func (d *dbBase) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a
case DRPostgres: case DRPostgres:
if len(args) == 0 { if len(args) == 0 {
return 0, fmt.Errorf("`%s` use InsertOrUpdate must have a conflict column", a.DriverName) return 0, fmt.Errorf("`%s` use InsertOrUpdate must have a conflict column", a.DriverName)
} else { }
args0 = strings.ToLower(args[0]) args0 = strings.ToLower(args[0])
iouStr = fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET", args0) iouStr = fmt.Sprintf("ON CONFLICT (%s) DO UPDATE SET", args0)
}
default: default:
return 0, fmt.Errorf("`%s` nonsupport InsertOrUpdate in beego", a.DriverName) return 0, fmt.Errorf("`%s` nonsupport InsertOrUpdate in beego", a.DriverName)
} }
@ -592,7 +591,7 @@ func (d *dbBase) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a
row := q.QueryRow(query, values...) row := q.QueryRow(query, values...)
var id int64 var id int64
err = row.Scan(&id) err = row.Scan(&id)
if err.Error() == `pq: syntax error at or near "ON"` { if err != nil && err.Error() == `pq: syntax error at or near "ON"` {
err = fmt.Errorf("postgres version must 9.5 or higher") err = fmt.Errorf("postgres version must 9.5 or higher")
} }
return id, err return id, err
@ -601,7 +600,7 @@ func (d *dbBase) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a
// execute update sql dbQuerier with given struct reflect.Value. // execute update sql dbQuerier with given struct reflect.Value.
func (d *dbBase) Update(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) { func (d *dbBase) Update(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) {
pkName, pkValue, ok := getExistPk(mi, ind) pkName, pkValue, ok := getExistPk(mi, ind)
if ok == false { if !ok {
return 0, ErrMissPK return 0, ErrMissPK
} }
@ -654,7 +653,7 @@ func (d *dbBase) Delete(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.
} else { } else {
// default use pk value as where condition. // default use pk value as where condition.
pkColumn, pkValue, ok := getExistPk(mi, ind) pkColumn, pkValue, ok := getExistPk(mi, ind)
if ok == false { if !ok {
return 0, ErrMissPK return 0, ErrMissPK
} }
whereCols = []string{pkColumn} whereCols = []string{pkColumn}
@ -699,7 +698,7 @@ func (d *dbBase) UpdateBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Con
columns := make([]string, 0, len(params)) columns := make([]string, 0, len(params))
values := make([]interface{}, 0, len(params)) values := make([]interface{}, 0, len(params))
for col, val := range params { for col, val := range params {
if fi, ok := mi.fields.GetByAny(col); ok == false || fi.dbcol == false { if fi, ok := mi.fields.GetByAny(col); !ok || !fi.dbcol {
panic(fmt.Errorf("wrong field/column name `%s`", col)) panic(fmt.Errorf("wrong field/column name `%s`", col))
} else { } else {
columns = append(columns, fi.column) columns = append(columns, fi.column)
@ -834,7 +833,11 @@ func (d *dbBase) DeleteBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Con
if err := rs.Scan(&ref); err != nil { if err := rs.Scan(&ref); err != nil {
return 0, err return 0, err
} }
args = append(args, reflect.ValueOf(ref).Interface()) pkValue, err := d.convertValueFromDB(mi.fields.pk, reflect.ValueOf(ref).Interface(), tz)
if err != nil {
return 0, err
}
args = append(args, pkValue)
cnt++ cnt++
} }
@ -929,7 +932,7 @@ func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condi
if hasRel { if hasRel {
for _, fi := range mi.fields.fieldsDB { for _, fi := range mi.fields.fieldsDB {
if fi.fieldType&IsRelField > 0 { if fi.fieldType&IsRelField > 0 {
if maps[fi.column] == false { if !maps[fi.column] {
tCols = append(tCols, fi.column) tCols = append(tCols, fi.column)
} }
} }
@ -987,7 +990,7 @@ func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condi
var cnt int64 var cnt int64
for rs.Next() { for rs.Next() {
if one && cnt == 0 || one == false { if one && cnt == 0 || !one {
if err := rs.Scan(refs...); err != nil { if err := rs.Scan(refs...); err != nil {
return 0, err return 0, err
} }
@ -1067,7 +1070,7 @@ func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condi
cnt++ cnt++
} }
if one == false { if !one {
if cnt > 0 { if cnt > 0 {
ind.Set(slice) ind.Set(slice)
} else { } else {
@ -1110,7 +1113,7 @@ func (d *dbBase) Count(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition
// generate sql with replacing operator string placeholders and replaced values. // generate sql with replacing operator string placeholders and replaced values.
func (d *dbBase) GenerateOperatorSQL(mi *modelInfo, fi *fieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) { func (d *dbBase) GenerateOperatorSQL(mi *modelInfo, fi *fieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) {
sql := "" var sql string
params := getFlatParams(fi, args, tz) params := getFlatParams(fi, args, tz)
if len(params) == 0 { if len(params) == 0 {
@ -1357,7 +1360,7 @@ end:
func (d *dbBase) setFieldValue(fi *fieldInfo, value interface{}, field reflect.Value) (interface{}, error) { func (d *dbBase) setFieldValue(fi *fieldInfo, value interface{}, field reflect.Value) (interface{}, error) {
fieldType := fi.fieldType fieldType := fi.fieldType
isNative := fi.isFielder == false isNative := !fi.isFielder
setValue: setValue:
switch { switch {
@ -1533,7 +1536,7 @@ setValue:
} }
} }
if isNative == false { if !isNative {
fd := field.Addr().Interface().(Fielder) fd := field.Addr().Interface().(Fielder)
err := fd.SetRaw(value) err := fd.SetRaw(value)
if err != nil { if err != nil {
@ -1594,7 +1597,7 @@ func (d *dbBase) ReadValues(q dbQuerier, qs *querySet, mi *modelInfo, cond *Cond
infos = make([]*fieldInfo, 0, len(exprs)) infos = make([]*fieldInfo, 0, len(exprs))
for _, ex := range exprs { for _, ex := range exprs {
index, name, fi, suc := tables.parseExprs(mi, strings.Split(ex, ExprSep)) index, name, fi, suc := tables.parseExprs(mi, strings.Split(ex, ExprSep))
if suc == false { if !suc {
panic(fmt.Errorf("unknown field/column name `%s`", ex)) panic(fmt.Errorf("unknown field/column name `%s`", ex))
} }
cols = append(cols, fmt.Sprintf("%s.%s%s%s %s%s%s", index, Q, fi.column, Q, Q, name, Q)) cols = append(cols, fmt.Sprintf("%s.%s%s%s %s%s%s", index, Q, fi.column, Q, Q, name, Q))
@ -1733,7 +1736,7 @@ func (d *dbBase) TableQuote() string {
return "`" return "`"
} }
// replace value placeholer in parametered sql string. // replace value placeholder in parametered sql string.
func (d *dbBase) ReplaceMarks(query *string) { func (d *dbBase) ReplaceMarks(query *string) {
// default use `?` as mark, do nothing // default use `?` as mark, do nothing
} }


@ -60,6 +60,8 @@ var (
"sqlite3": DRSqlite, "sqlite3": DRSqlite,
"tidb": DRTiDB, "tidb": DRTiDB,
"oracle": DROracle, "oracle": DROracle,
"oci8": DROracle, // github.com/mattn/go-oci8
"ora": DROracle, //https://github.com/rana/ora
} }
dbBasers = map[DriverType]dbBaser{ dbBasers = map[DriverType]dbBaser{
DRMySQL: newdbBaseMysql(), DRMySQL: newdbBaseMysql(),
@ -186,7 +188,7 @@ func addAliasWthDB(aliasName, driverName string, db *sql.DB) (*alias, error) {
return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error()) return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error())
} }
if dataBaseCache.add(aliasName, al) == false { if !dataBaseCache.add(aliasName, al) {
return nil, fmt.Errorf("DataBase alias name `%s` already registered, cannot reuse", aliasName) return nil, fmt.Errorf("DataBase alias name `%s` already registered, cannot reuse", aliasName)
} }
@ -244,11 +246,11 @@ end:
// RegisterDriver Register a database driver use specify driver name, this can be definition the driver is which database type. // RegisterDriver Register a database driver use specify driver name, this can be definition the driver is which database type.
func RegisterDriver(driverName string, typ DriverType) error { func RegisterDriver(driverName string, typ DriverType) error {
if t, ok := drivers[driverName]; ok == false { if t, ok := drivers[driverName]; !ok {
drivers[driverName] = typ drivers[driverName] = typ
} else { } else {
if t != typ { if t != typ {
return fmt.Errorf("driverName `%s` db driver already registered and is other type\n", driverName) return fmt.Errorf("driverName `%s` db driver already registered and is other type", driverName)
} }
} }
return nil return nil
@ -259,7 +261,7 @@ func SetDataBaseTZ(aliasName string, tz *time.Location) error {
if al, ok := dataBaseCache.get(aliasName); ok { if al, ok := dataBaseCache.get(aliasName); ok {
al.TZ = tz al.TZ = tz
} else { } else {
return fmt.Errorf("DataBase alias name `%s` not registered\n", aliasName) return fmt.Errorf("DataBase alias name `%s` not registered", aliasName)
} }
return nil return nil
} }
@ -294,5 +296,5 @@ func GetDB(aliasNames ...string) (*sql.DB, error) {
if ok { if ok {
return al.DB, nil return al.DB, nil
} }
return nil, fmt.Errorf("DataBase of alias name `%s` not found\n", name) return nil, fmt.Errorf("DataBase of alias name `%s` not found", name)
} }


@ -103,8 +103,7 @@ func (d *dbBaseMysql) IndexExists(db dbQuerier, table string, name string) bool
// If no will insert // If no will insert
// Add "`" for mysql sql building // Add "`" for mysql sql building
func (d *dbBaseMysql) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) { func (d *dbBaseMysql) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) {
var iouStr string
iouStr := ""
argsMap := map[string]string{} argsMap := map[string]string{}
iouStr = "ON DUPLICATE KEY UPDATE" iouStr = "ON DUPLICATE KEY UPDATE"


@ -94,3 +94,43 @@ func (d *dbBaseOracle) IndexExists(db dbQuerier, table string, name string) bool
row.Scan(&cnt) row.Scan(&cnt)
return cnt > 0 return cnt > 0
} }
// execute insert sql with given struct and given values.
// insert the given values, not the field values in struct.
func (d *dbBaseOracle) InsertValue(q dbQuerier, mi *modelInfo, isMulti bool, names []string, values []interface{}) (int64, error) {
Q := d.ins.TableQuote()
marks := make([]string, len(names))
for i := range marks {
marks[i] = ":" + names[i]
}
sep := fmt.Sprintf("%s, %s", Q, Q)
qmarks := strings.Join(marks, ", ")
columns := strings.Join(names, sep)
multi := len(values) / len(names)
if isMulti {
qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
}
query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
d.ins.ReplaceMarks(&query)
if isMulti || !d.ins.HasReturningID(mi, &query) {
res, err := q.Exec(query, values...)
if err == nil {
if isMulti {
return res.RowsAffected()
}
return res.LastInsertId()
}
return 0, err
}
row := q.QueryRow(query, values...)
var id int64
err := row.Scan(&id)
return id, err
}


@ -134,7 +134,7 @@ func (d *dbBaseSqlite) IndexExists(db dbQuerier, table string, name string) bool
defer rows.Close() defer rows.Close()
for rows.Next() { for rows.Next() {
var tmp, index sql.NullString var tmp, index sql.NullString
rows.Scan(&tmp, &index, &tmp) rows.Scan(&tmp, &index, &tmp, &tmp, &tmp)
if name == index.String { if name == index.String {
return true return true
} }


@ -63,7 +63,7 @@ func (t *dbTables) set(names []string, mi *modelInfo, fi *fieldInfo, inner bool)
// add table info to collection. // add table info to collection.
func (t *dbTables) add(names []string, mi *modelInfo, fi *fieldInfo, inner bool) (*dbTable, bool) { func (t *dbTables) add(names []string, mi *modelInfo, fi *fieldInfo, inner bool) (*dbTable, bool) {
name := strings.Join(names, ExprSep) name := strings.Join(names, ExprSep)
if _, ok := t.tablesM[name]; ok == false { if _, ok := t.tablesM[name]; !ok {
i := len(t.tables) + 1 i := len(t.tables) + 1
jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil} jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
t.tablesM[name] = jt t.tablesM[name] = jt
@ -261,7 +261,7 @@ loopFor:
fiN, okN = mmi.fields.GetByAny(exprs[i+1]) fiN, okN = mmi.fields.GetByAny(exprs[i+1])
} }
if isRel && (fi.mi.isThrough == false || num != i) { if isRel && (!fi.mi.isThrough || num != i) {
if fi.null || t.skipEnd { if fi.null || t.skipEnd {
inner = false inner = false
} }
@ -364,7 +364,7 @@ func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (whe
} }
index, _, fi, suc := t.parseExprs(mi, exprs) index, _, fi, suc := t.parseExprs(mi, exprs)
if suc == false { if !suc {
panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(p.exprs, ExprSep))) panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(p.exprs, ExprSep)))
} }
@ -383,7 +383,7 @@ func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (whe
} }
} }
if sub == false && where != "" { if !sub && where != "" {
where = "WHERE " + where where = "WHERE " + where
} }
@ -403,7 +403,7 @@ func (t *dbTables) getGroupSQL(groups []string) (groupSQL string) {
exprs := strings.Split(group, ExprSep) exprs := strings.Split(group, ExprSep)
index, _, fi, suc := t.parseExprs(t.mi, exprs) index, _, fi, suc := t.parseExprs(t.mi, exprs)
if suc == false { if !suc {
panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep))) panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
} }
@ -432,7 +432,7 @@ func (t *dbTables) getOrderSQL(orders []string) (orderSQL string) {
exprs := strings.Split(order, ExprSep) exprs := strings.Split(order, ExprSep)
index, _, fi, suc := t.parseExprs(t.mi, exprs) index, _, fi, suc := t.parseExprs(t.mi, exprs)
if suc == false { if !suc {
panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep))) panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
} }


@ -75,7 +75,7 @@ func registerModel(PrefixOrSuffix string, model interface{}, isPrefix bool) {
} }
if mi.fields.pk == nil { if mi.fields.pk == nil {
fmt.Printf("<orm.RegisterModel> `%s` need a primary key field, default use 'id' if not set\n", name) fmt.Printf("<orm.RegisterModel> `%s` needs a primary key field, default is to use 'id' if not set\n", name)
os.Exit(2) os.Exit(2)
} }
@ -128,7 +128,7 @@ func bootStrap() {
if i := strings.LastIndex(fi.relThrough, "."); i != -1 && len(fi.relThrough) > (i+1) { if i := strings.LastIndex(fi.relThrough, "."); i != -1 && len(fi.relThrough) > (i+1) {
pn := fi.relThrough[:i] pn := fi.relThrough[:i]
rmi, ok := modelCache.getByFullName(fi.relThrough) rmi, ok := modelCache.getByFullName(fi.relThrough)
if ok == false || pn != rmi.pkg { if !ok || pn != rmi.pkg {
err = fmt.Errorf("field `%s` wrong rel_through value `%s` cannot find table", fi.fullName, fi.relThrough) err = fmt.Errorf("field `%s` wrong rel_through value `%s` cannot find table", fi.fullName, fi.relThrough)
goto end goto end
} }
@ -171,7 +171,7 @@ func bootStrap() {
break break
} }
} }
if inModel == false { if !inModel {
rmi := fi.relModelInfo rmi := fi.relModelInfo
ffi := new(fieldInfo) ffi := new(fieldInfo)
ffi.name = mi.name ffi.name = mi.name
@ -185,7 +185,7 @@ func bootStrap() {
} else { } else {
ffi.fieldType = RelReverseMany ffi.fieldType = RelReverseMany
} }
if rmi.fields.Add(ffi) == false { if !rmi.fields.Add(ffi) {
added := false added := false
for cnt := 0; cnt < 5; cnt++ { for cnt := 0; cnt < 5; cnt++ {
ffi.name = fmt.Sprintf("%s%d", mi.name, cnt) ffi.name = fmt.Sprintf("%s%d", mi.name, cnt)
@ -195,7 +195,7 @@ func bootStrap() {
break break
} }
} }
if added == false { if !added {
panic(fmt.Errorf("cannot generate auto reverse field info `%s` to `%s`", fi.fullName, ffi.fullName)) panic(fmt.Errorf("cannot generate auto reverse field info `%s` to `%s`", fi.fullName, ffi.fullName))
} }
} }
@ -248,7 +248,7 @@ func bootStrap() {
break mForA break mForA
} }
} }
if found == false { if !found {
err = fmt.Errorf("reverse field `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName) err = fmt.Errorf("reverse field `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
goto end goto end
} }
@ -267,7 +267,7 @@ func bootStrap() {
break mForB break mForB
} }
} }
if found == false { if !found {
mForC: mForC:
for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelManyToMany] { for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelManyToMany] {
conditions := fi.relThrough != "" && fi.relThrough == ffi.relThrough || conditions := fi.relThrough != "" && fi.relThrough == ffi.relThrough ||
@ -287,7 +287,7 @@ func bootStrap() {
} }
} }
} }
if found == false { if !found {
err = fmt.Errorf("reverse field for `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName) err = fmt.Errorf("reverse field for `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
goto end goto end
} }


@ -47,7 +47,7 @@ func (f *fields) Add(fi *fieldInfo) (added bool) {
} else { } else {
return return
} }
if _, ok := f.fieldsByType[fi.fieldType]; ok == false { if _, ok := f.fieldsByType[fi.fieldType]; !ok {
f.fieldsByType[fi.fieldType] = make([]*fieldInfo, 0) f.fieldsByType[fi.fieldType] = make([]*fieldInfo, 0)
} }
f.fieldsByType[fi.fieldType] = append(f.fieldsByType[fi.fieldType], fi) f.fieldsByType[fi.fieldType] = append(f.fieldsByType[fi.fieldType], fi)
@ -334,12 +334,12 @@ checkType:
switch onDelete { switch onDelete {
case odCascade, odDoNothing: case odCascade, odDoNothing:
case odSetDefault: case odSetDefault:
if initial.Exist() == false { if !initial.Exist() {
err = errors.New("on_delete: set_default need set field a default value") err = errors.New("on_delete: set_default need set field a default value")
goto end goto end
} }
case odSetNULL: case odSetNULL:
if fi.null == false { if !fi.null {
err = errors.New("on_delete: set_null need set field null") err = errors.New("on_delete: set_null need set field null")
goto end goto end
} }


@ -78,7 +78,7 @@ func addModelFields(mi *modelInfo, ind reflect.Value, mName string, index []int)
fi.fieldIndex = append(index, i) fi.fieldIndex = append(index, i)
fi.mi = mi fi.mi = mi
fi.inModel = true fi.inModel = true
if mi.fields.Add(fi) == false { if !mi.fields.Add(fi) {
err = fmt.Errorf("duplicate column name: %s", fi.column) err = fmt.Errorf("duplicate column name: %s", fi.column)
break break
} }


@ -107,7 +107,7 @@ func (o *orm) getMiInd(md interface{}, needPtr bool) (mi *modelInfo, ind reflect
if mi, ok := modelCache.getByFullName(name); ok { if mi, ok := modelCache.getByFullName(name); ok {
return mi, ind return mi, ind
} }
panic(fmt.Errorf("<Ormer> table: `%s` not found, maybe not RegisterModel", name)) panic(fmt.Errorf("<Ormer> table: `%s` not found, make sure it was registered with `RegisterModel()`", name))
} }
// get field info from model info by given field name // get field info from model info by given field name
@ -122,21 +122,13 @@ func (o *orm) getFieldInfo(mi *modelInfo, name string) *fieldInfo {
// read data to model // read data to model
func (o *orm) Read(md interface{}, cols ...string) error { func (o *orm) Read(md interface{}, cols ...string) error {
mi, ind := o.getMiInd(md, true) mi, ind := o.getMiInd(md, true)
err := o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, false) return o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, false)
if err != nil {
return err
}
return nil
} }
// read data to model, like Read(), but use "SELECT FOR UPDATE" form // read data to model, like Read(), but use "SELECT FOR UPDATE" form
func (o *orm) ReadForUpdate(md interface{}, cols ...string) error { func (o *orm) ReadForUpdate(md interface{}, cols ...string) error {
mi, ind := o.getMiInd(md, true) mi, ind := o.getMiInd(md, true)
err := o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, true) return o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols, true)
if err != nil {
return err
}
return nil
} }
// Try to read a row from the database, or insert one if it doesn't exist // Try to read a row from the database, or insert one if it doesn't exist
@ -238,15 +230,11 @@ func (o *orm) InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64
// cols set the columns those want to update. // cols set the columns those want to update.
func (o *orm) Update(md interface{}, cols ...string) (int64, error) { func (o *orm) Update(md interface{}, cols ...string) (int64, error) {
mi, ind := o.getMiInd(md, true) mi, ind := o.getMiInd(md, true)
num, err := o.alias.DbBaser.Update(o.db, mi, ind, o.alias.TZ, cols) return o.alias.DbBaser.Update(o.db, mi, ind, o.alias.TZ, cols)
if err != nil {
return num, err
}
return num, nil
} }
// delete model in database // delete model in database
// cols shows the delete conditions values read from. deafult is pk // cols shows the delete conditions values read from. default is pk
func (o *orm) Delete(md interface{}, cols ...string) (int64, error) { func (o *orm) Delete(md interface{}, cols ...string) (int64, error) {
mi, ind := o.getMiInd(md, true) mi, ind := o.getMiInd(md, true)
num, err := o.alias.DbBaser.Delete(o.db, mi, ind, o.alias.TZ, cols) num, err := o.alias.DbBaser.Delete(o.db, mi, ind, o.alias.TZ, cols)
@ -361,7 +349,7 @@ func (o *orm) queryRelated(md interface{}, name string) (*modelInfo, *fieldInfo,
fi := o.getFieldInfo(mi, name) fi := o.getFieldInfo(mi, name)
_, _, exist := getExistPk(mi, ind) _, _, exist := getExistPk(mi, ind)
if exist == false { if !exist {
panic(ErrMissPK) panic(ErrMissPK)
} }
@ -432,7 +420,7 @@ func (o *orm) getRelQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
// table name can be string or struct. // table name can be string or struct.
// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)), // e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
func (o *orm) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) { func (o *orm) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) {
name := "" var name string
if table, ok := ptrStructOrTableName.(string); ok { if table, ok := ptrStructOrTableName.(string); ok {
name = snakeString(table) name = snakeString(table)
if mi, ok := modelCache.get(name); ok { if mi, ok := modelCache.get(name); ok {
@ -489,7 +477,7 @@ func (o *orm) Begin() error {
// commit transaction // commit transaction
func (o *orm) Commit() error { func (o *orm) Commit() error {
if o.isTx == false { if !o.isTx {
return ErrTxDone return ErrTxDone
} }
err := o.db.(txEnder).Commit() err := o.db.(txEnder).Commit()
@ -504,7 +492,7 @@ func (o *orm) Commit() error {
// rollback transaction // rollback transaction
func (o *orm) Rollback() error { func (o *orm) Rollback() error {
if o.isTx == false { if !o.isTx {
return ErrTxDone return ErrTxDone
} }
err := o.db.(txEnder).Rollback() err := o.db.(txEnder).Rollback()


@ -72,7 +72,7 @@ func (o *queryM2M) Add(mds ...interface{}) (int64, error) {
} }
_, v1, exist := getExistPk(o.mi, o.ind) _, v1, exist := getExistPk(o.mi, o.ind)
if exist == false { if !exist {
panic(ErrMissPK) panic(ErrMissPK)
} }
@ -87,7 +87,7 @@ func (o *queryM2M) Add(mds ...interface{}) (int64, error) {
v2 = ind.Interface() v2 = ind.Interface()
} else { } else {
_, v2, exist = getExistPk(fi.relModelInfo, ind) _, v2, exist = getExistPk(fi.relModelInfo, ind)
if exist == false { if !exist {
panic(ErrMissPK) panic(ErrMissPK)
} }
} }
@ -104,11 +104,7 @@ func (o *queryM2M) Remove(mds ...interface{}) (int64, error) {
fi := o.fi fi := o.fi
qs := o.qs.Filter(fi.reverseFieldInfo.name, o.md) qs := o.qs.Filter(fi.reverseFieldInfo.name, o.md)
nums, err := qs.Filter(fi.reverseFieldInfoTwo.name+ExprSep+"in", mds).Delete() return qs.Filter(fi.reverseFieldInfoTwo.name+ExprSep+"in", mds).Delete()
if err != nil {
return nums, err
}
return nums, nil
} }
// check model is existed in relationship of origin model // check model is existed in relationship of origin model


@ -493,9 +493,19 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
} }
} }
} else { } else {
for i := 0; i < ind.NumField(); i++ { // define recursive function
f := ind.Field(i) var recursiveSetField func(rv reflect.Value)
fe := ind.Type().Field(i) recursiveSetField = func(rv reflect.Value) {
for i := 0; i < rv.NumField(); i++ {
f := rv.Field(i)
fe := rv.Type().Field(i)
// check if the field is a Struct
// recurse into the Struct type
if fe.Type.Kind() == reflect.Struct {
recursiveSetField(f)
}
_, tags := parseStructTag(fe.Tag.Get(defaultStructTagName)) _, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
var col string var col string
if col = tags["column"]; col == "" { if col = tags["column"]; col == "" {
@ -508,6 +518,10 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
} }
} }
// initial call to the recursive function
recursiveSetField(ind)
}
if eTyps[0].Kind() == reflect.Ptr { if eTyps[0].Kind() == reflect.Ptr {
ind = ind.Addr() ind = ind.Addr()
} }
@ -671,7 +685,7 @@ func (o *rawSet) queryRowsTo(container interface{}, keyCol, valueCol string) (in
ind *reflect.Value ind *reflect.Value
) )
typ := 0 var typ int
switch container.(type) { switch container.(type) {
case *Params: case *Params:
typ = 1 typ = 1


@ -93,14 +93,14 @@ wrongArg:
} }
func AssertIs(a interface{}, args ...interface{}) error { func AssertIs(a interface{}, args ...interface{}) error {
if ok, err := ValuesCompare(true, a, args...); ok == false { if ok, err := ValuesCompare(true, a, args...); !ok {
return err return err
} }
return nil return nil
} }
func AssertNot(a interface{}, args ...interface{}) error { func AssertNot(a interface{}, args ...interface{}) error {
if ok, err := ValuesCompare(false, a, args...); ok == false { if ok, err := ValuesCompare(false, a, args...); !ok {
return err return err
} }
return nil return nil
@ -135,7 +135,7 @@ func getCaller(skip int) string {
if i := strings.LastIndex(funName, "."); i > -1 { if i := strings.LastIndex(funName, "."); i > -1 {
funName = funName[i+1:] funName = funName[i+1:]
} }
return fmt.Sprintf("%s:%d: \n%s", fn, line, strings.Join(codes, "\n")) return fmt.Sprintf("%s:%s:%d: \n%s", fn, funName, line, strings.Join(codes, "\n"))
} }
func throwFail(t *testing.T, err error, args ...interface{}) { func throwFail(t *testing.T, err error, args ...interface{}) {
@ -1014,6 +1014,8 @@ func TestAll(t *testing.T) {
var users3 []*User var users3 []*User
qs = dORM.QueryTable("user") qs = dORM.QueryTable("user")
num, err = qs.Filter("user_name", "nothing").All(&users3) num, err = qs.Filter("user_name", "nothing").All(&users3)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 0))
throwFailNow(t, AssertIs(users3 == nil, false)) throwFailNow(t, AssertIs(users3 == nil, false))
} }
@ -1138,6 +1140,7 @@ func TestRelatedSel(t *testing.T) {
} }
err = qs.Filter("user_name", "nobody").RelatedSel("profile").One(&user) err = qs.Filter("user_name", "nobody").RelatedSel("profile").One(&user)
throwFail(t, err)
throwFail(t, AssertIs(num, 1)) throwFail(t, AssertIs(num, 1))
throwFail(t, AssertIs(user.Profile, nil)) throwFail(t, AssertIs(user.Profile, nil))
@ -1246,20 +1249,24 @@ func TestLoadRelated(t *testing.T) {
num, err = dORM.LoadRelated(&user, "Posts", true) num, err = dORM.LoadRelated(&user, "Posts", true)
throwFailNow(t, err) throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 2))
throwFailNow(t, AssertIs(len(user.Posts), 2)) throwFailNow(t, AssertIs(len(user.Posts), 2))
throwFailNow(t, AssertIs(user.Posts[0].User.UserName, "astaxie")) throwFailNow(t, AssertIs(user.Posts[0].User.UserName, "astaxie"))
num, err = dORM.LoadRelated(&user, "Posts", true, 1) num, err = dORM.LoadRelated(&user, "Posts", true, 1)
throwFailNow(t, err) throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(len(user.Posts), 1)) throwFailNow(t, AssertIs(len(user.Posts), 1))
num, err = dORM.LoadRelated(&user, "Posts", true, 0, 0, "-Id") num, err = dORM.LoadRelated(&user, "Posts", true, 0, 0, "-Id")
throwFailNow(t, err) throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 2))
throwFailNow(t, AssertIs(len(user.Posts), 2)) throwFailNow(t, AssertIs(len(user.Posts), 2))
throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting")) throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting"))
num, err = dORM.LoadRelated(&user, "Posts", true, 1, 1, "Id") num, err = dORM.LoadRelated(&user, "Posts", true, 1, 1, "Id")
throwFailNow(t, err) throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 1))
throwFailNow(t, AssertIs(len(user.Posts), 1)) throwFailNow(t, AssertIs(len(user.Posts), 1))
throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting")) throwFailNow(t, AssertIs(user.Posts[0].Title, "Formatting"))
@ -1654,6 +1661,13 @@ func TestRawQueryRow(t *testing.T) {
throwFail(t, AssertIs(pid, nil)) throwFail(t, AssertIs(pid, nil))
} }
// user_profile table
type userProfile struct {
User
Age int
Money float64
}
func TestQueryRows(t *testing.T) { func TestQueryRows(t *testing.T) {
Q := dDbBaser.TableQuote() Q := dDbBaser.TableQuote()
@ -1724,6 +1738,19 @@ func TestQueryRows(t *testing.T) {
throwFailNow(t, AssertIs(usernames[1], "astaxie")) throwFailNow(t, AssertIs(usernames[1], "astaxie"))
throwFailNow(t, AssertIs(ids[2], 4)) throwFailNow(t, AssertIs(ids[2], 4))
throwFailNow(t, AssertIs(usernames[2], "nobody")) throwFailNow(t, AssertIs(usernames[2], "nobody"))
//test query rows by nested struct
var l []userProfile
query = fmt.Sprintf("SELECT * FROM %suser_profile%s LEFT JOIN %suser%s ON %suser_profile%s.%sid%s = %suser%s.%sid%s", Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q, Q)
num, err = dORM.Raw(query).QueryRows(&l)
throwFailNow(t, err)
throwFailNow(t, AssertIs(num, 2))
throwFailNow(t, AssertIs(len(l), 2))
throwFailNow(t, AssertIs(l[0].UserName, "slene"))
throwFailNow(t, AssertIs(l[0].Age, 28))
throwFailNow(t, AssertIs(l[1].UserName, "astaxie"))
throwFailNow(t, AssertIs(l[1].Age, 30))
} }
func TestRawValues(t *testing.T) { func TestRawValues(t *testing.T) {
@ -1976,6 +2003,7 @@ func TestReadOrCreate(t *testing.T) {
created, pk, err := dORM.ReadOrCreate(u, "UserName") created, pk, err := dORM.ReadOrCreate(u, "UserName")
throwFail(t, err) throwFail(t, err)
throwFail(t, AssertIs(created, true)) throwFail(t, AssertIs(created, true))
throwFail(t, AssertIs(u.ID, pk))
throwFail(t, AssertIs(u.UserName, "Kyle")) throwFail(t, AssertIs(u.UserName, "Kyle"))
throwFail(t, AssertIs(u.Email, "kylemcc@gmail.com")) throwFail(t, AssertIs(u.Email, "kylemcc@gmail.com"))
throwFail(t, AssertIs(u.Password, "other_pass")) throwFail(t, AssertIs(u.Password, "other_pass"))
@ -2130,13 +2158,13 @@ func TestUintPk(t *testing.T) {
Name: name, Name: name,
} }
created, pk, err := dORM.ReadOrCreate(u, "ID") created, _, err := dORM.ReadOrCreate(u, "ID")
throwFail(t, err) throwFail(t, err)
throwFail(t, AssertIs(created, true)) throwFail(t, AssertIs(created, true))
throwFail(t, AssertIs(u.Name, name)) throwFail(t, AssertIs(u.Name, name))
nu := &UintPk{ID: 8} nu := &UintPk{ID: 8}
created, pk, err = dORM.ReadOrCreate(nu, "ID") created, pk, err := dORM.ReadOrCreate(nu, "ID")
throwFail(t, err) throwFail(t, err)
throwFail(t, AssertIs(created, false)) throwFail(t, AssertIs(created, false))
throwFail(t, AssertIs(nu.ID, u.ID)) throwFail(t, AssertIs(nu.ID, u.ID))


@ -92,11 +92,11 @@ func (f StrTo) Int64() (int64, error) {
i := new(big.Int) i := new(big.Int)
ni, ok := i.SetString(f.String(), 10) // octal ni, ok := i.SetString(f.String(), 10) // octal
if !ok { if !ok {
return int64(v), err return v, err
} }
return ni.Int64(), nil return ni.Int64(), nil
} }
return int64(v), err return v, err
} }
// Uint string to uint // Uint string to uint
@ -130,11 +130,11 @@ func (f StrTo) Uint64() (uint64, error) {
i := new(big.Int) i := new(big.Int)
ni, ok := i.SetString(f.String(), 10) ni, ok := i.SetString(f.String(), 10)
if !ok { if !ok {
return uint64(v), err return v, err
} }
return ni.Uint64(), nil return ni.Uint64(), nil
} }
return uint64(v), err return v, err
} }
// String string to string // String string to string
@ -225,7 +225,7 @@ func camelString(s string) string {
if d == '_' { if d == '_' {
flag = true flag = true
continue continue
} else if flag == true { } else if flag {
if d >= 'a' && d <= 'z' { if d >= 'a' && d <= 'z' {
d = d - 32 d = d - 32
} }

parser.go

@ -24,9 +24,13 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"regexp"
"sort" "sort"
"strconv"
"strings" "strings"
"unicode"
"github.com/astaxie/beego/context/param"
"github.com/astaxie/beego/logs" "github.com/astaxie/beego/logs"
"github.com/astaxie/beego/utils" "github.com/astaxie/beego/utils"
) )
@ -35,6 +39,7 @@ var globalRouterTemplate = `package routers
import ( import (
"github.com/astaxie/beego" "github.com/astaxie/beego"
"github.com/astaxie/beego/context/param"
) )
func init() { func init() {
@ -81,7 +86,7 @@ func parserPkg(pkgRealpath, pkgpath string) error {
if specDecl.Recv != nil { if specDecl.Recv != nil {
exp, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr) // Check that the type is correct first before throwing to parser exp, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr) // Check that the type is correct first before throwing to parser
if ok { if ok {
parserComments(specDecl.Doc, specDecl.Name.String(), fmt.Sprint(exp.X), pkgpath) parserComments(specDecl, fmt.Sprint(exp.X), pkgpath)
} }
} }
} }
@ -93,44 +98,170 @@ func parserPkg(pkgRealpath, pkgpath string) error {
return nil return nil
} }
func parserComments(comments *ast.CommentGroup, funcName, controllerName, pkgpath string) error { type parsedComment struct {
if comments != nil && comments.List != nil { routerPath string
for _, c := range comments.List { methods []string
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) params map[string]parsedParam
if strings.HasPrefix(t, "@router") {
elements := strings.TrimLeft(t, "@router ")
e1 := strings.SplitN(elements, " ", 2)
if len(e1) < 1 {
return errors.New("you should has router information")
} }
type parsedParam struct {
name string
datatype string
location string
defValue string
required bool
}
func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error {
if f.Doc != nil {
parsedComment, err := parseComment(f.Doc.List)
if err != nil {
return err
}
if parsedComment.routerPath != "" {
key := pkgpath + ":" + controllerName key := pkgpath + ":" + controllerName
cc := ControllerComments{} cc := ControllerComments{}
cc.Method = funcName cc.Method = f.Name.String()
cc.Router = e1[0] cc.Router = parsedComment.routerPath
if len(e1) == 2 && e1[1] != "" { cc.AllowHTTPMethods = parsedComment.methods
e1 = strings.SplitN(e1[1], " ", 2) cc.MethodParams = buildMethodParams(f.Type.Params.List, parsedComment)
if len(e1) >= 1 {
cc.AllowHTTPMethods = strings.Split(strings.Trim(e1[0], "[]"), ",")
} else {
cc.AllowHTTPMethods = append(cc.AllowHTTPMethods, "get")
}
} else {
cc.AllowHTTPMethods = append(cc.AllowHTTPMethods, "get")
}
if len(e1) == 2 && e1[1] != "" {
keyval := strings.Split(strings.Trim(e1[1], "[]"), " ")
for _, kv := range keyval {
kk := strings.Split(kv, ":")
cc.Params = append(cc.Params, map[string]string{strings.Join(kk[:len(kk)-1], ":"): kk[len(kk)-1]})
}
}
genInfoList[key] = append(genInfoList[key], cc) genInfoList[key] = append(genInfoList[key], cc)
} }
}
} }
return nil return nil
} }
func buildMethodParams(funcParams []*ast.Field, pc *parsedComment) []*param.MethodParam {
result := make([]*param.MethodParam, 0, len(funcParams))
for _, fparam := range funcParams {
for _, pName := range fparam.Names {
methodParam := buildMethodParam(fparam, pName.Name, pc)
result = append(result, methodParam)
}
}
return result
}
func buildMethodParam(fparam *ast.Field, name string, pc *parsedComment) *param.MethodParam {
options := []param.MethodParamOption{}
if cparam, ok := pc.params[name]; ok {
//Build param from comment info
name = cparam.name
if cparam.required {
options = append(options, param.IsRequired)
}
switch cparam.location {
case "body":
options = append(options, param.InBody)
case "header":
options = append(options, param.InHeader)
case "path":
options = append(options, param.InPath)
}
if cparam.defValue != "" {
options = append(options, param.Default(cparam.defValue))
}
} else {
if paramInPath(name, pc.routerPath) {
options = append(options, param.InPath)
}
}
return param.New(name, options...)
}
func paramInPath(name, route string) bool {
return strings.HasSuffix(route, ":"+name) ||
strings.Contains(route, ":"+name+"/")
}
var routeRegex = regexp.MustCompile(`@router\s+(\S+)(?:\s+\[(\S+)\])?`)
func parseComment(lines []*ast.Comment) (pc *parsedComment, err error) {
pc = &parsedComment{}
for _, c := range lines {
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
if strings.HasPrefix(t, "@router") {
matches := routeRegex.FindStringSubmatch(t)
if len(matches) == 3 {
pc.routerPath = matches[1]
methods := matches[2]
if methods == "" {
pc.methods = []string{"get"}
//pc.hasGet = true
} else {
pc.methods = strings.Split(methods, ",")
//pc.hasGet = strings.Contains(methods, "get")
}
} else {
return nil, errors.New("Router information is missing")
}
} else if strings.HasPrefix(t, "@Param") {
pv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Param")))
if len(pv) < 4 {
logs.Error("Invalid @Param format. Needs at least 4 parameters")
}
p := parsedParam{}
names := strings.SplitN(pv[0], "=>", 2)
p.name = names[0]
funcParamName := p.name
if len(names) > 1 {
funcParamName = names[1]
}
p.location = pv[1]
p.datatype = pv[2]
switch len(pv) {
case 5:
p.required, _ = strconv.ParseBool(pv[3])
case 6:
p.defValue = pv[3]
p.required, _ = strconv.ParseBool(pv[4])
}
if pc.params == nil {
pc.params = map[string]parsedParam{}
}
pc.params[funcParamName] = p
}
}
return
}
// direct copy from bee\g_docs.go
// analyse params and return []string
// @Param query form string true "The email for login"
// [query form string true "The email for login"]
func getparams(str string) []string {
var s []rune
var j int
var start bool
var r []string
var quoted int8
for _, c := range str {
if unicode.IsSpace(c) && quoted == 0 {
if !start {
continue
} else {
start = false
j++
r = append(r, string(s))
s = make([]rune, 0)
continue
}
}
start = true
if c == '"' {
quoted ^= 1
continue
}
s = append(s, c)
}
if len(s) > 0 {
r = append(r, string(s))
}
return r
}
func genRouterCode(pkgRealpath string) { func genRouterCode(pkgRealpath string) {
os.Mkdir(getRouterDir(pkgRealpath), 0755) os.Mkdir(getRouterDir(pkgRealpath), 0755)
logs.Info("generate router from comments") logs.Info("generate router from comments")
@ -144,6 +275,7 @@ func genRouterCode(pkgRealpath string) {
sort.Strings(sortKey) sort.Strings(sortKey)
for _, k := range sortKey { for _, k := range sortKey {
cList := genInfoList[k] cList := genInfoList[k]
sort.Sort(ControllerCommentsSlice(cList))
for _, c := range cList { for _, c := range cList {
allmethod := "nil" allmethod := "nil"
if len(c.AllowHTTPMethods) > 0 { if len(c.AllowHTTPMethods) > 0 {
@ -163,12 +295,24 @@ func genRouterCode(pkgRealpath string) {
} }
params = strings.TrimRight(params, ",") + "}" params = strings.TrimRight(params, ",") + "}"
} }
methodParams := "param.Make("
if len(c.MethodParams) > 0 {
lines := make([]string, 0, len(c.MethodParams))
for _, m := range c.MethodParams {
lines = append(lines, fmt.Sprint(m))
}
methodParams += "\n " +
strings.Join(lines, ",\n ") +
",\n "
}
methodParams += ")"
globalinfo = globalinfo + ` globalinfo = globalinfo + `
beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"], beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"],
beego.ControllerComments{ beego.ControllerComments{
Method: "` + strings.TrimSpace(c.Method) + `", Method: "` + strings.TrimSpace(c.Method) + `",
` + "Router: `" + c.Router + "`" + `, ` + "Router: `" + c.Router + "`" + `,
AllowHTTPMethods: ` + allmethod + `, AllowHTTPMethods: ` + allmethod + `,
MethodParams: ` + methodParams + `,
Params: ` + params + `}) Params: ` + params + `})
` `
} }

plugins/authz/authz.go (new file)
@ -0,0 +1,86 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package authz provides handlers to enable ACL, RBAC, ABAC authorization support.
// Simple Usage:
// import(
// "github.com/astaxie/beego"
// "github.com/astaxie/beego/plugins/authz"
// "github.com/casbin/casbin"
// )
//
// func main(){
// // mediate the access for every request
// beego.InsertFilter("*", beego.BeforeRouter, authz.NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
// beego.Run()
// }
//
//
// Advanced Usage:
//
// func main(){
// e := casbin.NewEnforcer("authz_model.conf", "")
// e.AddRoleForUser("alice", "admin")
// e.AddPolicy(...)
//
// beego.InsertFilter("*", beego.BeforeRouter, authz.NewAuthorizer(e))
// beego.Run()
// }
package authz
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
"github.com/casbin/casbin"
"net/http"
)
// NewAuthorizer returns the authorizer.
// It uses a casbin enforcer as input.
func NewAuthorizer(e *casbin.Enforcer) beego.FilterFunc {
return func(ctx *context.Context) {
a := &BasicAuthorizer{enforcer: e}
if !a.CheckPermission(ctx.Request) {
a.RequirePermission(ctx.ResponseWriter)
}
}
}
// BasicAuthorizer stores the casbin handler
type BasicAuthorizer struct {
enforcer *casbin.Enforcer
}
// GetUserName gets the user name from the request.
// Currently, only HTTP basic authentication is supported
func (a *BasicAuthorizer) GetUserName(r *http.Request) string {
username, _, _ := r.BasicAuth()
return username
}
// CheckPermission checks the user/method/path combination from the request.
// Returns true (permission granted) or false (permission denied)
func (a *BasicAuthorizer) CheckPermission(r *http.Request) bool {
user := a.GetUserName(r)
method := r.Method
path := r.URL.Path
return a.enforcer.Enforce(user, path, method)
}
// RequirePermission returns a 403 Forbidden response to the client
func (a *BasicAuthorizer) RequirePermission(w http.ResponseWriter) {
w.WriteHeader(403)
w.Write([]byte("403 Forbidden\n"))
}

plugins/authz/authz_model.conf (new file)

@ -0,0 +1,14 @@
[request_definition]
r = sub, obj, act
[policy_definition]
p = sub, obj, act
[role_definition]
g = _, _
[policy_effect]
e = some(where (p.eft == allow))
[matchers]
m = g(r.sub, p.sub) && keyMatch(r.obj, p.obj) && (r.act == p.act || p.act == "*")

plugins/authz/authz_policy.csv (new file)

@ -0,0 +1,7 @@
p, alice, /dataset1/*, GET
p, alice, /dataset1/resource1, POST
p, bob, /dataset2/resource1, *
p, bob, /dataset2/resource2, GET
p, bob, /dataset2/folder1/*, POST
p, dataset1_admin, /dataset1/*, *
g, cathy, dataset1_admin

plugins/authz/authz_test.go (new file)

@ -0,0 +1,107 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context"
"github.com/astaxie/beego/plugins/auth"
"github.com/casbin/casbin"
"net/http"
"net/http/httptest"
"testing"
)
func testRequest(t *testing.T, handler *beego.ControllerRegister, user string, path string, method string, code int) {
r, _ := http.NewRequest(method, path, nil)
r.SetBasicAuth(user, "123")
w := httptest.NewRecorder()
handler.ServeHTTP(w, r)
if w.Code != code {
t.Errorf("%s, %s, %s: %d, supposed to be %d", user, path, method, w.Code, code)
}
}
func TestBasic(t *testing.T) {
handler := beego.NewControllerRegister()
handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("alice", "123"))
handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
handler.Any("*", func(ctx *context.Context) {
ctx.Output.SetStatus(200)
})
testRequest(t, handler, "alice", "/dataset1/resource1", "GET", 200)
testRequest(t, handler, "alice", "/dataset1/resource1", "POST", 200)
testRequest(t, handler, "alice", "/dataset1/resource2", "GET", 200)
testRequest(t, handler, "alice", "/dataset1/resource2", "POST", 403)
}
func TestPathWildcard(t *testing.T) {
handler := beego.NewControllerRegister()
handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("bob", "123"))
handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")))
handler.Any("*", func(ctx *context.Context) {
ctx.Output.SetStatus(200)
})
testRequest(t, handler, "bob", "/dataset2/resource1", "GET", 200)
testRequest(t, handler, "bob", "/dataset2/resource1", "POST", 200)
testRequest(t, handler, "bob", "/dataset2/resource1", "DELETE", 200)
testRequest(t, handler, "bob", "/dataset2/resource2", "GET", 200)
testRequest(t, handler, "bob", "/dataset2/resource2", "POST", 403)
testRequest(t, handler, "bob", "/dataset2/resource2", "DELETE", 403)
testRequest(t, handler, "bob", "/dataset2/folder1/item1", "GET", 403)
testRequest(t, handler, "bob", "/dataset2/folder1/item1", "POST", 200)
testRequest(t, handler, "bob", "/dataset2/folder1/item1", "DELETE", 403)
testRequest(t, handler, "bob", "/dataset2/folder1/item2", "GET", 403)
testRequest(t, handler, "bob", "/dataset2/folder1/item2", "POST", 200)
testRequest(t, handler, "bob", "/dataset2/folder1/item2", "DELETE", 403)
}
func TestRBAC(t *testing.T) {
handler := beego.NewControllerRegister()
handler.InsertFilter("*", beego.BeforeRouter, auth.Basic("cathy", "123"))
e := casbin.NewEnforcer("authz_model.conf", "authz_policy.csv")
handler.InsertFilter("*", beego.BeforeRouter, NewAuthorizer(e))
handler.Any("*", func(ctx *context.Context) {
ctx.Output.SetStatus(200)
})
// cathy can access all /dataset1/* resources via all methods because it has the dataset1_admin role.
testRequest(t, handler, "cathy", "/dataset1/item", "GET", 200)
testRequest(t, handler, "cathy", "/dataset1/item", "POST", 200)
testRequest(t, handler, "cathy", "/dataset1/item", "DELETE", 200)
testRequest(t, handler, "cathy", "/dataset2/item", "GET", 403)
testRequest(t, handler, "cathy", "/dataset2/item", "POST", 403)
testRequest(t, handler, "cathy", "/dataset2/item", "DELETE", 403)
// delete all roles on user cathy, so cathy cannot access any resources now.
e.DeleteRolesForUser("cathy")
testRequest(t, handler, "cathy", "/dataset1/item", "GET", 403)
testRequest(t, handler, "cathy", "/dataset1/item", "POST", 403)
testRequest(t, handler, "cathy", "/dataset1/item", "DELETE", 403)
testRequest(t, handler, "cathy", "/dataset2/item", "GET", 403)
testRequest(t, handler, "cathy", "/dataset2/item", "POST", 403)
testRequest(t, handler, "cathy", "/dataset2/item", "DELETE", 403)
}


@ -23,7 +23,7 @@ import (
// PolicyFunc defines a policy function which is invoked before the controller handler is executed. // PolicyFunc defines a policy function which is invoked before the controller handler is executed.
type PolicyFunc func(*context.Context) type PolicyFunc func(*context.Context)
// FindRouter Find Router info for URL // FindPolicy Find Router info for URL
func (p *ControllerRegister) FindPolicy(cont *context.Context) []PolicyFunc { func (p *ControllerRegister) FindPolicy(cont *context.Context) []PolicyFunc {
var urlPath = cont.Input.URL() var urlPath = cont.Input.URL()
if !BConfig.RouterCaseSensitive { if !BConfig.RouterCaseSensitive {
@ -71,7 +71,7 @@ func (p *ControllerRegister) addToPolicy(method, pattern string, r ...PolicyFunc
} }
} }
// Register new policy in beego // Policy Register new policy in beego
func Policy(pattern, method string, policy ...PolicyFunc) { func Policy(pattern, method string, policy ...PolicyFunc) {
BeeApp.Handlers.addToPolicy(method, pattern, policy...) BeeApp.Handlers.addToPolicy(method, pattern, policy...)
} }

router.go

@ -17,7 +17,6 @@ package beego
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"os"
"path" "path"
"path/filepath" "path/filepath"
"reflect" "reflect"
@ -28,6 +27,7 @@ import (
"time" "time"
beecontext "github.com/astaxie/beego/context" beecontext "github.com/astaxie/beego/context"
"github.com/astaxie/beego/context/param"
"github.com/astaxie/beego/logs" "github.com/astaxie/beego/logs"
"github.com/astaxie/beego/toolbox" "github.com/astaxie/beego/toolbox"
"github.com/astaxie/beego/utils" "github.com/astaxie/beego/utils"
@ -109,13 +109,15 @@ func ExceptMethodAppend(action string) {
exceptMethod = append(exceptMethod, action) exceptMethod = append(exceptMethod, action)
} }
type controllerInfo struct { // ControllerInfo holds information about the controller.
type ControllerInfo struct {
pattern string pattern string
controllerType reflect.Type controllerType reflect.Type
methods map[string]string methods map[string]string
handler http.Handler handler http.Handler
runFunction FilterFunc runFunction FilterFunc
routerType int routerType int
methodParams []*param.MethodParam
} }
// ControllerRegister containers registered router rules, controller handlers and filters. // ControllerRegister containers registered router rules, controller handlers and filters.
@ -151,6 +153,10 @@ func NewControllerRegister() *ControllerRegister {
// Add("/api",&RestController{},"get,post:ApiFunc" // Add("/api",&RestController{},"get,post:ApiFunc"
// Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc") // Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc")
func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) { func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) {
p.addWithMethodParams(pattern, c, nil, mappingMethods...)
}
func (p *ControllerRegister) addWithMethodParams(pattern string, c ControllerInterface, methodParams []*param.MethodParam, mappingMethods ...string) {
reflectVal := reflect.ValueOf(c) reflectVal := reflect.ValueOf(c)
t := reflect.Indirect(reflectVal).Type() t := reflect.Indirect(reflectVal).Type()
methods := make(map[string]string) methods := make(map[string]string)
@ -176,11 +182,12 @@ func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingM
} }
} }
route := &controllerInfo{} route := &ControllerInfo{}
route.pattern = pattern route.pattern = pattern
route.methods = methods route.methods = methods
route.routerType = routerTypeBeego route.routerType = routerTypeBeego
route.controllerType = t route.controllerType = t
route.methodParams = methodParams
if len(methods) == 0 { if len(methods) == 0 {
for _, m := range HTTPMETHOD { for _, m := range HTTPMETHOD {
p.addToRouter(m, pattern, route) p.addToRouter(m, pattern, route)
@ -198,7 +205,7 @@ func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingM
} }
} }
func (p *ControllerRegister) addToRouter(method, pattern string, r *controllerInfo) { func (p *ControllerRegister) addToRouter(method, pattern string, r *ControllerInfo) {
if !BConfig.RouterCaseSensitive { if !BConfig.RouterCaseSensitive {
pattern = strings.ToLower(pattern) pattern = strings.ToLower(pattern)
} }
@ -219,13 +226,11 @@ func (p *ControllerRegister) Include(cList ...ControllerInterface) {
for _, c := range cList { for _, c := range cList {
reflectVal := reflect.ValueOf(c) reflectVal := reflect.ValueOf(c)
t := reflect.Indirect(reflectVal).Type() t := reflect.Indirect(reflectVal).Type()
gopath := os.Getenv("GOPATH") wgopath := utils.GetGOPATHs()
if gopath == "" { if len(wgopath) == 0 {
panic("you are in dev mode. So please set gopath") panic("you are in dev mode. So please set gopath")
} }
pkgpath := "" pkgpath := ""
wgopath := filepath.SplitList(gopath)
for _, wg := range wgopath { for _, wg := range wgopath {
wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath())) wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath()))
if utils.FileExists(wg) { if utils.FileExists(wg) {
@ -247,7 +252,7 @@ func (p *ControllerRegister) Include(cList ...ControllerInterface) {
key := t.PkgPath() + ":" + t.Name() key := t.PkgPath() + ":" + t.Name()
if comm, ok := GlobalControllerRouter[key]; ok { if comm, ok := GlobalControllerRouter[key]; ok {
for _, a := range comm { for _, a := range comm {
p.Add(a.Router, c, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method) p.addWithMethodParams(a.Router, c, a.MethodParams, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method)
} }
} }
} }
@ -335,7 +340,7 @@ func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) {
if _, ok := HTTPMETHOD[method]; method != "*" && !ok { if _, ok := HTTPMETHOD[method]; method != "*" && !ok {
panic("not support http method: " + method) panic("not support http method: " + method)
} }
route := &controllerInfo{} route := &ControllerInfo{}
route.pattern = pattern route.pattern = pattern
route.routerType = routerTypeRESTFul route.routerType = routerTypeRESTFul
route.runFunction = f route.runFunction = f
@ -361,7 +366,7 @@ func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) {
// Handler add user defined Handler // Handler add user defined Handler
func (p *ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) { func (p *ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) {
route := &controllerInfo{} route := &ControllerInfo{}
route.pattern = pattern route.pattern = pattern
route.routerType = routerTypeHandler route.routerType = routerTypeHandler
route.handler = h route.handler = h
@ -396,7 +401,7 @@ func (p *ControllerRegister) AddAutoPrefix(prefix string, c ControllerInterface)
controllerName := strings.TrimSuffix(ct.Name(), "Controller") controllerName := strings.TrimSuffix(ct.Name(), "Controller")
for i := 0; i < rt.NumMethod(); i++ { for i := 0; i < rt.NumMethod(); i++ {
if !utils.InSlice(rt.Method(i).Name, exceptMethod) { if !utils.InSlice(rt.Method(i).Name, exceptMethod) {
route := &controllerInfo{} route := &ControllerInfo{}
route.routerType = routerTypeBeego route.routerType = routerTypeBeego
route.methods = map[string]string{"*": rt.Method(i).Name} route.methods = map[string]string{"*": rt.Method(i).Name}
route.controllerType = ct route.controllerType = ct
@ -502,7 +507,7 @@ func (p *ControllerRegister) geturl(t *Tree, url, controllName, methodName strin
} }
} }
for _, l := range t.leaves { for _, l := range t.leaves {
if c, ok := l.runObject.(*controllerInfo); ok { if c, ok := l.runObject.(*ControllerInfo); ok {
if c.routerType == routerTypeBeego && if c.routerType == routerTypeBeego &&
strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllName) { strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllName) {
find := false find := false
@ -629,7 +634,8 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
runRouter reflect.Type runRouter reflect.Type
findRouter bool findRouter bool
runMethod string runMethod string
routerInfo *controllerInfo methodParams []*param.MethodParam
routerInfo *ControllerInfo
isRunnable bool isRunnable bool
) )
context := p.pool.Get().(*beecontext.Context) context := p.pool.Get().(*beecontext.Context)
@ -670,7 +676,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
goto Admin goto Admin
} }
if r.Method != "GET" && r.Method != "HEAD" { if r.Method != http.MethodGet && r.Method != http.MethodHead {
if BConfig.CopyRequestBody && !context.Input.IsUpload() { if BConfig.CopyRequestBody && !context.Input.IsUpload() {
context.Input.CopyBody(BConfig.MaxMemory) context.Input.CopyBody(BConfig.MaxMemory)
} }
@ -698,7 +704,6 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
// User can define RunController and RunMethod in filter // User can define RunController and RunMethod in filter
if context.Input.RunController != nil && context.Input.RunMethod != "" { if context.Input.RunController != nil && context.Input.RunMethod != "" {
findRouter = true findRouter = true
isRunnable = true
runMethod = context.Input.RunMethod runMethod = context.Input.RunMethod
runRouter = context.Input.RunController runRouter = context.Input.RunController
} else { } else {
@ -742,12 +747,13 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
routerInfo.handler.ServeHTTP(rw, r) routerInfo.handler.ServeHTTP(rw, r)
} else { } else {
runRouter = routerInfo.controllerType runRouter = routerInfo.controllerType
methodParams = routerInfo.methodParams
method := r.Method method := r.Method
if r.Method == "POST" && context.Input.Query("_method") == "PUT" { if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodPost {
method = "PUT" method = http.MethodPut
} }
if r.Method == "POST" && context.Input.Query("_method") == "DELETE" { if r.Method == http.MethodPost && context.Input.Query("_method") == http.MethodDelete {
method = "DELETE" method = http.MethodDelete
} }
if m, ok := routerInfo.methods[method]; ok { if m, ok := routerInfo.methods[method]; ok {
runMethod = m runMethod = m
@ -777,8 +783,8 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
//if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf //if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf
if BConfig.WebConfig.EnableXSRF { if BConfig.WebConfig.EnableXSRF {
execController.XSRFToken() execController.XSRFToken()
if r.Method == "POST" || r.Method == "DELETE" || r.Method == "PUT" || if r.Method == http.MethodPost || r.Method == http.MethodDelete || r.Method == http.MethodPut ||
(r.Method == "POST" && (context.Input.Query("_method") == "DELETE" || context.Input.Query("_method") == "PUT")) { (r.Method == http.MethodPost && (context.Input.Query("_method") == http.MethodDelete || context.Input.Query("_method") == http.MethodPut)) {
execController.CheckXSRFCookie() execController.CheckXSRFCookie()
} }
} }
@ -788,25 +794,30 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
if !context.ResponseWriter.Started { if !context.ResponseWriter.Started {
//exec main logic //exec main logic
switch runMethod { switch runMethod {
case "GET": case http.MethodGet:
execController.Get() execController.Get()
case "POST": case http.MethodPost:
execController.Post() execController.Post()
case "DELETE": case http.MethodDelete:
execController.Delete() execController.Delete()
case "PUT": case http.MethodPut:
execController.Put() execController.Put()
case "HEAD": case http.MethodHead:
execController.Head() execController.Head()
case "PATCH": case http.MethodPatch:
execController.Patch() execController.Patch()
case "OPTIONS": case http.MethodOptions:
execController.Options() execController.Options()
default: default:
if !execController.HandlerFunc(runMethod) { if !execController.HandlerFunc(runMethod) {
var in []reflect.Value
method := vc.MethodByName(runMethod) method := vc.MethodByName(runMethod)
method.Call(in) in := param.ConvertParams(methodParams, method.Type(), context)
out := method.Call(in)
//For backward compatibility we only handle response if we had incoming methodParams
if methodParams != nil {
p.handleParamResponse(context, execController, out)
}
} }
} }
@ -837,7 +848,15 @@ Admin:
//admin module record QPS //admin module record QPS
if BConfig.Listen.EnableAdmin { if BConfig.Listen.EnableAdmin {
timeDur := time.Since(startTime) timeDur := time.Since(startTime)
if FilterMonitorFunc(r.Method, r.URL.Path, timeDur) { pattern := ""
if routerInfo != nil {
pattern = routerInfo.pattern
}
statusCode := context.ResponseWriter.Status
if statusCode == 0 {
statusCode = 200
}
if FilterMonitorFunc(r.Method, r.URL.Path, timeDur, pattern, statusCode) {
if runRouter != nil { if runRouter != nil {
go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, runRouter.Name(), timeDur) go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, runRouter.Name(), timeDur)
} else { } else {
@ -886,8 +905,22 @@ Admin:
} }
} }
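// Minimal usage sketch (not part of this diff), assuming the five-argument
// FilterMonitorFunc signature used in the call above; the pattern check and the
// log line are illustrative only.
//
//	import (
//		"time"
//
//		"github.com/astaxie/beego"
//		"github.com/astaxie/beego/logs"
//	)
//
//	func init() {
//		beego.FilterMonitorFunc = func(method, requestPath string, t time.Duration, pattern string, statusCode int) bool {
//			if pattern == "/healthcheck" { // skip health checks from the admin statistics
//				return false
//			}
//			logs.Debug("%d %s %s matched %s in %s", statusCode, method, requestPath, pattern, t)
//			return true
//		}
//	}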
func (p *ControllerRegister) handleParamResponse(context *beecontext.Context, execController ControllerInterface, results []reflect.Value) {
//looping in reverse order for the case when both error and value are returned and error sets the response status code
for i := len(results) - 1; i >= 0; i-- {
result := results[i]
if result.Kind() != reflect.Interface || !result.IsNil() {
resultValue := result.Interface()
context.RenderMethodResult(resultValue)
}
}
if !context.ResponseWriter.Started && context.Output.Status == 0 {
context.Output.SetStatus(200)
}
}
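// Hedged sketch (controller, types, and helper below are hypothetical, not from
// this diff): a method registered with MethodParams can take converted arguments
// and return a value plus an error; handleParamResponse walks the results in
// reverse, so a non-nil error sets the status before the value is written.
//
//	// @router /users/:id [get]
//	func (c *UserController) GetUser(id int) (*User, error) {
//		u, err := findUser(id) // hypothetical lookup
//		if err != nil {
//			return nil, err // handled first (reverse order), decides the status code
//		}
//		return u, nil // rendered as the response body
//	}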
// FindRouter Find Router info for URL // FindRouter Find Router info for URL
func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo *controllerInfo, isFind bool) { func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo *ControllerInfo, isFind bool) {
var urlPath = context.Input.URL() var urlPath = context.Input.URL()
if !BConfig.RouterCaseSensitive { if !BConfig.RouterCaseSensitive {
urlPath = strings.ToLower(urlPath) urlPath = strings.ToLower(urlPath)
@ -895,7 +928,7 @@ func (p *ControllerRegister) FindRouter(context *beecontext.Context) (routerInfo
httpMethod := context.Input.Method() httpMethod := context.Input.Method()
if t, ok := p.routers[httpMethod]; ok { if t, ok := p.routers[httpMethod]; ok {
runObject := t.Match(urlPath, context) runObject := t.Match(urlPath, context)
if r, ok := runObject.(*controllerInfo); ok { if r, ok := runObject.(*ControllerInfo); ok {
return r, true return r, true
} }
} }


@ -502,10 +502,10 @@ func TestFilterBeforeRouter(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "BeforeRouter1") == false { if !strings.Contains(rw.Body.String(), "BeforeRouter1") {
t.Errorf(testName + " BeforeRouter did not run") t.Errorf(testName + " BeforeRouter did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == true { if strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " BeforeRouter did not return properly") t.Errorf(testName + " BeforeRouter did not return properly")
} }
} }
@ -525,13 +525,13 @@ func TestFilterBeforeExec(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "BeforeExec1") == false { if !strings.Contains(rw.Body.String(), "BeforeExec1") {
t.Errorf(testName + " BeforeExec did not run") t.Errorf(testName + " BeforeExec did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == true { if strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " BeforeExec did not return properly") t.Errorf(testName + " BeforeExec did not return properly")
} }
if strings.Contains(rw.Body.String(), "BeforeRouter") == true { if strings.Contains(rw.Body.String(), "BeforeRouter") {
t.Errorf(testName + " BeforeRouter ran in error") t.Errorf(testName + " BeforeRouter ran in error")
} }
} }
@ -552,16 +552,16 @@ func TestFilterAfterExec(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "AfterExec1") == false { if !strings.Contains(rw.Body.String(), "AfterExec1") {
t.Errorf(testName + " AfterExec did not run") t.Errorf(testName + " AfterExec did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == false { if !strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " handler did not run properly") t.Errorf(testName + " handler did not run properly")
} }
if strings.Contains(rw.Body.String(), "BeforeRouter") == true { if strings.Contains(rw.Body.String(), "BeforeRouter") {
t.Errorf(testName + " BeforeRouter ran in error") t.Errorf(testName + " BeforeRouter ran in error")
} }
if strings.Contains(rw.Body.String(), "BeforeExec") == true { if strings.Contains(rw.Body.String(), "BeforeExec") {
t.Errorf(testName + " BeforeExec ran in error") t.Errorf(testName + " BeforeExec ran in error")
} }
} }
@ -583,19 +583,19 @@ func TestFilterFinishRouter(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "FinishRouter1") == true { if strings.Contains(rw.Body.String(), "FinishRouter1") {
t.Errorf(testName + " FinishRouter did not run") t.Errorf(testName + " FinishRouter did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == false { if !strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " handler did not run properly") t.Errorf(testName + " handler did not run properly")
} }
if strings.Contains(rw.Body.String(), "AfterExec1") == true { if strings.Contains(rw.Body.String(), "AfterExec1") {
t.Errorf(testName + " AfterExec ran in error") t.Errorf(testName + " AfterExec ran in error")
} }
if strings.Contains(rw.Body.String(), "BeforeRouter") == true { if strings.Contains(rw.Body.String(), "BeforeRouter") {
t.Errorf(testName + " BeforeRouter ran in error") t.Errorf(testName + " BeforeRouter ran in error")
} }
if strings.Contains(rw.Body.String(), "BeforeExec") == true { if strings.Contains(rw.Body.String(), "BeforeExec") {
t.Errorf(testName + " BeforeExec ran in error") t.Errorf(testName + " BeforeExec ran in error")
} }
} }
@ -615,14 +615,14 @@ func TestFilterFinishRouterMultiFirstOnly(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "FinishRouter1") == false { if !strings.Contains(rw.Body.String(), "FinishRouter1") {
t.Errorf(testName + " FinishRouter1 did not run") t.Errorf(testName + " FinishRouter1 did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == false { if !strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " handler did not run properly") t.Errorf(testName + " handler did not run properly")
} }
// not expected in body // not expected in body
if strings.Contains(rw.Body.String(), "FinishRouter2") == true { if strings.Contains(rw.Body.String(), "FinishRouter2") {
t.Errorf(testName + " FinishRouter2 did run") t.Errorf(testName + " FinishRouter2 did run")
} }
} }
@ -642,44 +642,52 @@ func TestFilterFinishRouterMulti(t *testing.T) {
rw, r := testRequest("GET", url) rw, r := testRequest("GET", url)
mux.ServeHTTP(rw, r) mux.ServeHTTP(rw, r)
if strings.Contains(rw.Body.String(), "FinishRouter1") == false { if !strings.Contains(rw.Body.String(), "FinishRouter1") {
t.Errorf(testName + " FinishRouter1 did not run") t.Errorf(testName + " FinishRouter1 did not run")
} }
if strings.Contains(rw.Body.String(), "hello") == false { if !strings.Contains(rw.Body.String(), "hello") {
t.Errorf(testName + " handler did not run properly") t.Errorf(testName + " handler did not run properly")
} }
if strings.Contains(rw.Body.String(), "FinishRouter2") == false { if !strings.Contains(rw.Body.String(), "FinishRouter2") {
t.Errorf(testName + " FinishRouter2 did not run properly") t.Errorf(testName + " FinishRouter2 did not run properly")
} }
} }
func beegoFilterNoOutput(ctx *context.Context) { func beegoFilterNoOutput(ctx *context.Context) {
return
} }
func beegoBeforeRouter1(ctx *context.Context) { func beegoBeforeRouter1(ctx *context.Context) {
ctx.WriteString("|BeforeRouter1") ctx.WriteString("|BeforeRouter1")
} }
func beegoBeforeRouter2(ctx *context.Context) { func beegoBeforeRouter2(ctx *context.Context) {
ctx.WriteString("|BeforeRouter2") ctx.WriteString("|BeforeRouter2")
} }
func beegoBeforeExec1(ctx *context.Context) { func beegoBeforeExec1(ctx *context.Context) {
ctx.WriteString("|BeforeExec1") ctx.WriteString("|BeforeExec1")
} }
func beegoBeforeExec2(ctx *context.Context) { func beegoBeforeExec2(ctx *context.Context) {
ctx.WriteString("|BeforeExec2") ctx.WriteString("|BeforeExec2")
} }
func beegoAfterExec1(ctx *context.Context) { func beegoAfterExec1(ctx *context.Context) {
ctx.WriteString("|AfterExec1") ctx.WriteString("|AfterExec1")
} }
func beegoAfterExec2(ctx *context.Context) { func beegoAfterExec2(ctx *context.Context) {
ctx.WriteString("|AfterExec2") ctx.WriteString("|AfterExec2")
} }
func beegoFinishRouter1(ctx *context.Context) { func beegoFinishRouter1(ctx *context.Context) {
ctx.WriteString("|FinishRouter1") ctx.WriteString("|FinishRouter1")
} }
func beegoFinishRouter2(ctx *context.Context) { func beegoFinishRouter2(ctx *context.Context) {
ctx.WriteString("|FinishRouter2") ctx.WriteString("|FinishRouter2")
} }
func beegoResetParams(ctx *context.Context) { func beegoResetParams(ctx *context.Context) {
ctx.ResponseWriter.Header().Set("splat", ctx.Input.Param(":splat")) ctx.ResponseWriter.Header().Set("splat", ctx.Input.Param(":splat"))
} }


@ -155,11 +155,16 @@ func (cp *Provider) SessionInit(maxlifetime int64, savePath string) error {
func (cp *Provider) SessionRead(sid string) (session.Store, error) { func (cp *Provider) SessionRead(sid string) (session.Store, error) {
cp.b = cp.getBucket() cp.b = cp.getBucket()
var doc []byte var (
kv map[interface{}]interface{}
err error
doc []byte
)
err := cp.b.Get(sid, &doc) err = cp.b.Get(sid, &doc)
var kv map[interface{}]interface{} if err != nil {
if doc == nil { return nil, err
} else if doc == nil {
kv = make(map[interface{}]interface{}) kv = make(map[interface{}]interface{})
} else { } else {
kv, err = session.DecodeGob(doc) kv, err = session.DecodeGob(doc)
@ -230,7 +235,6 @@ func (cp *Provider) SessionDestroy(sid string) error {
// SessionGC Recycle // SessionGC Recycle
func (cp *Provider) SessionGC() { func (cp *Provider) SessionGC() {
return
} }
// SessionAll return all active session // SessionAll return all active session


@ -12,8 +12,10 @@ import (
"github.com/siddontang/ledisdb/ledis" "github.com/siddontang/ledisdb/ledis"
) )
var ledispder = &Provider{} var (
var c *ledis.DB ledispder = &Provider{}
c *ledis.DB
)
// SessionStore ledis session store // SessionStore ledis session store
type SessionStore struct { type SessionStore struct {
@ -97,27 +99,33 @@ func (lp *Provider) SessionInit(maxlifetime int64, savePath string) error {
} }
cfg := new(config.Config) cfg := new(config.Config)
cfg.DataDir = lp.savePath cfg.DataDir = lp.savePath
nowLedis, err := ledis.Open(cfg)
c, err = nowLedis.Select(lp.db) var ledisInstance *ledis.Ledis
ledisInstance, err = ledis.Open(cfg)
if err != nil { if err != nil {
println(err) return err
return nil
} }
return nil c, err = ledisInstance.Select(lp.db)
return err
} }
// SessionRead read ledis session by sid // SessionRead read ledis session by sid
func (lp *Provider) SessionRead(sid string) (session.Store, error) { func (lp *Provider) SessionRead(sid string) (session.Store, error) {
kvs, err := c.Get([]byte(sid)) var (
var kv map[interface{}]interface{} kv map[interface{}]interface{}
err error
)
kvs, _ := c.Get([]byte(sid))
if len(kvs) == 0 { if len(kvs) == 0 {
kv = make(map[interface{}]interface{}) kv = make(map[interface{}]interface{})
} else { } else {
kv, err = session.DecodeGob(kvs) if kv, err = session.DecodeGob(kvs); err != nil {
if err != nil {
return nil, err return nil, err
} }
} }
ls := &SessionStore{sid: sid, values: kv, maxlifetime: lp.maxlifetime} ls := &SessionStore{sid: sid, values: kv, maxlifetime: lp.maxlifetime}
return ls, nil return ls, nil
} }
@ -125,10 +133,7 @@ func (lp *Provider) SessionRead(sid string) (session.Store, error) {
// SessionExist check ledis session exist by sid // SessionExist check ledis session exist by sid
func (lp *Provider) SessionExist(sid string) bool { func (lp *Provider) SessionExist(sid string) bool {
count, _ := c.Exists([]byte(sid)) count, _ := c.Exists([]byte(sid))
if count == 0 { return !(count == 0)
return false
}
return true
} }
// SessionRegenerate generate new sid for ledis session // SessionRegenerate generate new sid for ledis session
@ -145,18 +150,7 @@ func (lp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error)
c.Set([]byte(sid), data) c.Set([]byte(sid), data)
c.Expire([]byte(sid), lp.maxlifetime) c.Expire([]byte(sid), lp.maxlifetime)
} }
kvs, err := c.Get([]byte(sid)) return lp.SessionRead(sid)
var kv map[interface{}]interface{}
if len(kvs) == 0 {
kv = make(map[interface{}]interface{})
} else {
kv, err = session.DecodeGob([]byte(kvs))
if err != nil {
return nil, err
}
}
ls := &SessionStore{sid: sid, values: kv, maxlifetime: lp.maxlifetime}
return ls, nil
} }
// SessionDestroy delete ledis session by id // SessionDestroy delete ledis session by id
@ -167,7 +161,6 @@ func (lp *Provider) SessionDestroy(sid string) error {
// SessionGC Impelment method, no used. // SessionGC Impelment method, no used.
func (lp *Provider) SessionGC() { func (lp *Provider) SessionGC() {
return
} }
// SessionAll return all active session // SessionAll return all active session


@ -205,11 +205,7 @@ func (rp *MemProvider) SessionDestroy(sid string) error {
} }
} }
err := client.Delete(sid) return client.Delete(sid)
if err != nil {
return err
}
return nil
} }
func (rp *MemProvider) connectInit() error { func (rp *MemProvider) connectInit() error {
@ -219,7 +215,6 @@ func (rp *MemProvider) connectInit() error {
// SessionGC Impelment method, no used. // SessionGC Impelment method, no used.
func (rp *MemProvider) SessionGC() { func (rp *MemProvider) SessionGC() {
return
} }
// SessionAll return all activeSession // SessionAll return all activeSession


@ -143,7 +143,6 @@ func (mp *Provider) SessionInit(maxlifetime int64, savePath string) error {
// SessionRead get mysql session by sid // SessionRead get mysql session by sid
func (mp *Provider) SessionRead(sid string) (session.Store, error) { func (mp *Provider) SessionRead(sid string) (session.Store, error) {
c := mp.connectInit() c := mp.connectInit()
defer c.Close()
row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid) row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid)
var sessiondata []byte var sessiondata []byte
err := row.Scan(&sessiondata) err := row.Scan(&sessiondata)
@ -171,16 +170,12 @@ func (mp *Provider) SessionExist(sid string) bool {
row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid) row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid)
var sessiondata []byte var sessiondata []byte
err := row.Scan(&sessiondata) err := row.Scan(&sessiondata)
if err == sql.ErrNoRows { return !(err == sql.ErrNoRows)
return false
}
return true
} }
// SessionRegenerate generate new sid for mysql session // SessionRegenerate generate new sid for mysql session
func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
c := mp.connectInit() c := mp.connectInit()
defer c.Close()
row := c.QueryRow("select session_data from "+TableName+" where session_key=?", oldsid) row := c.QueryRow("select session_data from "+TableName+" where session_key=?", oldsid)
var sessiondata []byte var sessiondata []byte
err := row.Scan(&sessiondata) err := row.Scan(&sessiondata)
@ -214,7 +209,6 @@ func (mp *Provider) SessionGC() {
c := mp.connectInit() c := mp.connectInit()
c.Exec("DELETE from "+TableName+" where session_expiry < ?", time.Now().Unix()-mp.maxlifetime) c.Exec("DELETE from "+TableName+" where session_expiry < ?", time.Now().Unix()-mp.maxlifetime)
c.Close() c.Close()
return
} }
// SessionAll count values in mysql session // SessionAll count values in mysql session


@ -184,11 +184,7 @@ func (mp *Provider) SessionExist(sid string) bool {
row := c.QueryRow("select session_data from session where session_key=$1", sid) row := c.QueryRow("select session_data from session where session_key=$1", sid)
var sessiondata []byte var sessiondata []byte
err := row.Scan(&sessiondata) err := row.Scan(&sessiondata)
return !(err == sql.ErrNoRows)
if err == sql.ErrNoRows {
return false
}
return true
} }
// SessionRegenerate generate new sid for postgresql session // SessionRegenerate generate new sid for postgresql session
@ -228,7 +224,6 @@ func (mp *Provider) SessionGC() {
c := mp.connectInit() c := mp.connectInit()
c.Exec("DELETE from session where EXTRACT(EPOCH FROM (current_timestamp - session_expiry)) > $1", mp.maxlifetime) c.Exec("DELETE from session where EXTRACT(EPOCH FROM (current_timestamp - session_expiry)) > $1", mp.maxlifetime)
c.Close() c.Close()
return
} }
// SessionAll count values in postgresql session // SessionAll count values in postgresql session


@ -128,7 +128,7 @@ func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
} }
if len(configs) > 1 { if len(configs) > 1 {
poolsize, err := strconv.Atoi(configs[1]) poolsize, err := strconv.Atoi(configs[1])
if err != nil || poolsize <= 0 { if err != nil || poolsize < 0 {
rp.poolsize = MaxPoolSize rp.poolsize = MaxPoolSize
} else { } else {
rp.poolsize = poolsize rp.poolsize = poolsize
@ -155,7 +155,7 @@ func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
return nil, err return nil, err
} }
if rp.password != "" { if rp.password != "" {
if _, err := c.Do("AUTH", rp.password); err != nil { if _, err = c.Do("AUTH", rp.password); err != nil {
c.Close() c.Close()
return nil, err return nil, err
} }
@ -176,13 +176,16 @@ func (rp *Provider) SessionRead(sid string) (session.Store, error) {
c := rp.poollist.Get() c := rp.poollist.Get()
defer c.Close() defer c.Close()
kvs, err := redis.String(c.Do("GET", sid))
var kv map[interface{}]interface{} var kv map[interface{}]interface{}
kvs, err := redis.String(c.Do("GET", sid))
if err != nil && err != redis.ErrNil {
return nil, err
}
if len(kvs) == 0 { if len(kvs) == 0 {
kv = make(map[interface{}]interface{}) kv = make(map[interface{}]interface{})
} else { } else {
kv, err = session.DecodeGob([]byte(kvs)) if kv, err = session.DecodeGob([]byte(kvs)); err != nil {
if err != nil {
return nil, err return nil, err
} }
} }
@ -216,20 +219,7 @@ func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error)
c.Do("RENAME", oldsid, sid) c.Do("RENAME", oldsid, sid)
c.Do("EXPIRE", sid, rp.maxlifetime) c.Do("EXPIRE", sid, rp.maxlifetime)
} }
return rp.SessionRead(sid)
kvs, err := redis.String(c.Do("GET", sid))
var kv map[interface{}]interface{}
if len(kvs) == 0 {
kv = make(map[interface{}]interface{})
} else {
kv, err = session.DecodeGob([]byte(kvs))
if err != nil {
return nil, err
}
}
rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime}
return rs, nil
} }
// SessionDestroy delete redis session by id // SessionDestroy delete redis session by id
@ -243,7 +233,6 @@ func (rp *Provider) SessionDestroy(sid string) error {
// SessionGC Impelment method, no used. // SessionGC Impelment method, no used.
func (rp *Provider) SessionGC() { func (rp *Provider) SessionGC() {
return
} }
// SessionAll return all activeSession // SessionAll return all activeSession


@ -74,21 +74,16 @@ func (st *CookieSessionStore) SessionID() string {
// SessionRelease Write cookie session to http response cookie // SessionRelease Write cookie session to http response cookie
func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) { func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) {
str, err := encodeCookie(cookiepder.block, encodedCookie, err := encodeCookie(cookiepder.block, cookiepder.config.SecurityKey, cookiepder.config.SecurityName, st.values)
cookiepder.config.SecurityKey, if err == nil {
cookiepder.config.SecurityName,
st.values)
if err != nil {
return
}
cookie := &http.Cookie{Name: cookiepder.config.CookieName, cookie := &http.Cookie{Name: cookiepder.config.CookieName,
Value: url.QueryEscape(str), Value: url.QueryEscape(encodedCookie),
Path: "/", Path: "/",
HttpOnly: true, HttpOnly: true,
Secure: cookiepder.config.Secure, Secure: cookiepder.config.Secure,
MaxAge: cookiepder.config.Maxage} MaxAge: cookiepder.config.Maxage}
http.SetCookie(w, cookie) http.SetCookie(w, cookie)
return }
} }
type cookieConfig struct { type cookieConfig struct {
@ -166,7 +161,6 @@ func (pder *CookieProvider) SessionDestroy(sid string) error {
// SessionGC Implement method, no used. // SessionGC Implement method, no used.
func (pder *CookieProvider) SessionGC() { func (pder *CookieProvider) SessionGC() {
return
} }
// SessionAll Implement method, return 0. // SessionAll Implement method, return 0.


@ -87,9 +87,16 @@ func (fs *FileSessionStore) SessionRelease(w http.ResponseWriter) {
var f *os.File var f *os.File
if err == nil { if err == nil {
f, err = os.OpenFile(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid), os.O_RDWR, 0777) f, err = os.OpenFile(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid), os.O_RDWR, 0777)
if err != nil {
SLogger.Println(err)
return
}
} else if os.IsNotExist(err) { } else if os.IsNotExist(err) {
f, err = os.Create(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid)) f, err = os.Create(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid))
if err != nil {
SLogger.Println(err)
return
}
} else { } else {
return return
} }
@ -163,10 +170,7 @@ func (fp *FileProvider) SessionExist(sid string) bool {
defer filepder.lock.Unlock() defer filepder.lock.Unlock()
_, err := os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) _, err := os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid))
if err == nil { return err == nil
return true
}
return false
} }
// SessionDestroy Remove all files in this save path // SessionDestroy Remove all files in this save path


@ -74,8 +74,7 @@ func TestCookieEncodeDecode(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("encodeCookie:", err) t.Fatal("encodeCookie:", err)
} }
dst := make(map[interface{}]interface{}) dst, err := decodeCookie(block, hashKey, securityName, str, 3600)
dst, err = decodeCookie(block, hashKey, securityName, str, 3600)
if err != nil { if err != nil {
t.Fatal("decodeCookie", err) t.Fatal("decodeCookie", err)
} }
@ -115,7 +114,7 @@ func TestParseConfig(t *testing.T) {
if cf2.Gclifetime != 3600 { if cf2.Gclifetime != 3600 {
t.Fatal("parseconfig get gclifetime error") t.Fatal("parseconfig get gclifetime error")
} }
if cf2.EnableSetCookie != false { if cf2.EnableSetCookie {
t.Fatal("parseconfig get enableSetCookie error") t.Fatal("parseconfig get enableSetCookie error")
} }
cconfig := new(cookieConfig) cconfig := new(cookieConfig)


@ -81,6 +81,7 @@ func Register(name string, provide Provider) {
provides[name] = provide provides[name] = provide
} }
// ManagerConfig define the session config
type ManagerConfig struct { type ManagerConfig struct {
CookieName string `json:"cookieName"` CookieName string `json:"cookieName"`
EnableSetCookie bool `json:"enableSetCookie,omitempty"` EnableSetCookie bool `json:"enableSetCookie,omitempty"`
@ -92,9 +93,9 @@ type ManagerConfig struct {
ProviderConfig string `json:"providerConfig"` ProviderConfig string `json:"providerConfig"`
Domain string `json:"domain"` Domain string `json:"domain"`
SessionIDLength int64 `json:"sessionIDLength"` SessionIDLength int64 `json:"sessionIDLength"`
EnableSidInHttpHeader bool `json:"enableSidInHttpHeader"` EnableSidInHTTPHeader bool `json:"EnableSidInHTTPHeader"`
SessionNameInHttpHeader string `json:"sessionNameInHttpHeader"` SessionNameInHTTPHeader string `json:"SessionNameInHTTPHeader"`
EnableSidInUrlQuery bool `json:"enableSidInUrlQuery"` EnableSidInURLQuery bool `json:"EnableSidInURLQuery"`
} }
// Manager contains Provider and its configuration. // Manager contains Provider and its configuration.
@ -125,14 +126,14 @@ func NewManager(provideName string, cf *ManagerConfig) (*Manager, error) {
cf.Maxlifetime = cf.Gclifetime cf.Maxlifetime = cf.Gclifetime
} }
if cf.EnableSidInHttpHeader { if cf.EnableSidInHTTPHeader {
if cf.SessionNameInHttpHeader == "" { if cf.SessionNameInHTTPHeader == "" {
panic(errors.New("SessionNameInHttpHeader is empty")) panic(errors.New("SessionNameInHTTPHeader is empty"))
} }
strMimeHeader := textproto.CanonicalMIMEHeaderKey(cf.SessionNameInHttpHeader) strMimeHeader := textproto.CanonicalMIMEHeaderKey(cf.SessionNameInHTTPHeader)
if cf.SessionNameInHttpHeader != strMimeHeader { if cf.SessionNameInHTTPHeader != strMimeHeader {
strErrMsg := "SessionNameInHttpHeader (" + cf.SessionNameInHttpHeader + ") has the wrong format, it should be like this : " + strMimeHeader strErrMsg := "SessionNameInHTTPHeader (" + cf.SessionNameInHTTPHeader + ") has the wrong format, it should be like this : " + strMimeHeader
panic(errors.New(strErrMsg)) panic(errors.New(strErrMsg))
} }
} }
@ -163,7 +164,7 @@ func (manager *Manager) getSid(r *http.Request) (string, error) {
cookie, errs := r.Cookie(manager.config.CookieName) cookie, errs := r.Cookie(manager.config.CookieName)
if errs != nil || cookie.Value == "" { if errs != nil || cookie.Value == "" {
var sid string var sid string
if manager.config.EnableSidInUrlQuery { if manager.config.EnableSidInURLQuery {
errs := r.ParseForm() errs := r.ParseForm()
if errs != nil { if errs != nil {
return "", errs return "", errs
@ -173,8 +174,8 @@ func (manager *Manager) getSid(r *http.Request) (string, error) {
} }
// if not found in Cookie / param, then read it from request headers // if not found in Cookie / param, then read it from request headers
if manager.config.EnableSidInHttpHeader && sid == "" { if manager.config.EnableSidInHTTPHeader && sid == "" {
sids, isFound := r.Header[manager.config.SessionNameInHttpHeader] sids, isFound := r.Header[manager.config.SessionNameInHTTPHeader]
if isFound && len(sids) != 0 { if isFound && len(sids) != 0 {
return sids[0], nil return sids[0], nil
} }
@ -226,9 +227,9 @@ func (manager *Manager) SessionStart(w http.ResponseWriter, r *http.Request) (se
} }
r.AddCookie(cookie) r.AddCookie(cookie)
if manager.config.EnableSidInHttpHeader { if manager.config.EnableSidInHTTPHeader {
r.Header.Set(manager.config.SessionNameInHttpHeader, sid) r.Header.Set(manager.config.SessionNameInHTTPHeader, sid)
w.Header().Set(manager.config.SessionNameInHttpHeader, sid) w.Header().Set(manager.config.SessionNameInHTTPHeader, sid)
} }
return return
@ -236,9 +237,9 @@ func (manager *Manager) SessionStart(w http.ResponseWriter, r *http.Request) (se
// SessionDestroy Destroy session by its id in http request cookie. // SessionDestroy Destroy session by its id in http request cookie.
func (manager *Manager) SessionDestroy(w http.ResponseWriter, r *http.Request) { func (manager *Manager) SessionDestroy(w http.ResponseWriter, r *http.Request) {
if manager.config.EnableSidInHttpHeader { if manager.config.EnableSidInHTTPHeader {
r.Header.Del(manager.config.SessionNameInHttpHeader) r.Header.Del(manager.config.SessionNameInHTTPHeader)
w.Header().Del(manager.config.SessionNameInHttpHeader) w.Header().Del(manager.config.SessionNameInHTTPHeader)
} }
cookie, err := r.Cookie(manager.config.CookieName) cookie, err := r.Cookie(manager.config.CookieName)
@ -306,9 +307,9 @@ func (manager *Manager) SessionRegenerateID(w http.ResponseWriter, r *http.Reque
} }
r.AddCookie(cookie) r.AddCookie(cookie)
if manager.config.EnableSidInHttpHeader { if manager.config.EnableSidInHTTPHeader {
r.Header.Set(manager.config.SessionNameInHttpHeader, sid) r.Header.Set(manager.config.SessionNameInHTTPHeader, sid)
w.Header().Set(manager.config.SessionNameInHttpHeader, sid) w.Header().Set(manager.config.SessionNameInHTTPHeader, sid)
} }
return return
@ -328,7 +329,7 @@ func (manager *Manager) sessionID() (string, error) {
b := make([]byte, manager.config.SessionIDLength) b := make([]byte, manager.config.SessionIDLength)
n, err := rand.Read(b) n, err := rand.Read(b)
if n != len(b) || err != nil { if n != len(b) || err != nil {
return "", fmt.Errorf("Could not successfully read from the system CSPRNG.") return "", fmt.Errorf("Could not successfully read from the system CSPRNG")
} }
return hex.EncodeToString(b), nil return hex.EncodeToString(b), nil
} }
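// Minimal usage sketch of the renamed options (values are illustrative; the
// header name must already be in canonical MIME form, as enforced above):
//
//	m, err := session.NewManager("memory", &session.ManagerConfig{
//		CookieName:              "gosessionid",
//		Gclifetime:              3600,
//		EnableSidInHTTPHeader:   true,
//		SessionNameInHTTPHeader: "Gosessionid",
//		EnableSidInURLQuery:     true,
//	})
//	if err != nil {
//		panic(err)
//	}
//	go m.GC()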


@ -11,44 +11,40 @@ import (
"github.com/ssdb/gossdb/ssdb" "github.com/ssdb/gossdb/ssdb"
) )
var ssdbProvider = &SsdbProvider{} var ssdbProvider = &Provider{}
type SsdbProvider struct { // Provider holds ssdb client and configs
type Provider struct {
client *ssdb.Client client *ssdb.Client
host string host string
port int port int
maxLifetime int64 maxLifetime int64
} }
func (p *SsdbProvider) connectInit() error { func (p *Provider) connectInit() error {
var err error var err error
if p.host == "" || p.port == 0 { if p.host == "" || p.port == 0 {
return errors.New("SessionInit First") return errors.New("SessionInit First")
} }
p.client, err = ssdb.Connect(p.host, p.port) p.client, err = ssdb.Connect(p.host, p.port)
if err != nil {
return err return err
} }
return nil
}
func (p *SsdbProvider) SessionInit(maxLifetime int64, savePath string) error { // SessionInit init the ssdb with the config
var e error = nil func (p *Provider) SessionInit(maxLifetime int64, savePath string) error {
p.maxLifetime = maxLifetime p.maxLifetime = maxLifetime
address := strings.Split(savePath, ":") address := strings.Split(savePath, ":")
p.host = address[0] p.host = address[0]
p.port, e = strconv.Atoi(address[1])
if e != nil { var err error
return e if p.port, err = strconv.Atoi(address[1]); err != nil {
}
err := p.connectInit()
if err != nil {
return err return err
} }
return nil return p.connectInit()
} }
func (p *SsdbProvider) SessionRead(sid string) (session.Store, error) { // SessionRead return a ssdb client session Store
func (p *Provider) SessionRead(sid string) (session.Store, error) {
if p.client == nil { if p.client == nil {
if err := p.connectInit(); err != nil { if err := p.connectInit(); err != nil {
return nil, err return nil, err
@ -71,7 +67,8 @@ func (p *SsdbProvider) SessionRead(sid string) (session.Store, error) {
return rs, nil return rs, nil
} }
func (p *SsdbProvider) SessionExist(sid string) bool { // SessionExist judged whether sid is exist in session
func (p *Provider) SessionExist(sid string) bool {
if p.client == nil { if p.client == nil {
if err := p.connectInit(); err != nil { if err := p.connectInit(); err != nil {
panic(err) panic(err)
@ -85,9 +82,10 @@ func (p *SsdbProvider) SessionExist(sid string) bool {
return false return false
} }
return true return true
} }
func (p *SsdbProvider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
// SessionRegenerate regenerate session with new sid and delete oldsid
func (p *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
//conn.Do("setx", key, v, ttl) //conn.Do("setx", key, v, ttl)
if p.client == nil { if p.client == nil {
if err := p.connectInit(); err != nil { if err := p.connectInit(); err != nil {
@ -119,27 +117,27 @@ func (p *SsdbProvider) SessionRegenerate(oldsid, sid string) (session.Store, err
return rs, nil return rs, nil
} }
func (p *SsdbProvider) SessionDestroy(sid string) error { // SessionDestroy destroy the sid
func (p *Provider) SessionDestroy(sid string) error {
if p.client == nil { if p.client == nil {
if err := p.connectInit(); err != nil { if err := p.connectInit(); err != nil {
return err return err
} }
} }
_, err := p.client.Del(sid) _, err := p.client.Del(sid)
if err != nil {
return err return err
} }
return nil
// SessionGC not implemented
func (p *Provider) SessionGC() {
} }
func (p *SsdbProvider) SessionGC() { // SessionAll not implemented
return func (p *Provider) SessionAll() int {
}
func (p *SsdbProvider) SessionAll() int {
return 0 return 0
} }
// SessionStore holds the session information which stored in ssdb
type SessionStore struct { type SessionStore struct {
sid string sid string
lock sync.RWMutex lock sync.RWMutex
@ -148,12 +146,15 @@ type SessionStore struct {
client *ssdb.Client client *ssdb.Client
} }
// Set the key and value
func (s *SessionStore) Set(key, value interface{}) error { func (s *SessionStore) Set(key, value interface{}) error {
s.lock.Lock() s.lock.Lock()
defer s.lock.Unlock() defer s.lock.Unlock()
s.values[key] = value s.values[key] = value
return nil return nil
} }
// Get return the value by the key
func (s *SessionStore) Get(key interface{}) interface{} { func (s *SessionStore) Get(key interface{}) interface{} {
s.lock.Lock() s.lock.Lock()
defer s.lock.Unlock() defer s.lock.Unlock()
@ -163,30 +164,36 @@ func (s *SessionStore) Get(key interface{}) interface{} {
return nil return nil
} }
// Delete the key in session store
func (s *SessionStore) Delete(key interface{}) error { func (s *SessionStore) Delete(key interface{}) error {
s.lock.Lock() s.lock.Lock()
defer s.lock.Unlock() defer s.lock.Unlock()
delete(s.values, key) delete(s.values, key)
return nil return nil
} }
// Flush delete all keys and values
func (s *SessionStore) Flush() error { func (s *SessionStore) Flush() error {
s.lock.Lock() s.lock.Lock()
defer s.lock.Unlock() defer s.lock.Unlock()
s.values = make(map[interface{}]interface{}) s.values = make(map[interface{}]interface{})
return nil return nil
} }
// SessionID return the sessionID
func (s *SessionStore) SessionID() string { func (s *SessionStore) SessionID() string {
return s.sid return s.sid
} }
// SessionRelease Store the keyvalues into ssdb
func (s *SessionStore) SessionRelease(w http.ResponseWriter) { func (s *SessionStore) SessionRelease(w http.ResponseWriter) {
b, err := session.EncodeGob(s.values) b, err := session.EncodeGob(s.values)
if err != nil { if err != nil {
return return
} }
s.client.Do("setx", s.sid, string(b), s.maxLifetime) s.client.Do("setx", s.sid, string(b), s.maxLifetime)
} }
func init() { func init() {
session.Register("ssdb", ssdbProvider) session.Register("ssdb", ssdbProvider)
} }


@ -90,8 +90,6 @@ func serverStaticRouter(ctx *context.Context) {
} }
http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, sch) http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, sch)
return
} }
type serveContentHolder struct { type serveContentHolder struct {
@ -109,14 +107,14 @@ var (
func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, error) { func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, error) {
mapKey := acceptEncoding + ":" + filePath mapKey := acceptEncoding + ":" + filePath
mapLock.RLock() mapLock.RLock()
mapFile, _ := staticFileMap[mapKey] mapFile := staticFileMap[mapKey]
mapLock.RUnlock() mapLock.RUnlock()
if isOk(mapFile, fi) { if isOk(mapFile, fi) {
return mapFile.encoding != "", mapFile.encoding, mapFile, nil return mapFile.encoding != "", mapFile.encoding, mapFile, nil
} }
mapLock.Lock() mapLock.Lock()
defer mapLock.Unlock() defer mapLock.Unlock()
if mapFile, _ = staticFileMap[mapKey]; !isOk(mapFile, fi) { if mapFile = staticFileMap[mapKey]; !isOk(mapFile, fi) {
file, err := os.Open(filePath) file, err := os.Open(filePath)
if err != nil { if err != nil {
return false, "", nil, err return false, "", nil, err


@ -32,7 +32,7 @@ type Swagger struct {
Paths map[string]*Item `json:"paths" yaml:"paths"` Paths map[string]*Item `json:"paths" yaml:"paths"`
Definitions map[string]Schema `json:"definitions,omitempty" yaml:"definitions,omitempty"` Definitions map[string]Schema `json:"definitions,omitempty" yaml:"definitions,omitempty"`
SecurityDefinitions map[string]Security `json:"securityDefinitions,omitempty" yaml:"securityDefinitions,omitempty"` SecurityDefinitions map[string]Security `json:"securityDefinitions,omitempty" yaml:"securityDefinitions,omitempty"`
Security map[string][]string `json:"security,omitempty" yaml:"security,omitempty"` Security []map[string][]string `json:"security,omitempty" yaml:"security,omitempty"`
Tags []Tag `json:"tags,omitempty" yaml:"tags,omitempty"` Tags []Tag `json:"tags,omitempty" yaml:"tags,omitempty"`
ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"` ExternalDocs *ExternalDocs `json:"externalDocs,omitempty" yaml:"externalDocs,omitempty"`
} }
@ -84,6 +84,7 @@ type Operation struct {
Schemes []string `json:"schemes,omitempty" yaml:"schemes,omitempty"` Schemes []string `json:"schemes,omitempty" yaml:"schemes,omitempty"`
Parameters []Parameter `json:"parameters,omitempty" yaml:"parameters,omitempty"` Parameters []Parameter `json:"parameters,omitempty" yaml:"parameters,omitempty"`
Responses map[string]Response `json:"responses,omitempty" yaml:"responses,omitempty"` Responses map[string]Response `json:"responses,omitempty" yaml:"responses,omitempty"`
Security []map[string][]string `json:"security,omitempty" yaml:"security,omitempty"`
Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"`
} }
@ -100,7 +101,7 @@ type Parameter struct {
Default interface{} `json:"default,omitempty" yaml:"default,omitempty"` Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
} }
// A limited subset of JSON-Schema's items object. It is used by parameter definitions that are not located in "body". // ParameterItems A limited subset of JSON-Schema's items object. It is used by parameter definitions that are not located in "body".
// http://swagger.io/specification/#itemsObject // http://swagger.io/specification/#itemsObject
type ParameterItems struct { type ParameterItems struct {
Type string `json:"type,omitempty" yaml:"type,omitempty"` Type string `json:"type,omitempty" yaml:"type,omitempty"`

Some files were not shown because too many files have changed in this diff.