mirror of https://github.com/astaxie/beego.git synced 2025-07-11 12:01:01 +00:00

267 Commits

Author SHA1 Message Date
1b6edafc96 Merge pull request #3412 from astaxie/develop
v1.11.1
2018-11-30 21:54:26 +08:00
8152ade1b6 Merge pull request #3419 from xpzouying/close_fs_when_is_not_nil
Close fs when it is not nil
2018-11-30 16:57:49 +08:00
6350f8b904 Merge pull request #3420 from xpzouying/format_list_travis_yml
format list in .travis.yml
2018-11-30 16:57:25 +08:00
0c5398a19c update travis 2018-11-30 16:42:44 +08:00
4b656268d3 travis 2018-11-28 16:17:53 +08:00
10729a1fc5 update vendor & module 2018-11-28 16:05:15 +08:00
cdb3ef808f format list in .travis.yml 2018-11-28 08:59:27 +08:00
a5a2471f2c close fs only when it was opened without error
avoids a panic when closing a nil filesystem
2018-11-28 08:55:20 +08:00
6282747f6d update vendor 2018-11-27 14:11:14 +08:00
d5fd5cad38 Merge pull request #3417 from xpzouying/update_code_format
better format
2018-11-27 14:01:59 +08:00
fab7c6b6d0 better format
- add comments for public functions
- format import order in admin.go
- better format for NewControllerRegister in router.go
2018-11-26 23:19:05 +08:00
42ade6aa49 v1.11.1 2018-11-22 13:10:52 +08:00
55d9b69cd9 update mod 2018-11-22 13:08:39 +08:00
2a8d6f943f Merge pull request #3408 from nlimpid/develop
add different column name parse strategy
2018-11-21 17:18:39 +08:00
6b0155c4fb add different column name parse strategy 2018-11-20 22:47:56 +08:00
e22a5143bc Merge pull request #3403 from nlimpid/develop
add context for db operation
2018-11-20 15:44:25 +08:00
a17eb54515 Merge pull request #3405 from coldnight/feature-add-elapsed-in-response
Add .Elapsed in context.ResponseWriter for monitoring purposes
2018-11-20 15:39:13 +08:00
d5cf1050db check qs is nil before get forContext 2018-11-19 23:42:56 +08:00
b021686521 Add .Elapsed in context.ResponseWriter for monitoring purposes
With this commit we can record each request's elapsed time,
so we can easily monitor it by using a filter.
2018-11-19 16:38:14 +08:00
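A minimal Go sketch of how such a monitoring filter might read the new Elapsed field; the filter pattern, the log sink, and the assumption that Elapsed is already populated when FinishRouter filters run are illustrative, not part of the commit.

package main

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/context"
	"github.com/astaxie/beego/logs"
)

func main() {
	// ReturnOnOutput is set to false so the filter still runs after the response is written.
	beego.InsertFilter("/*", beego.FinishRouter, func(ctx *context.Context) {
		// Assumes the framework fills ctx.ResponseWriter.Elapsed once the handler finishes.
		logs.Info("%s %s took %s", ctx.Input.Method(), ctx.Input.URL(), ctx.ResponseWriter.Elapsed)
	}, false)
	beego.Run()
}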
b0e2bbce2a Merge pull request #3391 from astaxie/develop
V1.11.0
2018-11-19 14:40:43 +08:00
7a50ea7e36 Merge pull request #3395 from astaxie/AddTestForSnakeString
#3192 AddTestSnakeString
2018-11-18 21:59:48 +08:00
3070cfc60b Merge pull request #3396 from astaxie/FixAnnotationOnSnakeString
Fix #3192
2018-11-18 21:59:35 +08:00
e56d1b718f add context for db operation 2018-11-18 21:54:25 +08:00
c4c3067a31 Update utils.go 2018-11-15 15:16:41 +08:00
81346fe641 Update utils_test.go 2018-11-15 14:50:38 +08:00
1a66ad56c6 Merge pull request #3317 from HSoshiant/master
Lock
2018-11-14 19:24:23 +08:00
31f2adb79d Update session.go 2018-11-14 19:24:10 +08:00
f514ae309b Update session.go 2018-11-14 19:23:10 +08:00
277d3d98e3 v1.11.0 2018-11-14 17:01:12 +08:00
2c46877b36 Merge pull request #3390 from astaxie/FixDataRaceOnCache
Fix  #3354
2018-11-14 15:58:20 +08:00
cfe54a02c5 Merge pull request #3388 from s00500/develop
Closes #2515 Autodetect timezone in NewOrmWithDB()
2018-11-13 16:48:18 +08:00
55f390d08a Merge pull request #3387 from wuyumin/master
dynamically add task #2708
2018-11-13 14:51:41 +08:00
cf31222643 Merge pull request #3386 from DennisMao/AddTransportSetting
add transport setting
2018-11-13 14:50:28 +08:00
9e036bcab5 Update memory.go 2018-11-13 14:43:23 +08:00
d02170e3cb Closes #2515 Autodetect timezone in NewOrmWithDB() 2018-11-12 10:18:05 +01:00
2d6f1af1a5 dynamically add task 2018-11-12 17:17:06 +08:00
430457609f Update httplib_test.go 2018-11-12 13:04:17 +08:00
0333e26b3e Merge pull request #3382 from lxShaDoWxl/add_custom_fs_template
Add custom fs template
2018-11-10 22:44:28 +08:00
d0d28566b9 chore(GoMod): add dependency go-bindata-assetfs in vendor dir 2018-11-10 13:41:47 +06:00
872b787e6c refactor(FileSystem): add comments function 2018-11-10 12:34:53 +06:00
01a99edf80 chore(GoMod): add dependency go-bindata-assetfs 2018-11-10 12:26:19 +06:00
6050d37d2a Merge remote-tracking branch 'me/develop' into add_custom_fs_template 2018-11-10 11:39:28 +06:00
876dce8e54 fix the routerInfo is nil 2018-11-09 18:03:26 +08:00
24885c28f2 fix the comments update 2018-11-09 17:54:20 +08:00
5ea04bdfd3 update mod 2018-11-09 12:37:28 +08:00
9fdc1eaf3a Merge pull request #3352 from SongLiangChen/develop
add sessionid prefix
2018-11-08 23:29:05 +08:00
6b5a70d246 Merge pull request #3378 from nukc/develop
orm: support filter raw sql
2018-11-08 23:28:30 +08:00
8391d26220 Merge pull request #3383 from LockGit/develop
security issue: fix arbitrary file read
2018-11-08 23:21:18 +08:00
f193e313a3 refactor(FileSystem): using single-line if 2018-11-07 20:21:34 +06:00
9ac4928113 refactor(Template): a detailed description of the error 2018-11-07 20:20:10 +06:00
9865779f14 security issue: fix arbitrary file read 2018-11-07 11:31:27 +08:00
aa6d0f9f0b fix(Template): correct check error 2018-11-06 21:40:43 +06:00
68b0bd98fd fix(Template): error handling when reading files 2018-11-06 20:41:05 +06:00
3447798494 fix(Template): dependencies in travis 2018-11-06 20:18:48 +06:00
ca1b96f986 feat(Template): use interface http.FileSystem 2018-11-06 20:06:21 +06:00
771fe35431 feat(Template): testing fs bindata 2018-11-05 22:58:59 +06:00
2f00ad1602 fix(Template): close open file 2018-11-05 22:51:20 +06:00
f740b71ded fix(Template): remove duplicate check open/exists 2018-11-05 21:08:03 +06:00
7aae58a543 feat(Template): create interface FileSystem for to create custom fs 2018-11-05 21:05:19 +06:00
c8da875f83 add sessionId prefix 2018-11-05 09:51:27 +08:00
501d8a97f6 add sessionId prefix 2018-11-05 09:50:19 +08:00
736e66fcda orm: support filter raw sql 2018-11-05 03:47:21 +08:00
d3ad810f16 add sessionId prefix 2018-10-29 13:35:31 +08:00
abc8b78065 add sessionId prefix 2018-10-29 12:18:06 +08:00
f64e6b72e9 Merge pull request #3362 from SmartBrave/develop
modify qbs to qps
2018-10-28 20:01:23 +08:00
4e83b4400a Merge pull request #3371 from HaraldNordgren/go_versions
Bump Go versions and use 1.n.x to get latest minor versions
2018-10-28 19:49:27 +08:00
f6f61513a1 Bump Go versions and use 1.n.x to get latest minor versions 2018-10-28 00:58:36 +02:00
8217817a0b modify qbs to qps 2018-10-24 10:38:59 +08:00
833f54d818 Merge pull request #3319 from Colstuwjx/annotated-filter
Annotated filter
2018-10-23 13:45:18 +08:00
706c086bc5 Merge pull request #3345 from dingyuanhong/develop
fix / can use dynamic directory
2018-10-23 13:43:45 +08:00
187add9b84 add sessionid prefix 2018-10-10 11:02:45 +08:00
5b8e468a13 Merge pull request #3344 from akhedrane/patch-1
due to #3278, numRow should be 0
2018-10-05 21:16:18 +08:00
dff9c8f5fa fix / can use dynamic directory 2018-10-01 15:16:35 +08:00
21a8623002 due to #3278, numRow should be 0 2018-09-30 10:45:18 +01:00
ea9c5822e6 Merge pull request #3292 from SongLiangChen/master
Read over 4096 length values
2018-09-30 16:03:45 +08:00
ad0d166d46 Merge pull request #3295 from GNURub/feature/outputWithformat
feature/outputwithformat
2018-09-30 16:01:07 +08:00
2c2ace9a60 Merge pull request #3313 from oiooj/pr-module
support go modules
2018-09-30 15:55:57 +08:00
8134a89e81 Merge pull request #3333 from akhedrane/patch-1
Return error when wrong filtering field
2018-09-30 15:45:23 +08:00
e342a0099f Merge pull request #3340 from GNURub/feature/update-travis
Add go 1.11 version
2018-09-30 15:44:46 +08:00
c4ed5030da Merge pull request #3339 from GNURub/hotfix/redis-uri
Support redis URI format
2018-09-30 15:43:41 +08:00
7b9c24567d Merge pull request #3335 from GNURub/hotfix/ranking-response-times
Added link to time ranking
2018-09-30 15:42:05 +08:00
6906c5ce30 Updated travis go version 2018-09-27 18:27:38 +02:00
e4605f232b Support redis URI 2018-09-26 18:05:09 +02:00
6092e737a1 Added link to ranking 2018-09-24 18:33:56 +02:00
0e4d954fa7 Return error when wrong filtering field
When the end user passes a wrong filtering field, the ORM should return an error instead of calling Panic(),
so developers can handle the error.
2018-09-23 12:18:15 +02:00
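A hedged sketch of the behaviour this change is after: a bad filter field now surfaces as an error from the query call instead of a panic. The model, table name, and field are placeholders.

package main

import (
	"log"

	"github.com/astaxie/beego/orm"
)

// User is a placeholder model, assumed to be registered with orm.RegisterModel.
type User struct {
	Id   int
	Name string
}

func main() {
	o := orm.NewOrm()
	var users []User
	// With this change the wrong filtering field is reported here instead of panicking.
	if _, err := o.QueryTable("user").Filter("no_such_field", 1).All(&users); err != nil {
		log.Println("query failed:", err)
	}
}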
1cbba4d56f Add annotated filter, support @Import, @Filter.
Signed-off-by: Colstuwjx <Colstuwjx@gmail.com>
2018-09-07 15:59:57 +08:00
kun
cf5d1f3f3c support go modules 2018-09-05 14:05:16 +08:00
1097ac3682 GetProvider 2018-08-28 15:12:28 -04:00
755cc98ef7 Fix content type 2018-08-21 12:32:16 +02:00
5c407ff2e3 Add map shortcut and ServeFormatted method in output 2018-08-20 22:55:50 +02:00
8f455ef199 Read over 4096 length values 2018-08-17 11:40:00 +08:00
7e0649d661 Merge pull request #3289 from nezorflame/patch-1
Remove panic from Redirect()
2018-08-16 09:22:22 +08:00
1a3dcb4f84 Remove panic from Redirect()
This `panic(ErrAbort)` is unnecessary in the `Redirect` function and causes problems in production code.
2018-08-12 05:11:03 +03:00
b606f1f73f Merge pull request #3283 from JessonChan/develop
typo fixed
2018-08-09 17:05:25 +08:00
8241f219fd Merge pull request #3278 from hurisheng/revert-3247-develop
Revert "send ErrNoRows if the query returns zero rows ... in method orm_query…"
2018-08-09 17:05:01 +08:00
dea45a3d6c fix TestAll() 2018-08-07 16:36:27 +08:00
34a812d45f typo fixed
#3260
2018-08-07 12:08:15 +08:00
842336834f Merge pull request #3275 from WiFeng/master
fix bug where only some tasks, not all, are executed
2018-08-07 10:07:29 +08:00
58fe012446 Merge pull request #3274 from gombaniro/improvement/make-TestSet-on-map-self-contained
breaks dependency on TestNewBeeMap Testcase
2018-08-07 10:02:29 +08:00
bf0d40bca6 Merge pull request #3280 from GNURub/feature/pusher
Add access to pusher
2018-08-07 09:58:56 +08:00
48e6658eca Add access to pusher 2018-08-03 12:33:46 +02:00
1bd3fb7a33 Revert "send ErrNoRows if the query returns zero rows ... in method orm_query…" 2018-08-03 13:35:48 +08:00
d86410a631 fix bug where only some tasks, not all, are executed 2018-08-01 11:20:22 +08:00
6b62502b99 breaks dependency on TestNewBeeMap 2018-08-01 10:24:39 +08:00
053a075344 Merge pull request #3271 from astaxie/develop
add vendor gopkg.in/yaml.v2
2018-07-31 21:18:48 +08:00
9dd7d19ce7 add vendor gopkg.in/yaml.v2 2018-07-31 21:18:07 +08:00
efe0f67388 Merge pull request #3267 from astaxie/develop
beego 1.10.1
2018-07-31 20:52:47 +08:00
de66d2bdfd beego 1.10.1 2018-07-31 20:09:08 +08:00
0e4fe4d177 Merge pull request #3270 from astaxie/revert-3269-master
Revert "hible"
2018-07-31 20:07:18 +08:00
6d84db1e93 Revert "hible" 2018-07-31 20:07:03 +08:00
2486f3826a Merge pull request #3269 from 514366607/master
hible
2018-07-31 20:06:55 +08:00
d7b8aa8b52 only add golang.org vendor 2018-07-31 19:25:43 +08:00
c7c0b01ec5 hible
cache add function
// IncrBy increase counter by num.
IncrBy(key string, num int)
// DecrBy decrease counter by num.
DecrBy(key string, num int)
2018-07-31 17:19:09 +08:00
6d69047fff update go vet 2018-07-30 15:13:04 +08:00
787ab12605 Merge branch 'develop' of https://github.com/astaxie/beego into develop 2018-07-30 15:01:58 +08:00
f4112accef update go vet 2018-07-30 15:01:49 +08:00
42c394e28b Merge pull request #3263 from guomao545/master
Support returning middle-level values
2018-07-30 12:27:05 +08:00
5051d902fb Merge pull request #2986 from oxgo/hourlylog
add hourly log rotate
2018-07-30 12:26:37 +08:00
48acfa08be add vendor 2018-07-30 12:05:51 +08:00
39fc30b8b2 Support returning middle-level values
fix bug where multilevel yaml config could not correctly return a middle-level value
2018-07-27 15:33:24 +08:00
two
046cb248e0 edit test case 2018-07-26 15:08:14 +08:00
two
31c746d9d7 fix all conflicts 2018-07-26 14:34:25 +08:00
two
38a2f32252 fix one conflict 2018-07-26 14:29:26 +08:00
d55f54a8ab Merge pull request #3149 from liaoishere/feature/support-begintx
Support DB.BeginTx in go 1.8
2018-07-23 20:25:17 +08:00
feb0e67fd7 upgrade go version from 1.9.2 to 1.9.7 in test env.
upgrade to avoid bug: https://github.com/golang/go/issues/22976

Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-23 15:58:12 +08:00
a09bafbf2a Merge pull request #3233 from astaxie/develop
beego 1.10.0
2018-07-21 15:55:28 +08:00
03de7456ca Merge pull request #3250 from 0x0400/develop
acquire lock when access config data
2018-07-21 15:25:23 +08:00
78f2fd8d14 acquire lock when access config data 2018-07-21 14:56:09 +08:00
a048ed51a7 ready to release 1.10.0 2018-07-21 09:29:00 +08:00
164a9231e8 Merge pull request #3249 from GNURub/feature/autocert
Feature/autocert
2018-07-21 09:20:07 +08:00
aaa7e33778 Autocert ok 2018-07-20 19:54:25 +02:00
f7008e2877 Removed patch 2018-07-20 19:02:09 +02:00
cf6e825547 Domains 2018-07-20 18:59:45 +02:00
38f9a3c49e AutoCert 2018-07-20 18:53:57 +02:00
f18283a517 Merge pull request #3181 from GNURub/feature/YAML
Feature/yaml
2018-07-20 23:41:15 +08:00
61aec396e0 Update .travis.yml 2018-07-20 23:40:11 +08:00
5ba9e63086 Merge branch 'develop' into feature/YAML 2018-07-20 23:24:51 +08:00
5acc56648d Merge branch 'develop' into feature/support-begintx 2018-07-20 23:16:12 +08:00
bc773039ca Merge pull request #2997 from DennisMao/master
fix the model can not be registered correctly on Ubuntu 32bit
2018-07-20 23:14:41 +08:00
868fc2a29f fix go1.10.3 orm test failed 2018-07-20 22:45:44 +08:00
81f69f12ab Merge branch 'develop' of https://github.com/astaxie/beego into develop 2018-07-20 22:30:25 +08:00
0711c3289f fix the orm test 2018-07-20 19:58:56 +08:00
b8868d6d2d remove unnecessary conversion
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 17:07:17 +08:00
30bbc81a2e fix test case that calls All()
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 16:51:36 +08:00
1a3f1d66c1 rename orm_go18.go to orm.go
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 16:36:06 +08:00
6bdd152d91 upgrade postgres in travis
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 16:36:06 +08:00
443c77b303 support DB.BeginTx of golang 1.8
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 16:36:06 +08:00
0dff771707 fix unquoted identifier that may be misleading in postgres
Signed-off-by: Penghui Liao <liaoishere@gmail.com>
2018-07-20 16:36:06 +08:00
c9b6e4f825 Merge pull request #2981 from TankTheFrank/fix_template_render_with_automatic_parameter_routing
fixes template rendering with automatic mapped parameters (see #2979)
2018-07-20 15:39:10 +08:00
abd02c7de4 Merge pull request #2985 from terryding77/fix_orm_Field_SetRaw_function_error_judge_problem
fix orm fields SetRaw function error judge problem
2018-07-20 15:36:40 +08:00
eb4e0e4030 Merge pull request #3022 from chenpeiyuan/develop
do html escape before display path, avoid xss
2018-07-20 15:33:12 +08:00
96dffcd27f Merge pull request #3105 from ckahi/hotfix_log_dir
auto create log dir
2018-07-20 15:32:42 +08:00
0d0d87f600 Merge branch 'develop' of https://github.com/astaxie/beego into develop 2018-07-20 15:25:04 +08:00
2c779a4287 Merge pull request #3141 from gadelkareem/patch-2
Improve access log
2018-07-20 15:20:15 +08:00
f25893832f Merge pull request #3142 from amitash1109/fix_response_http_code
Fix response http code
2018-07-20 15:18:54 +08:00
af73a2d515 Merge branch 'develop' into fix_response_http_code 2018-07-20 15:18:30 +08:00
67a6b8723c Merge pull request #3146 from Wang-Kai/master
Add code style for logs README
2018-07-20 15:17:31 +08:00
fdccd85330 Merge pull request #3152 from joshtechnologygroup/staticfile_unexpected_eof_bug_gix
[#2973] Fix Unexpected EOF bug in staticfile
2018-07-20 15:09:07 +08:00
ca394fc8ab Merge pull request #3182 from GNURub/feature/autobind
Add method to set the data depending on the accepted
2018-07-20 15:05:28 +08:00
9c9ba0129f Merge pull request #3226 from jianjianzhu/master
Fix the wrong status code in prod
2018-07-20 14:53:08 +08:00
b61c91d93d remove unnecessary conversion 2018-07-20 14:47:11 +08:00
f15732798f Merge pull request #3239 from wilhelmguo/develop
add session redis IdleTimeout config
2018-07-20 14:44:07 +08:00
efbe655d6a Merge pull request #3245 from 0x0400/patch-1
fix typo
2018-07-20 14:43:38 +08:00
27ced1d9c3 Merge pull request #3247 from mohan2808/develop
send ErrNoRows if the query returns zero rows ... in method orm_query…
2018-07-20 14:34:23 +08:00
8f6bce3b87 fix test case 2018-07-20 14:26:43 +08:00
be75f93d43 add miss dep 2018-07-20 12:14:27 +08:00
541fb181fe Merge branch 'master' into develop 2018-07-20 12:00:53 +08:00
293b54192f send ErrNoRows if the query returns zero rows ... in method orm_queryset.All() 2018-07-19 18:51:16 +05:30
0e0718d110 fix typo
hasReuired --> hasRequired
2018-07-17 23:32:11 +08:00
6fec0a7831 add session redis IdleTimeout config 2018-07-12 10:48:50 +08:00
654ebebe3c Merge pull request #3217 from jinxjinxagain/develop
fix: when multiple comment routers are on one func
2018-07-08 16:13:21 +08:00
08c3ca642e Merge pull request #3230 from Colstuwjx/fix/correct-httplib-maxidleconnection-default-value
Fix: correct MaxIdleConnsPerHost value to net/http default 100.
2018-07-08 16:11:35 +08:00
b3c46a87ac Fix: correct MaxIdleConnsPerHost value to net/http default 100. 2018-07-05 19:15:42 +08:00
464d080518 fix httpcode in prod 2018-07-02 11:21:06 +08:00
227c04c9e6 fix: when multiple comment routers are on one func, only the last controller is generated 2018-06-28 15:54:17 +08:00
e5d68aceed Merge pull request #3185 from kaka89/master
Fix default value bug, and add config for maxfiles
2018-06-23 22:54:35 +08:00
67d9241abc Merge pull request #3171 from whomm/master
debug stringsToJSON
2018-06-23 22:50:11 +08:00
110dbcb31f Merge pull request #3208 from hurisheng/qs_forupdate
add 'FOR UPDATE' support for querySet
2018-06-23 22:49:01 +08:00
740bf72f0c Merge pull request #3202 from openset/develop
Update: Htmlquote Htmlunquote
2018-06-23 22:43:53 +08:00
6b3b8607a0 Merge branch 'develop' into develop 2018-06-23 22:43:45 +08:00
b21c59ee70 Merge pull request #3206 from whilei/gofmt-2018-Jun-17-00-39
gofmt
2018-06-23 22:38:07 +08:00
fc2c96a177 add 'FOR UPDATE' support for querySet 2018-06-23 22:25:05 +08:00
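A hedged sketch of the new FOR UPDATE support, assuming the query-set method added here is named ForUpdate() and is used inside a transaction; the model and filter are placeholders.

package main

import "github.com/astaxie/beego/orm"

// Account is a placeholder model, assumed to be registered with the ORM.
type Account struct {
	Id      int
	Balance int
}

func main() {
	o := orm.NewOrm()
	o.Begin()
	var acc Account
	// ForUpdate() appends "FOR UPDATE" so the selected row stays locked until commit.
	o.QueryTable("account").Filter("id", 1).ForUpdate().One(&acc)
	acc.Balance += 10
	o.Update(&acc, "Balance")
	o.Commit()
}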
ia
87ba3f3cd3 all: gofmt
Run standard gofmt command on project root.

- go version go1.10.3 darwin/amd64

Signed-off-by: ia <isaac.ardis@gmail.com>
2018-06-17 00:47:51 +02:00
b80b7b06fc Update: Redundant semicolon disableEscapeHTML 2018-06-14 11:55:07 +08:00
ad6c97ec1b Update: Htmlquote Htmlunquote 2018-06-13 15:43:01 +08:00
d3d97de312 Merge pull request #3200 from openset/master
Update: use PathEscape replace QueryEscape
2018-06-12 22:45:14 +08:00
bf915c3280 Update: use PathEscape replace QueryEscape
If filename contains a space (" "), QueryEscape uses "+" instead.
2018-06-12 16:15:20 +08:00
19c5cd130d Merge pull request #3190 from NSObjects/develop
add field comment on create table
2018-06-07 11:06:23 +08:00
1df2662924 add field comment on create table 2018-06-06 12:33:28 +08:00
f979050a45 Fix default value bug, and add default maxfiles
1. add default values for maxlines (100000) and maxsize (1 << 28)
2. add maxfiles to the configuration, just like `maxlines` & `maxsize`
2018-06-03 23:08:03 +08:00
45b68d444d Add method to set the data depending on the accepted 2018-06-01 19:11:01 +02:00
732f79e758 Add dep travis 2018-05-31 14:52:47 +02:00
4e954e32b8 Test YAML 2018-05-31 13:48:24 +02:00
92e81ccf50 Merge branch 'develop' into feature/YAML 2018-05-31 13:35:44 +02:00
91f2005067 Test YAML 2018-05-31 13:35:23 +02:00
7c80bf6f9d Add YAML 2018-05-30 16:06:40 +02:00
cc2c98c112 update travis 2018-05-30 22:01:59 +08:00
c3c0adbf55 Merge pull request #3175 from mo0feng/master
Create redis_cluster.go
2018-05-28 16:48:06 +08:00
jz
04c305f273 fix use it comments
2018-05-24 15:14:56 +08:00
jz
8c8cf46b55 Create redis_cluster.go
support redis cluster
2018-05-23 17:30:13 +08:00
e96ae0c24a debug stringsToJSON
json escape: \u followed by four hex digits (http://json.org/)
2018-05-21 15:18:18 +08:00
98a3cda260 Fix Unexpected EOF bug in staticfile 2018-05-07 13:51:05 +05:30
1fd7fa5df7 make example runable 2018-05-03 22:04:49 +08:00
3d3f2ed4c5 Merge pull request #3127 from kaka89/master
Refactor yaml config for support multilevel
2018-05-03 14:07:59 +08:00
0f73050567 add code style for logs README 2018-05-03 12:05:59 +08:00
a40899e6be Merge pull request #3145 from gadelkareem/develop
Allow log prefix
2018-05-03 11:27:50 +08:00
a9a15e2c54 Allow log prefix 2018-05-02 00:24:09 +02:00
896c258e44 Log redirects and abort after redirect 2018-04-30 17:48:01 +02:00
6df42d63e2 Fix response http code 2018-04-29 15:12:32 +03:00
33bf80b052 Merge branch 'pr/3141' into patch-2 2018-04-28 20:07:23 +02:00
d5c1c0e9a4 log errors in access log and make static request logging optional 2018-04-28 20:03:39 +02:00
8e61a6a6de Allow access log regardless of the log level 2018-04-28 17:15:19 +02:00
ccaa2dd9e0 Update yaml.go
delete white line.
2018-04-20 19:44:22 +08:00
507ea757d7 Merge pull request #3039 from cloudzhou/patch-1
execElem.FieldByName as local variable
2018-04-20 19:41:07 +08:00
9d526dfd50 Merge pull request #3100 from godcong/master
change github.com/garyburd/redigo to newest branch github.com/gomodul…
2018-04-20 19:40:30 +08:00
ba89253e4a Update yaml.go
add support for multilevel yaml config
2018-04-20 19:40:06 +08:00
0d6f190e72 Merge pull request #3107 from sergeylanzman/patch-1
Update .travis.yml golang 1.10
2018-04-20 19:33:50 +08:00
91b9a65db0 Merge pull request #3109 from Jeff885/Jeff885-patch-1
When log maxsize is set to a big int, FileWrite Init fails
2018-04-20 19:33:33 +08:00
e96a5fb3ca Merge pull request #3115 from m4grio/minor-typo
Amend a very minor typo in a variable name
2018-04-20 19:26:42 +08:00
f5f70f386d Merge pull request #3126 from aruhi/master
In dev mode, template parse errors cause program lock
2018-04-20 19:26:24 +08:00
242efcf7fa Merge pull request #3103 from qshuai/master
fix typo
2018-04-20 19:26:00 +08:00
51cc6fc257 Update template.go 2018-04-20 19:42:50 +09:00
5fb29cb772 Amend a very minor typo in a variable name 2018-04-10 12:19:50 +08:00
2da894d4a7 When log maxsize is set to a big int, FileWrite Init fails
example:
beego.SetLogger("multifile", `{"filename":"logs/liverelay.log","separate":["emergency","error","info","debug"],"maxsize":250000000}`)

json: cannot unmarshal number 2.5e+08 into Go value of type int

The error should be returned and shown to the developer.
2018-04-06 00:50:35 +08:00
2623b15ce0 Update .travis.yml 2018-04-05 16:30:08 +03:00
6db9ad7002 auto create log dir 2018-04-04 15:59:52 +08:00
889408136b fix typo 2018-03-28 00:26:06 +08:00
886fefe738 change github.com/garyburd/redigo to newest branch github.com/gomodule/redigo 2018-03-26 16:59:01 +08:00
768406f134 Merge pull request #3076 from gadelkareem/patch-1
Set default Beego RunMode to production
2018-03-11 16:26:00 +08:00
075e63b2bd Merge pull request #2999 from moririnson/fix_unable_to_add_column
fix the issue #2998
2018-03-11 16:18:50 +08:00
0057c08a90 Merge pull request #3085 from WUMUXIAN/swagger
Swagger: Allow example values with different types, allow example for enum.
2018-03-11 10:48:48 +08:00
09b073356d Swagger:
1. Allow the example value to be given based on the field type; previously it was always a string.
2. Allow example values to be given for enum values.

This is needed for the beego/bee pull request to work well.
2018-03-10 17:15:24 +08:00
3c9ed48630 Set default Beego RunMode to production 2018-03-02 18:23:20 +01:00
65d8b4f544 Merge pull request #3064 from takeo-lvgs/dev
fix the issue #3063
2018-02-22 17:22:39 +08:00
6d18d4dcdd Merge pull request #3066 from takeo-lvgs/fix_3065
fix the issue #3065
2018-02-22 17:16:38 +08:00
21fe2d519e fix the issue #3065 2018-02-20 17:40:18 +09:00
9a7554fa01 fix the issue #3063 2018-02-20 11:39:29 +09:00
37d1c13603 Merge pull request #3046 from aspacca/master
Handle pointer validation
2018-02-02 18:57:13 +08:00
5ed112e946 added CanSkipAlso 2018-02-02 10:22:43 +01:00
453f112094 added more test case 2018-02-01 18:31:04 +01:00
faa3341603 Fix after test failure 2018-01-28 18:19:27 +01:00
ee9cf05796 Handle pointer validation 2018-01-28 17:40:05 +01:00
6de538b136 execElem.FieldByName as local variable
execElem.FieldByName(fieldType.Name) as local variable
2018-01-25 17:52:09 +08:00
47c1072b78 do html escape before display path, avoid xss 2018-01-08 19:35:53 +08:00
e81f1e53bf Merge pull request #3017 from Medicean/develop
Update: Fix migration generate SQL
2018-01-05 00:10:16 +08:00
cf92d2c6ef Update: Fix migration generate SQL 2018-01-04 10:42:39 +08:00
0507076c3f Merge pull request #3004 from pcallewaert/develop
redis cache: make MaxIdle configurable
2017-12-30 13:54:11 +08:00
59fd3952b7 bug: restore the default value 2017-12-26 11:48:34 +01:00
7fd80e6aa1 feat(redis.go): make MaxIdle configurable 2017-12-26 11:48:08 +01:00
24fa6189b5 fix the issue #2998 2017-12-23 16:41:56 +09:00
0bde9cbd91 fix the issue #2995 2017-12-22 16:21:23 +08:00
122414d789 Merge pull request #2992 from priteshgudge/develop
Update Documentation in Output.go
2017-12-21 17:08:10 +08:00
aac69674ad Update Documentation in Output.go
Fix Documentation for HTTP status codes descriptions.
2017-12-21 13:50:28 +05:30
1a42154c64 add file test 2017-12-20 17:54:40 +08:00
e81cca304b add file test 2017-12-20 16:19:58 +08:00
07aa97aa9a add hourly rotate file.go 2017-12-20 15:56:36 +08:00
94fba0b2aa fix orm fields SetRaw function error judge problem 2017-12-20 14:53:00 +08:00
80aa47f605 Merge pull request #2976 from szyhf/develop
Fix #2975
2017-12-19 23:34:17 +08:00
f16688817a Merge pull request #2978 from BorisBorshevsky/fix_reflection_bug
fix bug #2972
2017-12-18 19:18:59 +08:00
2670a86005 fix #2979 2017-12-14 17:55:08 +02:00
0e369e6df8 fix bug 2017-12-13 15:27:32 +02:00
84443b9c05 Fix #2975
Fix the bug where AccessLog output adds an extra blank line
2017-12-12 12:21:55 +08:00
33be6803a3 Merge pull request #2970 from gcy3y/master
update log.go add GetLevel Function to Log
2017-12-10 20:26:46 +08:00
aef2f1c66e Merge pull request #2971 from PureWhiteWu/fix_typo
fix a typo
2017-12-10 18:47:54 +08:00
619cd2d908 fix a typo 2017-12-08 23:01:21 +08:00
4613acd88e update log.go add GetLevel Function to Log 2017-12-08 15:35:12 +08:00
7886e69236 Update sess_file.go 2017-04-17 12:37:54 -04:00
109 changed files with 15825 additions and 514 deletions

View File

@ -1,9 +1,8 @@
language: go
go:
- 1.7.5
- 1.8.5
- 1.9.2
- "1.10.x"
- "1.11.x"
services:
- redis-server
- mysql
@ -22,10 +21,11 @@ install:
- go get github.com/go-sql-driver/mysql
- go get github.com/mattn/go-sqlite3
- go get github.com/bradfitz/gomemcache/memcache
- go get github.com/garyburd/redigo/redis
- go get github.com/gomodule/redigo/redis
- go get github.com/beego/x2j
- go get github.com/couchbase/go-couchbase
- go get github.com/beego/goyaml2
- go get gopkg.in/yaml.v2
- go get github.com/belogik/goes
- go get github.com/siddontang/ledisdb/config
- go get github.com/siddontang/ledisdb/ledis
@ -34,21 +34,23 @@ install:
- go get github.com/gogo/protobuf/proto
- go get github.com/Knetic/govaluate
- go get github.com/casbin/casbin
- go get github.com/elazarl/go-bindata-assetfs
- go get -u honnef.co/go/tools/cmd/gosimple
- go get -u github.com/mdempsky/unconvert
- go get -u github.com/gordonklaus/ineffassign
- go get -u github.com/golang/lint/golint
- go get -u github.com/go-redis/redis
before_script:
- psql --version
- sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi"
- sh -c "if [ '$ORM_DRIVER' = 'mysql' ]; then mysql -u root -e 'create database orm_test;'; fi"
- sh -c "if [ '$ORM_DRIVER' = 'sqlite' ]; then touch $TRAVIS_BUILD_DIR/orm_test.db; fi"
- sh -c "if [ $(go version) == *1.[5-9]* ]; then go get github.com/golang/lint/golint; golint ./...; fi"
- sh -c "if [ $(go version) == *1.[5-9]* ]; then go tool vet .; fi"
- sh -c "go get github.com/golang/lint/golint; golint ./...;"
- sh -c "go list ./... | grep -v vendor | xargs go vet -v"
- mkdir -p res/var
- ./ssdb/ssdb-server ./ssdb/ssdb.conf -d
after_script:
-killall -w ssdb-server
- killall -w ssdb-server
- rm -rf ./res/var/*
script:
- go test -v ./...
@ -58,4 +60,4 @@ script:
- find . ! \( -path './vendor' -prune \) -type f -name '*.go' -print0 | xargs -0 gofmt -l -s
- golint ./...
addons:
postgresql: "9.4"
postgresql: "9.6"

View File

@ -4,6 +4,8 @@
beego is used for rapid development of RESTful APIs, web apps and backend services in Go.
It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding.
Response time ranking: [web-frameworks](https://github.com/the-benchmarker/web-frameworks).
###### More info at [beego.me](http://beego.me).
## Quick Start

View File

@ -20,11 +20,10 @@ import (
"fmt"
"net/http"
"os"
"reflect"
"text/template"
"time"
"reflect"
"github.com/astaxie/beego/grace"
"github.com/astaxie/beego/logs"
"github.com/astaxie/beego/toolbox"
@ -35,7 +34,7 @@ import (
var beeAdminApp *adminApp
// FilterMonitorFunc is default monitor filter when admin module is enable.
// if this func returns, admin module records qbs for this request by condition of this function logic.
// if this func returns, admin module records qps for this request by condition of this function logic.
// usage:
// func MyFilterMonitor(method, requestPath string, t time.Duration, pattern string, statusCode int) bool {
// if method == "POST" {
@ -67,15 +66,27 @@ func init() {
// AdminIndex is the default http.Handler for admin module.
// it matches url pattern "/".
func adminIndex(rw http.ResponseWriter, r *http.Request) {
func adminIndex(rw http.ResponseWriter, _ *http.Request) {
execTpl(rw, map[interface{}]interface{}{}, indexTpl, defaultScriptsTpl)
}
// QpsIndex is the http.Handler for writing qbs statistics map result info in http.ResponseWriter.
// it's registered with url pattern "/qbs" in admin module.
func qpsIndex(rw http.ResponseWriter, r *http.Request) {
// QpsIndex is the http.Handler for writing qps statistics map result info in http.ResponseWriter.
// it's registered with url pattern "/qps" in admin module.
func qpsIndex(rw http.ResponseWriter, _ *http.Request) {
data := make(map[interface{}]interface{})
data["Content"] = toolbox.StatisticsMap.GetMap()
// do html escape before display path, avoid xss
if content, ok := (data["Content"]).(M); ok {
if resultLists, ok := (content["Data"]).([][]string); ok {
for i := range resultLists {
if len(resultLists[i]) > 0 {
resultLists[i][0] = template.HTMLEscapeString(resultLists[i][0])
}
}
}
}
execTpl(rw, data, qpsTpl, defaultScriptsTpl)
}
@ -92,7 +103,7 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
data := make(map[interface{}]interface{})
switch command {
case "conf":
m := make(map[string]interface{})
m := make(M)
list("BConfig", BConfig, m)
m["AppConfigPath"] = appConfigPath
m["AppConfigProvider"] = appConfigProvider
@ -116,14 +127,14 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl)
case "filter":
var (
content = map[string]interface{}{
content = M{
"Fields": []string{
"Router Pattern",
"Filter Function",
},
}
filterTypes = []string{}
filterTypeData = make(map[string]interface{})
filterTypeData = make(M)
)
if BeeApp.Handlers.enableFilter {
@ -161,7 +172,7 @@ func listConf(rw http.ResponseWriter, r *http.Request) {
}
}
func list(root string, p interface{}, m map[string]interface{}) {
func list(root string, p interface{}, m M) {
pt := reflect.TypeOf(p)
pv := reflect.ValueOf(p)
if pt.Kind() == reflect.Ptr {
@ -184,11 +195,11 @@ func list(root string, p interface{}, m map[string]interface{}) {
}
// PrintTree prints all registered routers.
func PrintTree() map[string]interface{} {
func PrintTree() M {
var (
content = map[string]interface{}{}
content = M{}
methods = []string{}
methodsData = make(map[string]interface{})
methodsData = make(M)
)
for method, t := range BeeApp.Handlers.routers {
@ -279,12 +290,12 @@ func profIndex(rw http.ResponseWriter, r *http.Request) {
// Healthcheck is a http.Handler calling health checking and showing the result.
// it's in "/healthcheck" pattern in admin module.
func healthcheck(rw http.ResponseWriter, req *http.Request) {
func healthcheck(rw http.ResponseWriter, _ *http.Request) {
var (
result []string
data = make(map[interface{}]interface{})
resultList = new([][]string)
content = map[string]interface{}{
content = M{
"Fields": []string{"Name", "Message", "Status"},
}
)
@ -332,7 +343,7 @@ func taskStatus(rw http.ResponseWriter, req *http.Request) {
}
// List Tasks
content := make(map[string]interface{})
content := make(M)
resultList := new([][]string)
var fields = []string{
"Task Name",

View File

@ -6,7 +6,7 @@ import (
)
func TestList_01(t *testing.T) {
m := make(map[string]interface{})
m := make(M)
list("BConfig", BConfig, m)
t.Log(m)
om := oldMap()
@ -18,8 +18,8 @@ func TestList_01(t *testing.T) {
}
}
func oldMap() map[string]interface{} {
m := make(map[string]interface{})
func oldMap() M {
m := make(M)
m["BConfig.AppName"] = BConfig.AppName
m["BConfig.RunMode"] = BConfig.RunMode
m["BConfig.RouterCaseSensitive"] = BConfig.RouterCaseSensitive
@ -67,6 +67,7 @@ func oldMap() map[string]interface{} {
m["BConfig.WebConfig.Session.SessionDomain"] = BConfig.WebConfig.Session.SessionDomain
m["BConfig.WebConfig.Session.SessionDisableHTTPOnly"] = BConfig.WebConfig.Session.SessionDisableHTTPOnly
m["BConfig.Log.AccessLogs"] = BConfig.Log.AccessLogs
m["BConfig.Log.EnableStaticLogs"] = BConfig.Log.EnableStaticLogs
m["BConfig.Log.AccessLogsFormat"] = BConfig.Log.AccessLogsFormat
m["BConfig.Log.FileLineNum"] = BConfig.Log.FileLineNum
m["BConfig.Log.Outputs"] = BConfig.Log.Outputs

app.go (32 lines changed)
View File

@ -24,12 +24,13 @@ import (
"net/http/fcgi"
"os"
"path"
"time"
"strings"
"time"
"github.com/astaxie/beego/grace"
"github.com/astaxie/beego/logs"
"github.com/astaxie/beego/utils"
"golang.org/x/crypto/acme/autocert"
)
var (
@ -101,7 +102,7 @@ func (app *App) Run(mws ...MiddleWare) {
}
app.Server.Handler = app.Handlers
for i:=len(mws)-1;i>=0;i-- {
for i := len(mws) - 1; i >= 0; i-- {
if mws[i] == nil {
continue
}
@ -117,7 +118,7 @@ func (app *App) Run(mws ...MiddleWare) {
app.Server.Addr = httpsAddr
if BConfig.Listen.EnableHTTPS || BConfig.Listen.EnableMutualHTTPS {
go func() {
time.Sleep(20 * time.Microsecond)
time.Sleep(1000 * time.Microsecond)
if BConfig.Listen.HTTPSPort != 0 {
httpsAddr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort)
app.Server.Addr = httpsAddr
@ -126,13 +127,21 @@ func (app *App) Run(mws ...MiddleWare) {
server.Server.ReadTimeout = app.Server.ReadTimeout
server.Server.WriteTimeout = app.Server.WriteTimeout
if BConfig.Listen.EnableMutualHTTPS {
if err := server.ListenAndServeMutualTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile, BConfig.Listen.TrustCaFile); err != nil {
logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid()))
time.Sleep(100 * time.Microsecond)
endRunning <- true
}
} else {
if BConfig.Listen.AutoTLS {
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(BConfig.Listen.Domains...),
Cache: autocert.DirCache(BConfig.Listen.TLSCacheDir),
}
app.Server.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate}
BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile = "", ""
}
if err := server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil {
logs.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid()))
time.Sleep(100 * time.Microsecond)
@ -163,15 +172,23 @@ func (app *App) Run(mws ...MiddleWare) {
// run normal mode
if BConfig.Listen.EnableHTTPS || BConfig.Listen.EnableMutualHTTPS {
go func() {
time.Sleep(20 * time.Microsecond)
time.Sleep(1000 * time.Microsecond)
if BConfig.Listen.HTTPSPort != 0 {
app.Server.Addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort)
} else if BConfig.Listen.EnableHTTP {
BeeLogger.Info("Start https server error, confict with http.Please reset https port")
BeeLogger.Info("Start https server error, conflict with http. Please reset https port")
return
}
logs.Info("https server Running on https://%s", app.Server.Addr)
if BConfig.Listen.EnableMutualHTTPS {
if BConfig.Listen.AutoTLS {
m := autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist(BConfig.Listen.Domains...),
Cache: autocert.DirCache(BConfig.Listen.TLSCacheDir),
}
app.Server.TLSConfig = &tls.Config{GetCertificate: m.GetCertificate}
BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile = "", ""
} else if BConfig.Listen.EnableMutualHTTPS {
pool := x509.NewCertPool()
data, err := ioutil.ReadFile(BConfig.Listen.TrustCaFile)
if err != nil {
@ -190,6 +207,7 @@ func (app *App) Run(mws ...MiddleWare) {
endRunning <- true
}
}()
}
if BConfig.Listen.EnableHTTP {
go func() {
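A hedged sketch of turning on the new AutoTLS path from application code; the domain and cache directory are placeholders.

package main

import "github.com/astaxie/beego"

func main() {
	// With AutoTLS enabled, certificates are fetched via autocert for the whitelisted
	// Domains and cached in TLSCacheDir instead of using HTTPSCertFile/HTTPSKeyFile.
	beego.BConfig.Listen.EnableHTTPS = true
	beego.BConfig.Listen.AutoTLS = true
	beego.BConfig.Listen.Domains = []string{"example.com"}
	beego.BConfig.Listen.TLSCacheDir = "/var/cache/autocert"
	beego.Run()
}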

View File

@ -23,7 +23,7 @@ import (
const (
// VERSION represent beego web framework version.
VERSION = "1.9.2"
VERSION = "1.11.1"
// DEV is for develop
DEV = "dev"
@ -31,7 +31,10 @@ const (
PROD = "prod"
)
//hook function to run
// M is Map shortcut
type M map[string]interface{}
// Hook function to run
type hookfunc func() error
var (
@ -62,6 +65,8 @@ func Run(params ...string) {
if len(strs) > 1 && strs[1] != "" {
BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1])
}
BConfig.Listen.Domains = params
}
BeeApp.Run()
@ -74,6 +79,7 @@ func RunWithMiddleWares(addr string, mws ...MiddleWare) {
strs := strings.Split(addr, ":")
if len(strs) > 0 && strs[0] != "" {
BConfig.Listen.HTTPAddr = strs[0]
BConfig.Listen.Domains = []string{strs[0]}
}
if len(strs) > 1 && strs[1] != "" {
BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1])
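The new M alias shortens the map[string]interface{} literals used throughout the admin and config code; a minimal illustration with arbitrary field names.

package main

import (
	"fmt"

	"github.com/astaxie/beego"
)

func main() {
	// M is the new shortcut for map[string]interface{}.
	content := beego.M{
		"Fields": []string{"Name", "Message", "Status"},
	}
	fmt.Println(content)
}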

cache/README.md (vendored, 2 lines changed)
View File

@ -52,7 +52,7 @@ Configure like this:
## Redis adapter
Redis adapter use the [redigo](http://github.com/garyburd/redigo) client.
Redis adapter use the [redigo](http://github.com/gomodule/redigo) client.
Configure like this:

cache/memory.go (vendored, 10 lines changed)
View File

@ -203,13 +203,17 @@ func (bc *MemoryCache) StartAndGC(config string) error {
dur := time.Duration(cf["interval"]) * time.Second
bc.Every = cf["interval"]
bc.dur = dur
go bc.vaccuum()
go bc.vacuum()
return nil
}
// check expiration.
func (bc *MemoryCache) vaccuum() {
if bc.Every < 1 {
func (bc *MemoryCache) vacuum() {
bc.RLock()
every := bc.Every
bc.RUnlock()
if every < 1 {
return
}
for {

cache/redis/redis.go (vendored, 22 lines changed)
View File

@ -14,9 +14,9 @@
// Package redis for cache provider
//
// depend on github.com/garyburd/redigo/redis
// depend on github.com/gomodule/redigo/redis
//
// go install github.com/garyburd/redigo/redis
// go install github.com/gomodule/redigo/redis
//
// Usage:
// import(
@ -36,9 +36,10 @@ import (
"strconv"
"time"
"github.com/garyburd/redigo/redis"
"github.com/gomodule/redigo/redis"
"github.com/astaxie/beego/cache"
"strings"
)
var (
@ -53,6 +54,7 @@ type Cache struct {
dbNum int
key string
password string
maxIdle int
}
// NewRedisCache create new redis cache with default collection name.
@ -163,16 +165,28 @@ func (rc *Cache) StartAndGC(config string) error {
if _, ok := cf["conn"]; !ok {
return errors.New("config has no conn key")
}
// Format redis://<password>@<host>:<port>
cf["conn"] = strings.Replace(cf["conn"], "redis://", "", 1)
if i := strings.Index(cf["conn"], "@"); i > -1 {
cf["password"] = cf["conn"][0:i]
cf["conn"] = cf["conn"][i+1:]
}
if _, ok := cf["dbNum"]; !ok {
cf["dbNum"] = "0"
}
if _, ok := cf["password"]; !ok {
cf["password"] = ""
}
if _, ok := cf["maxIdle"]; !ok {
cf["maxIdle"] = "3"
}
rc.key = cf["key"]
rc.conninfo = cf["conn"]
rc.dbNum, _ = strconv.Atoi(cf["dbNum"])
rc.password = cf["password"]
rc.maxIdle, _ = strconv.Atoi(cf["maxIdle"])
rc.connectInit()
@ -206,7 +220,7 @@ func (rc *Cache) connectInit() {
}
// initialize a new pool
rc.p = &redis.Pool{
MaxIdle: 3,
MaxIdle: rc.maxIdle,
IdleTimeout: 180 * time.Second,
Dial: dialFunc,
}
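A hedged example of configuring the redis cache adapter with the new URI-style conn string and the configurable maxIdle pool size; host, password, and sizes are placeholders.

package main

import (
	"time"

	"github.com/astaxie/beego/cache"
	_ "github.com/astaxie/beego/cache/redis" // registers the "redis" adapter
)

func main() {
	// conn now accepts the redis://<password>@<host>:<port> form.
	bm, err := cache.NewCache("redis", `{"key":"beecache","conn":"redis://mypassword@127.0.0.1:6379","dbNum":"0","maxIdle":"10"}`)
	if err != nil {
		panic(err)
	}
	bm.Put("counter", 1, 10*time.Second)
}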

View File

@ -19,7 +19,7 @@ import (
"time"
"github.com/astaxie/beego/cache"
"github.com/garyburd/redigo/redis"
"github.com/gomodule/redigo/redis"
)
func TestRedisCache(t *testing.T) {

View File

@ -55,6 +55,9 @@ type Listen struct {
EnableHTTP bool
HTTPAddr string
HTTPPort int
AutoTLS bool
Domains []string
TLSCacheDir string
EnableHTTPS bool
EnableMutualHTTPS bool
HTTPSAddr string
@ -98,14 +101,15 @@ type SessionConfig struct {
SessionAutoSetCookie bool
SessionDomain string
SessionDisableHTTPOnly bool // used to allow for cross domain cookies/javascript cookies.
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionEnableSidInHTTPHeader bool // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader string
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
SessionEnableSidInURLQuery bool // enable get the sessionId from Url Query params
}
// LogConfig holds Log related config
type LogConfig struct {
AccessLogs bool
EnableStaticLogs bool //log static files requests default: false
AccessLogsFormat string //access log format: JSON_FORMAT, APACHE_FORMAT or empty string
FileLineNum bool
Outputs map[string]string // Store Adaptor : config
@ -138,8 +142,8 @@ func init() {
panic(err)
}
var filename = "app.conf"
if os.Getenv("BEEGO_MODE") != "" {
filename = os.Getenv("BEEGO_MODE") + ".app.conf"
if os.Getenv("BEEGO_RUNMODE") != "" {
filename = os.Getenv("BEEGO_RUNMODE") + ".app.conf"
}
appConfigPath = filepath.Join(workPath, "conf", filename)
if !utils.FileExists(appConfigPath) {
@ -182,13 +186,18 @@ func recoverPanic(ctx *context.Context) {
if BConfig.RunMode == DEV && BConfig.EnableErrorsRender {
showErr(err, ctx, stack)
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
}
}
func newBConfig() *Config {
return &Config{
AppName: "beego",
RunMode: DEV,
RunMode: PROD,
RouterCaseSensitive: true,
ServerName: "beegoServer:" + VERSION,
RecoverPanic: true,
@ -203,6 +212,9 @@ func newBConfig() *Config {
ServerTimeOut: 0,
ListenTCP4: false,
EnableHTTP: true,
AutoTLS: false,
Domains: []string{},
TLSCacheDir: ".",
HTTPAddr: "",
HTTPPort: 8080,
EnableHTTPS: false,
@ -240,13 +252,14 @@ func newBConfig() *Config {
SessionCookieLifeTime: 0, //set cookie default is the browser life
SessionAutoSetCookie: true,
SessionDomain: "",
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionEnableSidInHTTPHeader: false, // enable store/get the sessionId into/from http headers
SessionNameInHTTPHeader: "Beegosessionid",
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
SessionEnableSidInURLQuery: false, // enable get the sessionId from Url Query params
},
},
Log: LogConfig{
AccessLogs: false,
EnableStaticLogs: false,
AccessLogsFormat: "APACHE_FORMAT",
FileLineNum: true,
Outputs: map[string]string{"console": ""},

View File

@ -150,12 +150,12 @@ func ExpandValueEnv(value string) (realValue string) {
}
key := ""
defalutV := ""
defaultV := ""
// value start with "${"
for i := 2; i < vLen; i++ {
if value[i] == '|' && (i+1 < vLen && value[i+1] == '|') {
key = value[2:i]
defalutV = value[i+2 : vLen-1] // other string is default value.
defaultV = value[i+2 : vLen-1] // other string is default value.
break
} else if value[i] == '}' {
key = value[2:i]
@ -165,7 +165,7 @@ func ExpandValueEnv(value string) (realValue string) {
realValue = os.Getenv(key)
if realValue == "" {
realValue = defalutV
realValue = defaultV
}
return
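For context, a small example of the ${ENV||default} expansion this function implements; the variable name is a placeholder.

package main

import (
	"fmt"
	"os"

	"github.com/astaxie/beego/config"
)

func main() {
	// "${ENV||default}": when ENV is unset or empty, the value after "||" is used.
	os.Unsetenv("RUNMODE")
	fmt.Println(config.ExpandValueEnv("${RUNMODE||dev}")) // prints "dev"

	os.Setenv("RUNMODE", "prod")
	fmt.Println(config.ExpandValueEnv("${RUNMODE}")) // prints "prod"
}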

View File

@ -126,7 +126,7 @@ func (c *fakeConfigContainer) SaveConfigFile(filename string) error {
var _ Configer = new(fakeConfigContainer)
// NewFakeConfig return a fake Congiger
// NewFakeConfig return a fake Configer
func NewFakeConfig() Configer {
return &fakeConfigContainer{
data: make(map[string]string),

View File

@ -78,15 +78,37 @@ func (ini *IniConfig) parseData(dir string, data []byte) (*IniConfigContainer, e
}
}
section := defaultSection
tmpBuf := bytes.NewBuffer(nil)
for {
line, _, err := buf.ReadLine()
if err == io.EOF {
tmpBuf.Reset()
shouldBreak := false
for {
tmp, isPrefix, err := buf.ReadLine()
if err == io.EOF {
shouldBreak = true
break
}
//It might be a good idea to throw a error on all unknonw errors?
if _, ok := err.(*os.PathError); ok {
return nil, err
}
tmpBuf.Write(tmp)
if isPrefix {
continue
}
if !isPrefix {
break
}
}
if shouldBreak {
break
}
//It might be a good idea to throw a error on all unknonw errors?
if _, ok := err.(*os.PathError); ok {
return nil, err
}
line := tmpBuf.Bytes()
line = bytes.TrimSpace(line)
if bytes.Equal(line, bEmpty) {
continue
@ -215,7 +237,7 @@ func (c *IniConfigContainer) Bool(key string) (bool, error) {
}
// DefaultBool returns the boolean value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultBool(key string, defaultval bool) bool {
v, err := c.Bool(key)
if err != nil {
@ -230,7 +252,7 @@ func (c *IniConfigContainer) Int(key string) (int, error) {
}
// DefaultInt returns the integer value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultInt(key string, defaultval int) int {
v, err := c.Int(key)
if err != nil {
@ -245,7 +267,7 @@ func (c *IniConfigContainer) Int64(key string) (int64, error) {
}
// DefaultInt64 returns the int64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
v, err := c.Int64(key)
if err != nil {
@ -260,7 +282,7 @@ func (c *IniConfigContainer) Float(key string) (float64, error) {
}
// DefaultFloat returns the float64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
v, err := c.Float(key)
if err != nil {
@ -275,7 +297,7 @@ func (c *IniConfigContainer) String(key string) string {
}
// DefaultString returns the string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultString(key string, defaultval string) string {
v := c.String(key)
if v == "" {
@ -295,7 +317,7 @@ func (c *IniConfigContainer) Strings(key string) []string {
}
// DefaultStrings returns the []string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *IniConfigContainer) DefaultStrings(key string, defaultval []string) []string {
v := c.Strings(key)
if v == nil {
@ -314,7 +336,7 @@ func (c *IniConfigContainer) GetSection(section string) (map[string]string, erro
// SaveConfigFile save the config into file.
//
// BUG(env): The environment variable config item will be saved with real value in SaveConfigFile Funcation.
// BUG(env): The environment variable config item will be saved with real value in SaveConfigFile Function.
func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) {
// Write configuration file by filename.
f, err := os.Create(filename)

View File

@ -101,7 +101,7 @@ func (c *JSONConfigContainer) Int(key string) (int, error) {
}
// DefaultInt returns the integer value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *JSONConfigContainer) DefaultInt(key string, defaultval int) int {
if v, err := c.Int(key); err == nil {
return v
@ -122,7 +122,7 @@ func (c *JSONConfigContainer) Int64(key string) (int64, error) {
}
// DefaultInt64 returns the int64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *JSONConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
if v, err := c.Int64(key); err == nil {
return v
@ -143,7 +143,7 @@ func (c *JSONConfigContainer) Float(key string) (float64, error) {
}
// DefaultFloat returns the float64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *JSONConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
if v, err := c.Float(key); err == nil {
return v
@ -163,7 +163,7 @@ func (c *JSONConfigContainer) String(key string) string {
}
// DefaultString returns the string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *JSONConfigContainer) DefaultString(key string, defaultval string) string {
// TODO FIXME should not use "" to replace non existence
if v := c.String(key); v != "" {
@ -182,7 +182,7 @@ func (c *JSONConfigContainer) Strings(key string) []string {
}
// DefaultStrings returns the []string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *JSONConfigContainer) DefaultStrings(key string, defaultval []string) []string {
if v := c.Strings(key); v != nil {
return v

View File

@ -216,7 +216,7 @@ func TestJson(t *testing.T) {
t.Error("unknown keys should return an error when expecting a Bool")
}
if !jsonconf.DefaultBool("unknow", true) {
if !jsonconf.DefaultBool("unknown", true) {
t.Error("unknown keys with default value wrong")
}
}

View File

@ -102,7 +102,7 @@ func (c *ConfigContainer) Int(key string) (int, error) {
}
// DefaultInt returns the integer value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultInt(key string, defaultval int) int {
v, err := c.Int(key)
if err != nil {
@ -117,7 +117,7 @@ func (c *ConfigContainer) Int64(key string) (int64, error) {
}
// DefaultInt64 returns the int64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
v, err := c.Int64(key)
if err != nil {
@ -133,7 +133,7 @@ func (c *ConfigContainer) Float(key string) (float64, error) {
}
// DefaultFloat returns the float64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
v, err := c.Float(key)
if err != nil {
@ -151,7 +151,7 @@ func (c *ConfigContainer) String(key string) string {
}
// DefaultString returns the string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultString(key string, defaultval string) string {
v := c.String(key)
if v == "" {
@ -170,7 +170,7 @@ func (c *ConfigContainer) Strings(key string) []string {
}
// DefaultStrings returns the []string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultStrings(key string, defaultval []string) []string {
v := c.Strings(key)
if v == nil {

View File

@ -119,7 +119,7 @@ func parseYML(buf []byte) (cnf map[string]interface{}, err error) {
// ConfigContainer A Config represents the yaml configuration.
type ConfigContainer struct {
data map[string]interface{}
sync.Mutex
sync.RWMutex
}
// Bool returns the boolean value for a given key.
@ -154,7 +154,7 @@ func (c *ConfigContainer) Int(key string) (int, error) {
}
// DefaultInt returns the integer value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultInt(key string, defaultval int) int {
v, err := c.Int(key)
if err != nil {
@ -174,7 +174,7 @@ func (c *ConfigContainer) Int64(key string) (int64, error) {
}
// DefaultInt64 returns the int64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultInt64(key string, defaultval int64) int64 {
v, err := c.Int64(key)
if err != nil {
@ -198,7 +198,7 @@ func (c *ConfigContainer) Float(key string) (float64, error) {
}
// DefaultFloat returns the float64 value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultFloat(key string, defaultval float64) float64 {
v, err := c.Float(key)
if err != nil {
@ -218,7 +218,7 @@ func (c *ConfigContainer) String(key string) string {
}
// DefaultString returns the string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultString(key string, defaultval string) string {
v := c.String(key)
if v == "" {
@ -237,7 +237,7 @@ func (c *ConfigContainer) Strings(key string) []string {
}
// DefaultStrings returns the []string value for a given key.
// if err != nil return defaltval
// if err != nil return defaultval
func (c *ConfigContainer) DefaultStrings(key string, defaultval []string) []string {
v := c.Strings(key)
if v == nil {
@ -285,9 +285,28 @@ func (c *ConfigContainer) getData(key string) (interface{}, error) {
if len(key) == 0 {
return nil, errors.New("key is empty")
}
c.RLock()
defer c.RUnlock()
if v, ok := c.data[key]; ok {
return v, nil
keys := strings.Split(key, ".")
tmpData := c.data
for idx, k := range keys {
if v, ok := tmpData[k]; ok {
switch v.(type) {
case map[string]interface{}:
{
tmpData = v.(map[string]interface{})
if idx == len(keys) - 1 {
return tmpData, nil
}
}
default:
{
return v, nil
}
}
}
}
return nil, fmt.Errorf("not exist key %q", key)
}
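A hedged sketch of the dotted-key lookup this change enables, assuming the yaml adapter is registered and can parse raw data; the YAML content and keys are placeholders.

package main

import (
	"fmt"

	"github.com/astaxie/beego/config"
	_ "github.com/astaxie/beego/config/yaml" // registers the "yaml" adapter
)

func main() {
	yml := []byte("mysql:\n  host: 127.0.0.1\n  port: 3306\n")
	c, err := config.NewConfigData("yaml", yml)
	if err != nil {
		panic(err)
	}
	// Dotted keys now traverse nested maps instead of failing.
	fmt.Println(c.String("mysql.host")) // 127.0.0.1
}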

View File

@ -48,15 +48,15 @@ func TestAssignConfig_02(t *testing.T) {
_BConfig := &Config{}
bs, _ := json.Marshal(newBConfig())
jsonMap := map[string]interface{}{}
jsonMap := M{}
json.Unmarshal(bs, &jsonMap)
configMap := map[string]interface{}{}
configMap := M{}
for k, v := range jsonMap {
if reflect.TypeOf(v).Kind() == reflect.Map {
for k1, v1 := range v.(map[string]interface{}) {
for k1, v1 := range v.(M) {
if reflect.TypeOf(v1).Kind() == reflect.Map {
for k2, v2 := range v1.(map[string]interface{}) {
for k2, v2 := range v1.(M) {
configMap[k2] = v2
}
} else {
@ -75,7 +75,7 @@ func TestAssignConfig_02(t *testing.T) {
jcf := &config.JSONConfig{}
bs, _ = json.Marshal(configMap)
ac, _ := jcf.ParseData([]byte(bs))
ac, _ := jcf.ParseData(bs)
for _, i := range []interface{}{_BConfig, &_BConfig.Listen, &_BConfig.WebConfig, &_BConfig.Log, &_BConfig.WebConfig.Session} {
assignSingleConfig(i, ac)

View File

@ -38,6 +38,14 @@ import (
"github.com/astaxie/beego/utils"
)
//commonly used mime-types
const (
ApplicationJSON = "application/json"
ApplicationXML = "application/xml"
ApplicationYAML = "application/x-yaml"
TextXML = "text/xml"
)
// NewContext return the Context with Input and Output
func NewContext() *Context {
return &Context{
@ -193,6 +201,7 @@ type Response struct {
http.ResponseWriter
Started bool
Status int
Elapsed time.Duration
}
func (r *Response) reset(rw http.ResponseWriter) {
@ -244,3 +253,11 @@ func (r *Response) CloseNotify() <-chan bool {
}
return nil
}
// Pusher http.Pusher
func (r *Response) Pusher() (pusher http.Pusher) {
if pusher, ok := r.ResponseWriter.(http.Pusher); ok {
return pusher
}
return nil
}
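A hedged sketch of HTTP/2 server push through the new Pusher() accessor; the controller name and pushed asset path are placeholders.

package controllers

import "github.com/astaxie/beego"

// MainController is a placeholder controller.
type MainController struct {
	beego.Controller
}

func (c *MainController) Get() {
	// Pusher() is non-nil only when the underlying connection supports HTTP/2 push.
	if pusher := c.Ctx.ResponseWriter.Pusher(); pusher != nil {
		pusher.Push("/static/app.css", nil)
	}
	c.Ctx.WriteString(`<link rel="stylesheet" href="/static/app.css">`)
}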

View File

@ -37,6 +37,7 @@ var (
acceptsHTMLRegex = regexp.MustCompile(`(text/html|application/xhtml\+xml)(?:,|$)`)
acceptsXMLRegex = regexp.MustCompile(`(application/xml|text/xml)(?:,|$)`)
acceptsJSONRegex = regexp.MustCompile(`(application/json)(?:,|$)`)
acceptsYAMLRegex = regexp.MustCompile(`(application/x-yaml)(?:,|$)`)
maxParam = 50
)
@ -203,6 +204,10 @@ func (input *BeegoInput) AcceptsXML() bool {
func (input *BeegoInput) AcceptsJSON() bool {
return acceptsJSONRegex.MatchString(input.Header("Accept"))
}
// AcceptsYAML Checks if request accepts json response
func (input *BeegoInput) AcceptsYAML() bool {
return acceptsYAMLRegex.MatchString(input.Header("Accept"))
}
// IP returns request client ip.
// if in proxy, return first proxy id.

View File

@ -30,6 +30,7 @@ import (
"strconv"
"strings"
"time"
"gopkg.in/yaml.v2"
)
// BeegoOutput does work for sending response header.
@ -182,8 +183,8 @@ func errorRenderer(err error) Renderer {
}
// JSON writes json to response body.
// if coding is true, it converts utf-8 to \u0000 type.
func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, coding bool) error {
// if encoding is true, it converts utf-8 to \u0000 type.
func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, encoding bool) error {
output.Header("Content-Type", "application/json; charset=utf-8")
var content []byte
var err error
@ -196,12 +197,26 @@ func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, coding bool) e
http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError)
return err
}
if coding {
if encoding {
content = []byte(stringsToJSON(string(content)))
}
return output.Body(content)
}
// YAML writes yaml to response body.
func (output *BeegoOutput) YAML(data interface{}) error {
output.Header("Content-Type", "application/x-yaml; charset=utf-8")
var content []byte
var err error
content, err = yaml.Marshal(data)
if err != nil {
http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError)
return err
}
return output.Body(content)
}
// JSONP writes jsonp to response body.
func (output *BeegoOutput) JSONP(data interface{}, hasIndent bool) error {
output.Header("Content-Type", "application/javascript; charset=utf-8")
@ -245,6 +260,19 @@ func (output *BeegoOutput) XML(data interface{}, hasIndent bool) error {
return output.Body(content)
}
// ServeFormatted serve YAML, XML OR JSON, depending on the value of the Accept header
func (output *BeegoOutput) ServeFormatted(data interface{}, hasIndent bool, hasEncode ...bool) {
accept := output.Context.Input.Header("Accept")
switch accept {
case ApplicationYAML:
output.YAML(data)
case ApplicationXML, TextXML:
output.XML(data, hasIndent)
default:
output.JSON(data, hasIndent, len(hasEncode) > 0 && hasEncode[0])
}
}
// Download forces response for download file.
// it prepares the download response header automatically.
func (output *BeegoOutput) Download(file string, filename ...string) {
@ -260,7 +288,7 @@ func (output *BeegoOutput) Download(file string, filename ...string) {
} else {
fName = filepath.Base(file)
}
output.Header("Content-Disposition", "attachment; filename="+url.QueryEscape(fName))
output.Header("Content-Disposition", "attachment; filename="+url.PathEscape(fName))
output.Header("Content-Description", "File Transfer")
output.Header("Content-Type", "application/octet-stream")
output.Header("Content-Transfer-Encoding", "binary")
@ -325,13 +353,13 @@ func (output *BeegoOutput) IsForbidden() bool {
}
// IsNotFound returns boolean of this request is not found.
// HTTP 404 means forbidden.
// HTTP 404 means not found.
func (output *BeegoOutput) IsNotFound() bool {
return output.Status == 404
}
// IsClientError returns boolean of this request client sends error data.
// HTTP 4xx means forbidden.
// HTTP 4xx means client error.
func (output *BeegoOutput) IsClientError() bool {
return output.Status >= 400 && output.Status < 500
}
@ -350,6 +378,11 @@ func stringsToJSON(str string) string {
jsons.WriteRune(r)
} else {
jsons.WriteString("\\u")
if rint < 0x100 {
jsons.WriteString("00")
} else if rint < 0x1000 {
jsons.WriteString("0")
}
jsons.WriteString(strconv.FormatInt(int64(rint), 16))
}
}


@ -32,13 +32,6 @@ import (
"github.com/astaxie/beego/session"
)
//commonly used mime-types
const (
applicationJSON = "application/json"
applicationXML = "application/xml"
textXML = "text/xml"
)
var (
// ErrAbort custom error when user stop request handler manually.
ErrAbort = errors.New("User stop run")
@ -46,10 +39,37 @@ var (
GlobalControllerRouter = make(map[string][]ControllerComments)
)
// ControllerFilter store the filter for controller
type ControllerFilter struct {
Pattern string
Pos int
Filter FilterFunc
ReturnOnOutput bool
ResetParams bool
}
// ControllerFilterComments store the comment for controller level filter
type ControllerFilterComments struct {
Pattern string
Pos int
Filter string // NOQA
ReturnOnOutput bool
ResetParams bool
}
// ControllerImportComments store the import comment for controller needed
type ControllerImportComments struct {
ImportPath string
ImportAlias string
}
// ControllerComments store the comment for the controller method
type ControllerComments struct {
Method string
Router string
Filters []*ControllerFilter
ImportComments []*ControllerImportComments
FilterComments []*ControllerFilterComments
AllowHTTPMethods []string
Params []map[string]string
MethodParams []*param.MethodParam
@ -272,9 +292,23 @@ func (c *Controller) viewPath() string {
// Redirect sends the redirection response to url with status code.
func (c *Controller) Redirect(url string, code int) {
logAccess(c.Ctx, nil, code)
c.Ctx.Redirect(code, url)
}
// SetData set the data depending on the accepted
func (c *Controller) SetData(data interface{}) {
accept := c.Ctx.Input.Header("Accept")
switch accept {
case context.ApplicationYAML:
c.Data["yaml"] = data
case context.ApplicationXML, context.TextXML:
c.Data["xml"] = data
default:
c.Data["json"] = data
}
}
// Abort stops controller handler and show the error data if code is defined in ErrorMap or code string.
func (c *Controller) Abort(code string) {
status, err := strconv.Atoi(code)
@ -317,47 +351,35 @@ func (c *Controller) URLFor(endpoint string, values ...interface{}) string {
// ServeJSON sends a json response with encoding charset.
func (c *Controller) ServeJSON(encoding ...bool) {
var (
hasIndent = true
hasEncoding = false
hasIndent = BConfig.RunMode != PROD
hasEncoding = len(encoding) > 0 && encoding[0]
)
if BConfig.RunMode == PROD {
hasIndent = false
}
if len(encoding) > 0 && encoding[0] {
hasEncoding = true
}
c.Ctx.Output.JSON(c.Data["json"], hasIndent, hasEncoding)
}
// ServeJSONP sends a jsonp response.
func (c *Controller) ServeJSONP() {
hasIndent := true
if BConfig.RunMode == PROD {
hasIndent = false
}
hasIndent := BConfig.RunMode != PROD
c.Ctx.Output.JSONP(c.Data["jsonp"], hasIndent)
}
// ServeXML sends xml response.
func (c *Controller) ServeXML() {
hasIndent := true
if BConfig.RunMode == PROD {
hasIndent = false
}
hasIndent := BConfig.RunMode != PROD
c.Ctx.Output.XML(c.Data["xml"], hasIndent)
}
// ServeFormatted serve Xml OR Json, depending on the value of the Accept header
func (c *Controller) ServeFormatted() {
accept := c.Ctx.Input.Header("Accept")
switch accept {
case applicationJSON:
c.ServeJSON()
case applicationXML, textXML:
c.ServeXML()
default:
c.ServeJSON()
}
// ServeYAML sends yaml response.
func (c *Controller) ServeYAML() {
c.Ctx.Output.YAML(c.Data["yaml"])
}
// ServeFormatted serve YAML, XML OR JSON, depending on the value of the Accept header
func (c *Controller) ServeFormatted(encoding ...bool) {
hasIndent := BConfig.RunMode != PROD
hasEncoding := len(encoding) > 0 && encoding[0]
c.Ctx.Output.ServeFormatted(c.Data, hasIndent, hasEncoding)
}
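A hedged sketch of the new content-negotiation helpers from a controller's point of view; `UserController`, the payload, and the route are illustrative, not part of this change:

```golang
package controllers

import (
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/context"
)

// UserController is hypothetical and only illustrates SetData / ServeYAML.
type UserController struct {
	beego.Controller
}

func (c *UserController) Get() {
	payload := map[string]string{"name": "alice"} // illustrative data
	// SetData files the payload under c.Data["yaml"], c.Data["xml"] or
	// c.Data["json"] according to the Accept header; the matching Serve*
	// call below then renders it.
	c.SetData(payload)
	switch c.Ctx.Input.Header("Accept") {
	case context.ApplicationYAML:
		c.ServeYAML()
	case context.ApplicationXML, context.TextXML:
		c.ServeXML()
	default:
		c.ServeJSON()
	}
}
```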
// Input returns the input data map from POST or PUT request body and query string.


@ -28,7 +28,7 @@ import (
)
const (
errorTypeHandler = iota
errorTypeHandler = iota
errorTypeController
)
@ -93,11 +93,6 @@ func showErr(err interface{}, ctx *context.Context, stack string) {
"BeegoVersion": VERSION,
"GoVersion": runtime.Version(),
}
if ctx.Output.Status != 0 {
ctx.ResponseWriter.WriteHeader(ctx.Output.Status)
} else {
ctx.ResponseWriter.WriteHeader(500)
}
t.Execute(ctx.ResponseWriter, data)
}
@ -366,7 +361,7 @@ func gatewayTimeout(rw http.ResponseWriter, r *http.Request) {
func responseError(rw http.ResponseWriter, r *http.Request, errCode int, errContent string) {
t, _ := template.New("beegoerrortemp").Parse(errtpl)
data := map[string]interface{}{
data := M{
"Title": http.StatusText(errCode),
"BeegoVersion": VERSION,
"Content": template.HTML(errContent),
@ -439,6 +434,9 @@ func exception(errCode string, ctx *context.Context) {
}
func executeError(err *errorInfo, ctx *context.Context, code int) {
//make sure to log the error in the access log
logAccess(ctx, nil, code)
if err.errorType == errorTypeHandler {
ctx.ResponseWriter.WriteHeader(code)
err.handler(ctx.ResponseWriter, ctx.Request)

fs.go Normal file

@ -0,0 +1,74 @@
package beego
import (
"net/http"
"os"
"path/filepath"
)
type FileSystem struct {
}
func (d FileSystem) Open(name string) (http.File, error) {
return os.Open(name)
}
// Walk walks the file tree rooted at root in filesystem, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn.
func Walk(fs http.FileSystem, root string, walkFn filepath.WalkFunc) error {
f, err := fs.Open(root)
if err != nil {
return err
}
info, err := f.Stat()
if err != nil {
err = walkFn(root, nil, err)
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
// walk recursively descends path, calling walkFn.
func walk(fs http.FileSystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
var err error
if !info.IsDir() {
return walkFn(path, info, nil)
}
dir, err := fs.Open(path)
defer dir.Close()
if err != nil {
if err1 := walkFn(path, info, err); err1 != nil {
return err1
}
return err
}
dirs, err := dir.Readdir(-1)
err1 := walkFn(path, info, err)
// If err != nil, walk can't walk into this directory.
// err1 != nil means walkFn want walk to skip this directory or stop walking.
// Therefore, if one of err and err1 isn't nil, walk will return.
if err != nil || err1 != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore err and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return err1
}
for _, fileInfo := range dirs {
filename := filepath.Join(path, fileInfo.Name())
if err = walk(fs, filename, fileInfo, walkFn); err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
return nil
}
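A hedged sketch of using the new `Walk` helper with the default `FileSystem` (a thin wrapper over `os.Open`); the directory name is illustrative:

```golang
package main

import (
	"fmt"
	"os"

	"github.com/astaxie/beego"
)

func main() {
	// Print every regular file under ./static; Walk accepts any http.FileSystem.
	err := beego.Walk(beego.FileSystem{}, "static", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			fmt.Println(path)
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```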

go.mod Normal file

@ -0,0 +1,39 @@
module github.com/astaxie/beego
require (
github.com/Knetic/govaluate v3.0.0+incompatible // indirect
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737
github.com/casbin/casbin v1.7.0
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c // indirect
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 // indirect
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 // indirect
github.com/elazarl/go-bindata-assetfs v1.0.0
github.com/go-redis/redis v6.14.2+incompatible
github.com/go-sql-driver/mysql v1.4.1
github.com/gogo/protobuf v1.1.1
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
github.com/gomodule/redigo v2.0.0+incompatible
github.com/lib/pq v1.0.0
github.com/mattn/go-sqlite3 v1.10.0
github.com/pelletier/go-toml v1.2.0 // indirect
github.com/pkg/errors v0.8.0 // indirect
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d // indirect
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c // indirect
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b // indirect
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect
gopkg.in/yaml.v2 v2.2.1
)
replace golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 => github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85
replace gopkg.in/yaml.v2 v2.2.1 => github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d

go.sum Normal file

@ -0,0 +1,67 @@
github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg=
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd h1:jZtX5jh5IOMu0fpOTC3ayh6QGSPJ/KWOv1lgPvbRw1M=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542 h1:nYXb+3jF6Oq/j8R/y90XrKpreCxIalBWfeyeKymgOPk=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff h1:/kO0p2RTGLB8R5gub7ps0GmYpB2O8LXEoPq8tzFDCUI=
github.com/belogik/goes v0.0.0-20151229125003-e54d722c3aff/go.mod h1:PhH1ZhyCzHKt4uAasyx+ljRCgoezetRNf59CUtwUkqY=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 h1:rRISKWyXfVxvoa702s91Zl5oREZTrR3yv+tXrrX7G/g=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/casbin/casbin v1.7.0 h1:PuzlE8w0JBg/DhIqnkF1Dewf3z+qmUZMVN07PonvVUQ=
github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb h1:w3RapLhkA5+km9Z8vUkC6VCaskduJXvXwJg5neKnfDU=
github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c h1:K4FIibkr4//ziZKOKmt4RL0YImuTjLLBtwElf+F2lSQ=
github.com/couchbase/gomemcached v0.0.0-20181122193126-5125a94a666c/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8=
github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76 h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0=
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d h1:xy93KVe+KrIIwWDEAfQBdIfsiHJkepbYsDr+VY3g9/o=
github.com/go-yaml/yaml v0.0.0-20180328195020-5420a8b6744d/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:B7ZbAFz7NOmvpUE5RGtu3u0WIizy5GdvbNpEf4RPnWs=
github.com/golang/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:uZvAcrsnNaCxlh1HorK5dUQHGmEKPh2H/Rl1kehswPo=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373 h1:p6IxqQMjab30l4lb9mmkIkkcE1yv6o0SKbPhW5pxqHI=
github.com/siddontang/ledisdb v0.0.0-20181029004158-becf5f38d373/go.mod h1:mF1DpOSOUiJRMR+FDqaqu3EBqrybQtrDDszLUZ6oxPg=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA=
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c h1:3eGShk3EQf5gJCYW+WzA0TEJQd37HLOmlYF7N0YJwv0=
github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8=
github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85 h1:et7+NAX3lLIk5qUCTA9QelBjGE/NkhzYw/mhnr0s7nI=
golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@ -11,7 +11,7 @@ import (
"github.com/astaxie/beego/session"
)
//
// register MIME type with content type
func registerMime() error {
for k, v := range mimemaps {
mime.AddExtensionType(k, v)


@ -50,6 +50,7 @@ import (
"strings"
"sync"
"time"
"gopkg.in/yaml.v2"
)
var defaultSetting = BeegoHTTPSettings{
@ -317,6 +318,7 @@ func (b *BeegoHTTPRequest) Body(data interface{}) *BeegoHTTPRequest {
}
return b
}
// XMLBody adds request raw body encoding by XML.
func (b *BeegoHTTPRequest) XMLBody(obj interface{}) (*BeegoHTTPRequest, error) {
if b.req.Body == nil && obj != nil {
@ -330,6 +332,21 @@ func (b *BeegoHTTPRequest) XMLBody(obj interface{}) (*BeegoHTTPRequest, error) {
}
return b, nil
}
// YAMLBody adds request raw body encoding by YAML.
func (b *BeegoHTTPRequest) YAMLBody(obj interface{}) (*BeegoHTTPRequest, error) {
if b.req.Body == nil && obj != nil {
byts, err := yaml.Marshal(obj)
if err != nil {
return b, err
}
b.req.Body = ioutil.NopCloser(bytes.NewReader(byts))
b.req.ContentLength = int64(len(byts))
b.req.Header.Set("Content-Type", "application/x+yaml")
}
return b, nil
}
// JSONBody adds request raw body encoding by JSON.
func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error) {
if b.req.Body == nil && obj != nil {
@ -429,12 +446,12 @@ func (b *BeegoHTTPRequest) DoRequest() (resp *http.Response, err error) {
}
b.buildURL(paramBody)
url, err := url.Parse(b.url)
urlParsed, err := url.Parse(b.url)
if err != nil {
return nil, err
}
b.req.URL = url
b.req.URL = urlParsed
trans := b.setting.Transport
@ -444,7 +461,7 @@ func (b *BeegoHTTPRequest) DoRequest() (resp *http.Response, err error) {
TLSClientConfig: b.setting.TLSClientConfig,
Proxy: b.setting.Proxy,
Dial: TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout),
MaxIdleConnsPerHost: -1,
MaxIdleConnsPerHost: 100,
}
} else {
// if b.transport is *http.Transport then set the settings.
@ -579,6 +596,16 @@ func (b *BeegoHTTPRequest) ToXML(v interface{}) error {
return xml.Unmarshal(data, v)
}
// ToYAML unmarshals the response body as YAML into v.
// It calls Response internally.
func (b *BeegoHTTPRequest) ToYAML(v interface{}) error {
data, err := b.Bytes()
if err != nil {
return err
}
return yaml.Unmarshal(data, v)
}
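A hedged sketch combining the new `YAMLBody` and `ToYAML` helpers; the URL and payload are illustrative:

```golang
package main

import (
	"fmt"

	"github.com/astaxie/beego/httplib"
)

func main() {
	// Send a YAML-encoded request body and decode a YAML response.
	req := httplib.Post("http://example.com/api/items")
	if _, err := req.YAMLBody(map[string]string{"name": "demo"}); err != nil {
		fmt.Println("marshal:", err)
		return
	}

	var out map[string]interface{}
	if err := req.ToYAML(&out); err != nil {
		fmt.Println("request:", err)
		return
	}
	fmt.Println(out)
}
```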
// Response executes the request and returns the raw response manually.
func (b *BeegoHTTPRequest) Response() (*http.Response, error) {
return b.getResponse()


@ -16,6 +16,8 @@ package httplib
import (
"io/ioutil"
"net"
"net/http"
"os"
"strings"
"testing"
@ -161,7 +163,16 @@ func TestWithSetting(t *testing.T) {
var setting BeegoHTTPSettings
setting.EnableCookie = true
setting.UserAgent = v
setting.Transport = nil
setting.Transport = &http.Transport{
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 50,
IdleConnTimeout: 90 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
setting.ReadWriteTimeout = 5 * time.Second
SetDefaultSetting(setting)


@ -16,48 +16,57 @@ As of now this logs support console, file,smtp and conn.
First you must import it
import (
"github.com/astaxie/beego/logs"
)
```golang
import (
"github.com/astaxie/beego/logs"
)
```
Then init a Log (example with console adapter)
log := NewLogger(10000)
log.SetLogger("console", "")
```golang
log := logs.NewLogger(10000)
log.SetLogger("console", "")
```
> the first parameter sets how many messages the log channel can buffer
Use it like this:
log.Trace("trace")
log.Info("info")
log.Warn("warning")
log.Debug("debug")
log.Critical("critical")
Use it like this:
```golang
log.Trace("trace")
log.Info("info")
log.Warn("warning")
log.Debug("debug")
log.Critical("critical")
```
## File adapter
Configure file adapter like this:
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test.log"}`)
```golang
log := logs.NewLogger(10000)
log.SetLogger("file", `{"filename":"test.log"}`)
```
## Conn adapter
Configure like this:
log := NewLogger(1000)
log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
log.Info("info")
```golang
log := logs.NewLogger(1000)
log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
log.Info("info")
```
## Smtp adapter
Configure like this:
log := NewLogger(10000)
log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
log.Critical("sendmail critical")
time.Sleep(time.Second * 30)
```golang
log := logs.NewLogger(10000)
log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
log.Critical("sendmail critical")
time.Sleep(time.Second * 30)
```


@ -16,13 +16,14 @@ package logs
import (
"bytes"
"strings"
"encoding/json"
"time"
"fmt"
"time"
)
const (
apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s\n"
apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s"
apacheFormat = "APACHE_FORMAT"
jsonFormat = "JSON_FORMAT"
)
@ -53,10 +54,9 @@ func (r *AccessLogRecord) json() ([]byte, error) {
}
func disableEscapeHTML(i interface{}) {
e, ok := i.(interface {
if e, ok := i.(interface {
SetEscapeHTML(bool)
});
if ok {
}); ok {
e.SetEscapeHTML(false)
}
}
@ -64,9 +64,7 @@ func disableEscapeHTML(i interface{}) {
// AccessLog - Format and print access log.
func AccessLog(r *AccessLogRecord, format string) {
var msg string
switch format {
case apacheFormat:
timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05")
msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent,
@ -81,6 +79,5 @@ func AccessLog(r *AccessLogRecord, format string) {
msg = string(jsonData)
}
}
beeLogger.Debug(msg)
beeLogger.writeMsg(levelLoggerImpl, strings.TrimSpace(msg))
}


@ -21,6 +21,7 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
@ -40,6 +41,9 @@ type fileLogWriter struct {
MaxLines int `json:"maxlines"`
maxLinesCurLines int
MaxFiles int `json:"maxfiles"`
MaxFilesCurFiles int
// Rotate at size
MaxSize int `json:"maxsize"`
maxSizeCurSize int
@ -50,6 +54,12 @@ type fileLogWriter struct {
dailyOpenDate int
dailyOpenTime time.Time
// Rotate hourly
Hourly bool `json:"hourly"`
MaxHours int64 `json:"maxhours"`
hourlyOpenDate int
hourlyOpenTime time.Time
Rotate bool `json:"rotate"`
Level int `json:"level"`
@ -66,25 +76,30 @@ func newFileWriter() Logger {
w := &fileLogWriter{
Daily: true,
MaxDays: 7,
Hourly: false,
MaxHours: 168,
Rotate: true,
RotatePerm: "0440",
Level: LevelTrace,
Perm: "0660",
MaxLines: 10000000,
MaxFiles: 999,
MaxSize: 1 << 28,
}
return w
}
// Init file logger with json config.
// jsonConfig like:
// {
// "filename":"logs/beego.log",
// "maxLines":10000,
// "maxsize":1024,
// "daily":true,
// "maxDays":15,
// "rotate":true,
// "perm":"0600"
// }
// {
// "filename":"logs/beego.log",
// "maxLines":10000,
// "maxsize":1024,
// "daily":true,
// "maxDays":15,
// "rotate":true,
// "perm":"0600"
// }
func (w *fileLogWriter) Init(jsonConfig string) error {
err := json.Unmarshal([]byte(jsonConfig), w)
if err != nil {
@ -115,10 +130,16 @@ func (w *fileLogWriter) startLogger() error {
return w.initFd()
}
func (w *fileLogWriter) needRotate(size int, day int) bool {
func (w *fileLogWriter) needRotateDaily(size int, day int) bool {
return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
(w.Daily && day != w.dailyOpenDate)
}
func (w *fileLogWriter) needRotateHourly(size int, hour int) bool {
return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
(w.Hourly && hour != w.hourlyOpenDate)
}
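A hedged sketch of a file-adapter config that enables the new hourly rotation; the filename and limits are illustrative:

```golang
package main

import "github.com/astaxie/beego/logs"

func main() {
	log := logs.NewLogger(10000)
	// "hourly":true switches rotation from daily to hourly; "maxhours":168
	// keeps roughly one week of rotated files before deleteOldLog removes them.
	log.SetLogger("file", `{"filename":"logs/app.log","hourly":true,"maxhours":168,"rotate":true}`)
	log.Info("hourly rotation enabled")
}
```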
@ -127,14 +148,23 @@ func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
if level > w.Level {
return nil
}
h, d := formatTimeHeader(when)
msg = string(h) + msg + "\n"
hd, d, h := formatTimeHeader(when)
msg = string(hd) + msg + "\n"
if w.Rotate {
w.RLock()
if w.needRotate(len(msg), d) {
if w.needRotateHourly(len(msg), h) {
w.RUnlock()
w.Lock()
if w.needRotate(len(msg), d) {
if w.needRotateHourly(len(msg), h) {
if err := w.doRotate(when); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
}
}
w.Unlock()
} else if w.needRotateDaily(len(msg), d) {
w.RUnlock()
w.Lock()
if w.needRotateDaily(len(msg), d) {
if err := w.doRotate(when); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
}
@ -161,6 +191,10 @@ func (w *fileLogWriter) createLogFile() (*os.File, error) {
if err != nil {
return nil, err
}
filepath := path.Dir(w.Filename)
os.MkdirAll(filepath, os.FileMode(perm))
fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm))
if err == nil {
// Make sure file perm is user set perm cause of `os.OpenFile` will obey umask
@ -178,8 +212,12 @@ func (w *fileLogWriter) initFd() error {
w.maxSizeCurSize = int(fInfo.Size())
w.dailyOpenTime = time.Now()
w.dailyOpenDate = w.dailyOpenTime.Day()
w.hourlyOpenTime = time.Now()
w.hourlyOpenDate = w.hourlyOpenTime.Hour()
w.maxLinesCurLines = 0
if w.Daily {
if w.Hourly {
go w.hourlyRotate(w.hourlyOpenTime)
} else if w.Daily {
go w.dailyRotate(w.dailyOpenTime)
}
if fInfo.Size() > 0 && w.MaxLines > 0 {
@ -198,7 +236,22 @@ func (w *fileLogWriter) dailyRotate(openTime time.Time) {
tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
<-tm.C
w.Lock()
if w.needRotate(0, time.Now().Day()) {
if w.needRotateDaily(0, time.Now().Day()) {
if err := w.doRotate(time.Now()); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
}
}
w.Unlock()
}
func (w *fileLogWriter) hourlyRotate(openTime time.Time) {
y, m, d := openTime.Add(1 * time.Hour).Date()
h, _, _ := openTime.Add(1 * time.Hour).Clock()
nextHour := time.Date(y, m, d, h, 0, 0, 0, openTime.Location())
tm := time.NewTimer(time.Duration(nextHour.UnixNano() - openTime.UnixNano() + 100))
<-tm.C
w.Lock()
if w.needRotateHourly(0, time.Now().Hour()) {
if err := w.doRotate(time.Now()); err != nil {
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
}
@ -238,8 +291,10 @@ func (w *fileLogWriter) lines() (int, error) {
func (w *fileLogWriter) doRotate(logTime time.Time) error {
// file exists
// Find the next available number
num := 1
num := w.MaxFilesCurFiles + 1
fName := ""
format := ""
var openTime time.Time
rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64)
if err != nil {
return err
@ -251,19 +306,26 @@ func (w *fileLogWriter) doRotate(logTime time.Time) error {
goto RESTART_LOGGER
}
if w.Hourly {
format = "2006010215"
openTime = w.hourlyOpenTime
} else if w.Daily {
format = "2006-01-02"
openTime = w.dailyOpenTime
}
// the file is only split when at least one of them is set
if w.MaxLines > 0 || w.MaxSize > 0 {
for ; err == nil && num <= 999; num++ {
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format("2006-01-02"), num, w.suffix)
for ; err == nil && num <= w.MaxFiles; num++ {
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format(format), num, w.suffix)
_, err = os.Lstat(fName)
}
} else {
fName = fmt.Sprintf("%s.%s%s", w.fileNameOnly, w.dailyOpenTime.Format("2006-01-02"), w.suffix)
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", openTime.Format(format), num, w.suffix)
_, err = os.Lstat(fName)
for ; err == nil && num <= 999; num++ {
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", w.dailyOpenTime.Format("2006-01-02"), num, w.suffix)
_, err = os.Lstat(fName)
}
w.MaxFilesCurFiles = num
}
// return error if the last file checked still existed
if err == nil {
return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename)
@ -307,13 +369,21 @@ func (w *fileLogWriter) deleteOldLog() {
if info == nil {
return
}
if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) {
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
strings.HasSuffix(filepath.Base(path), w.suffix) {
os.Remove(path)
}
}
if w.Hourly {
if !info.IsDir() && info.ModTime().Add(1 * time.Hour * time.Duration(w.MaxHours)).Before(time.Now()) {
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
strings.HasSuffix(filepath.Base(path), w.suffix) {
os.Remove(path)
}
}
} else if w.Daily {
if !info.IsDir() && info.ModTime().Add(24 * time.Hour * time.Duration(w.MaxDays)).Before(time.Now()) {
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
strings.HasSuffix(filepath.Base(path), w.suffix) {
os.Remove(path)
}
}
}
return
})
}


@ -112,7 +112,7 @@ func TestFile2(t *testing.T) {
os.Remove("test2.log")
}
func TestFileRotate_01(t *testing.T) {
func TestFileDailyRotate_01(t *testing.T) {
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`)
log.Debug("debug")
@ -133,28 +133,28 @@ func TestFileRotate_01(t *testing.T) {
os.Remove("test3.log")
}
func TestFileRotate_02(t *testing.T) {
func TestFileDailyRotate_02(t *testing.T) {
fn1 := "rotate_day.log"
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
testFileRotate(t, fn1, fn2)
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
testFileRotate(t, fn1, fn2, true, false)
}
func TestFileRotate_03(t *testing.T) {
func TestFileDailyRotate_03(t *testing.T) {
fn1 := "rotate_day.log"
fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
os.Create(fn)
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
testFileRotate(t, fn1, fn2)
testFileRotate(t, fn1, fn2, true, false)
os.Remove(fn)
}
func TestFileRotate_04(t *testing.T) {
func TestFileDailyRotate_04(t *testing.T) {
fn1 := "rotate_day.log"
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
fn2 := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".001.log"
testFileDailyRotate(t, fn1, fn2)
}
func TestFileRotate_05(t *testing.T) {
func TestFileDailyRotate_05(t *testing.T) {
fn1 := "rotate_day.log"
fn := "rotate_day." + time.Now().Add(-24*time.Hour).Format("2006-01-02") + ".log"
os.Create(fn)
@ -162,7 +162,7 @@ func TestFileRotate_05(t *testing.T) {
testFileDailyRotate(t, fn1, fn2)
os.Remove(fn)
}
func TestFileRotate_06(t *testing.T) { //test file mode
func TestFileDailyRotate_06(t *testing.T) { //test file mode
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test3.log","maxlines":4}`)
log.Debug("debug")
@ -183,23 +183,110 @@ func TestFileRotate_06(t *testing.T) { //test file mode
os.Remove(rotateName)
os.Remove("test3.log")
}
func testFileRotate(t *testing.T, fn1, fn2 string) {
func TestFileHourlyRotate_01(t *testing.T) {
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test3.log","hourly":true,"maxlines":4}`)
log.Debug("debug")
log.Info("info")
log.Notice("notice")
log.Warning("warning")
log.Error("error")
log.Alert("alert")
log.Critical("critical")
log.Emergency("emergency")
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log"
b, err := exists(rotateName)
if !b || err != nil {
os.Remove("test3.log")
t.Fatal("rotate not generated")
}
os.Remove(rotateName)
os.Remove("test3.log")
}
func TestFileHourlyRotate_02(t *testing.T) {
fn1 := "rotate_hour.log"
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
testFileRotate(t, fn1, fn2, false, true)
}
func TestFileHourlyRotate_03(t *testing.T) {
fn1 := "rotate_hour.log"
fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log"
os.Create(fn)
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
testFileRotate(t, fn1, fn2, false, true)
os.Remove(fn)
}
func TestFileHourlyRotate_04(t *testing.T) {
fn1 := "rotate_hour.log"
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
testFileHourlyRotate(t, fn1, fn2)
}
func TestFileHourlyRotate_05(t *testing.T) {
fn1 := "rotate_hour.log"
fn := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".log"
os.Create(fn)
fn2 := "rotate_hour." + time.Now().Add(-1*time.Hour).Format("2006010215") + ".001.log"
testFileHourlyRotate(t, fn1, fn2)
os.Remove(fn)
}
func TestFileHourlyRotate_06(t *testing.T) { //test file mode
log := NewLogger(10000)
log.SetLogger("file", `{"filename":"test3.log", "hourly":true, "maxlines":4}`)
log.Debug("debug")
log.Info("info")
log.Notice("notice")
log.Warning("warning")
log.Error("error")
log.Alert("alert")
log.Critical("critical")
log.Emergency("emergency")
rotateName := "test3" + fmt.Sprintf(".%s.%03d", time.Now().Format("2006010215"), 1) + ".log"
s, _ := os.Lstat(rotateName)
if s.Mode() != 0440 {
os.Remove(rotateName)
os.Remove("test3.log")
t.Fatal("rotate file mode error")
}
os.Remove(rotateName)
os.Remove("test3.log")
}
func testFileRotate(t *testing.T, fn1, fn2 string, daily, hourly bool) {
fw := &fileLogWriter{
Daily: true,
Daily: daily,
MaxDays: 7,
Hourly: hourly,
MaxHours: 168,
Rotate: true,
Level: LevelTrace,
Perm: "0660",
RotatePerm: "0440",
}
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)
fw.dailyOpenDate = fw.dailyOpenTime.Day()
fw.WriteMsg(time.Now(), "this is a msg for test", LevelDebug)
if daily {
fw.Init(fmt.Sprintf(`{"filename":"%v","maxdays":1}`, fn1))
fw.dailyOpenTime = time.Now().Add(-24 * time.Hour)
fw.dailyOpenDate = fw.dailyOpenTime.Day()
}
if hourly {
fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1))
fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour)
fw.hourlyOpenDate = fw.hourlyOpenTime.Hour()
}
fw.WriteMsg(time.Now(), "this is a msg for test", LevelDebug)
for _, file := range []string{fn1, fn2} {
_, err := os.Stat(file)
if err != nil {
t.Log(err)
t.FailNow()
}
os.Remove(file)
@ -239,6 +326,37 @@ func testFileDailyRotate(t *testing.T, fn1, fn2 string) {
fw.Destroy()
}
func testFileHourlyRotate(t *testing.T, fn1, fn2 string) {
fw := &fileLogWriter{
Hourly: true,
MaxHours: 168,
Rotate: true,
Level: LevelTrace,
Perm: "0660",
RotatePerm: "0440",
}
fw.Init(fmt.Sprintf(`{"filename":"%v","maxhours":1}`, fn1))
fw.hourlyOpenTime = time.Now().Add(-1 * time.Hour)
fw.hourlyOpenDate = fw.hourlyOpenTime.Hour()
hour, _ := time.ParseInLocation("2006010215", time.Now().Format("2006010215"), fw.hourlyOpenTime.Location())
hour = hour.Add(-1 * time.Second)
fw.hourlyRotate(hour)
for _, file := range []string{fn1, fn2} {
_, err := os.Stat(file)
if err != nil {
t.FailNow()
}
content, err := ioutil.ReadFile(file)
if err != nil {
t.FailNow()
}
if len(content) > 0 {
t.FailNow()
}
os.Remove(file)
}
fw.Destroy()
}
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {


@ -47,7 +47,7 @@ import (
// RFC5424 log message levels.
const (
LevelEmergency = iota
LevelEmergency = iota
LevelAlert
LevelCritical
LevelError
@ -116,6 +116,7 @@ type BeeLogger struct {
enableFuncCallDepth bool
loggerFuncCallDepth int
asynchronous bool
prefix string
msgChanLen int64
msgChan chan *logMsg
signalChan chan string
@ -247,7 +248,7 @@ func (bl *BeeLogger) Write(p []byte) (n int, err error) {
}
// writeMsg will always add a '\n' character
if p[len(p)-1] == '\n' {
p = p[0 : len(p)-1]
p = p[0: len(p)-1]
}
// set levelLoggerImpl to ensure all log message will be write out
err = bl.writeMsg(levelLoggerImpl, string(p))
@ -267,6 +268,9 @@ func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error
if len(v) > 0 {
msg = fmt.Sprintf(msg, v...)
}
msg = bl.prefix + " " + msg
when := time.Now()
if bl.enableFuncCallDepth {
_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
@ -305,6 +309,11 @@ func (bl *BeeLogger) SetLevel(l int) {
bl.level = l
}
// GetLevel Get Current log message level.
func (bl *BeeLogger) GetLevel() int {
return bl.level
}
// SetLogFuncCallDepth set log funcCallDepth
func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
bl.loggerFuncCallDepth = d
@ -320,6 +329,11 @@ func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
bl.enableFuncCallDepth = b
}
// set prefix
func (bl *BeeLogger) SetPrefix(s string) {
bl.prefix = s
}
// start logger chan reading.
// when chan is not empty, write logs.
func (bl *BeeLogger) startLogger() {
@ -544,6 +558,11 @@ func SetLevel(l int) {
beeLogger.SetLevel(l)
}
// SetPrefix sets the prefix
func SetPrefix(s string) {
beeLogger.SetPrefix(s)
}
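A hedged sketch of the new package-level `SetPrefix` together with the existing `SetLevel`; the prefix text is illustrative:

```golang
package main

import "github.com/astaxie/beego/logs"

func main() {
	// The prefix set here is prepended to every message written by the
	// default BeeLogger.
	logs.SetPrefix("[orders-api]")
	logs.SetLevel(logs.LevelInformational)
	logs.Info("service started")
}
```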
// EnableFuncCallDepth enable log funcCallDepth
func EnableFuncCallDepth(b bool) {
beeLogger.enableFuncCallDepth = b


@ -33,7 +33,7 @@ func newLogWriter(wr io.Writer) *logWriter {
func (lg *logWriter) println(when time.Time, msg string) {
lg.Lock()
h, _ := formatTimeHeader(when)
h, _, _ := formatTimeHeader(when)
lg.writer.Write(append(append(h, msg...), '\n'))
lg.Unlock()
}
@ -90,10 +90,10 @@ const (
ns1 = `0123456789`
)
func formatTimeHeader(when time.Time) ([]byte, int) {
func formatTimeHeader(when time.Time) ([]byte, int, int) {
y, mo, d := when.Date()
h, mi, s := when.Clock()
ns := when.Nanosecond()/1000000
ns := when.Nanosecond() / 1000000
//len("2006/01/02 15:04:05.123 ")==24
var buf [24]byte
@ -123,7 +123,7 @@ func formatTimeHeader(when time.Time) ([]byte, int) {
buf[23] = ' '
return buf[0:], d
return buf[0:], d, h
}
var (


@ -30,8 +30,8 @@ func TestFormatHeader_0(t *testing.T) {
if tm.Year() >= 2100 {
break
}
h, _ := formatTimeHeader(tm)
if tm.Format("2006/01/02 15:04:05.999 ") != string(h) {
h, _, _ := formatTimeHeader(tm)
if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
t.Log(tm)
t.FailNow()
}
@ -48,8 +48,8 @@ func TestFormatHeader_1(t *testing.T) {
if tm.Year() >= year+1 {
break
}
h, _ := formatTimeHeader(tm)
if tm.Format("2006/01/02 15:04:05.999 ") != string(h) {
h, _, _ := formatTimeHeader(tm)
if tm.Format("2006/01/02 15:04:05.000 ") != string(h) {
t.Log(tm)
t.FailNow()
}


@ -67,7 +67,10 @@ func (f *multiFileLogWriter) Init(config string) error {
jsonMap["level"] = i
bs, _ := json.Marshal(jsonMap)
writer = newFileWriter().(*fileLogWriter)
writer.Init(string(bs))
err := writer.Init(string(bs))
if err != nil {
return err
}
f.writers[i] = writer
}
}


@ -322,7 +322,7 @@ func (m *Migration) GetSQL() (sql string) {
sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
}
if len(m.Columns) > index {
if len(m.Columns) > index+1 {
sql += ","
}
}
@ -355,7 +355,7 @@ func (m *Migration) GetSQL() (sql string) {
} else {
sql += fmt.Sprintf("\n DROP COLUMN `%s`", column.Name)
}
if len(m.Columns) > index {
if len(m.Columns) > index+1 {
sql += ","
}
}
@ -366,14 +366,14 @@ func (m *Migration) GetSQL() (sql string) {
for index, unique := range m.Uniques {
sql += fmt.Sprintf("\n DROP KEY `%s`", unique.Definition)
if len(m.Uniques) > index {
if len(m.Uniques) > index+1 {
sql += ","
}
}
for index, column := range m.Renames {
sql += fmt.Sprintf("\n CHANGE COLUMN `%s` `%s` %s %s %s %s", column.NewName, column.OldName, column.OldDataType, column.OldUnsign, column.OldNull, column.OldDefault)
if len(m.Renames) > index {
if len(m.Renames) > index+1 {
sql += ","
}
}


@ -197,6 +197,10 @@ func getDbCreateSQL(al *alias) (sqls []string, tableIndexes map[string][]dbIndex
if strings.Contains(column, "%COL%") {
column = strings.Replace(column, "%COL%", fi.column, -1)
}
if fi.description != "" {
column += " " + fmt.Sprintf("COMMENT '%s'",fi.description)
}
columns = append(columns, column)
}


@ -536,6 +536,8 @@ func (d *dbBase) InsertOrUpdate(q dbQuerier, mi *modelInfo, ind reflect.Value, a
updates := make([]string, len(names))
var conflitValue interface{}
for i, v := range names {
// identifier in database may not be case-sensitive, so quote it
v = fmt.Sprintf("%s%s%s", Q, v, Q)
marks[i] = "?"
valueStr := argsMap[strings.ToLower(v)]
if v == args0 {
@ -760,7 +762,13 @@ func (d *dbBase) UpdateBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Con
}
d.ins.ReplaceMarks(&query)
res, err := q.Exec(query, values...)
var err error
var res sql.Result
if qs != nil && qs.forContext {
res, err = q.ExecContext(qs.ctx, query, values...)
} else {
res, err = q.Exec(query, values...)
}
if err == nil {
return res.RowsAffected()
}
@ -849,11 +857,16 @@ func (d *dbBase) DeleteBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Con
for i := range marks {
marks[i] = "?"
}
sql := fmt.Sprintf("IN (%s)", strings.Join(marks, ", "))
query = fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s %s", Q, mi.table, Q, Q, mi.fields.pk.column, Q, sql)
sqlIn := fmt.Sprintf("IN (%s)", strings.Join(marks, ", "))
query = fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s %s", Q, mi.table, Q, Q, mi.fields.pk.column, Q, sqlIn)
d.ins.ReplaceMarks(&query)
res, err := q.Exec(query, args...)
var res sql.Result
if qs != nil && qs.forContext {
res, err = q.ExecContext(qs.ctx, query, args...)
} else {
res, err = q.Exec(query, args...)
}
if err == nil {
num, err := res.RowsAffected()
if err != nil {
@ -926,7 +939,7 @@ func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condi
maps[fi.column] = true
}
} else {
panic(fmt.Errorf("wrong field/column name `%s`", col))
return 0, fmt.Errorf("wrong field/column name `%s`", col)
}
}
if hasRel {
@ -969,14 +982,25 @@ func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condi
}
query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s", sqlSelect, sels, Q, mi.table, Q, join, where, groupBy, orderBy, limit)
if qs.forupdate {
query += " FOR UPDATE"
}
d.ins.ReplaceMarks(&query)
var rs *sql.Rows
r, err := q.Query(query, args...)
if err != nil {
return 0, err
var err error
if qs != nil && qs.forContext {
rs, err = q.QueryContext(qs.ctx, query, args...)
if err != nil {
return 0, err
}
} else {
rs, err = q.Query(query, args...)
if err != nil {
return 0, err
}
}
rs = r
refs := make([]interface{}, colsNum)
for i := range refs {
@ -1105,8 +1129,12 @@ func (d *dbBase) Count(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition
d.ins.ReplaceMarks(&query)
row := q.QueryRow(query, args...)
var row *sql.Row
if qs != nil && qs.forContext {
row = q.QueryRowContext(qs.ctx, query, args...)
} else {
row = q.QueryRow(query, args...)
}
err = row.Scan(&cnt)
return
}


@ -372,7 +372,13 @@ func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (whe
operator = "exact"
}
operSQL, args := t.base.GenerateOperatorSQL(mi, fi, operator, p.args, tz)
var operSQL string
var args []interface{}
if p.isRaw {
operSQL = p.sql
} else {
operSQL, args = t.base.GenerateOperatorSQL(mi, fi, operator, p.args, tz)
}
leftCol := fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q)
t.base.GenerateOperatorLeftCol(fi, operator, &leftCol)


@ -86,7 +86,7 @@ func (e *BooleanField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Bool()
if err != nil {
if err == nil {
e.Set(v)
}
return err
@ -191,7 +191,7 @@ func (e *TimeField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := timeParse(d, formatTime)
if err != nil {
if err == nil {
e.Set(v)
}
return err
@ -250,7 +250,7 @@ func (e *DateField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := timeParse(d, formatDate)
if err != nil {
if err == nil {
e.Set(v)
}
return err
@ -300,7 +300,7 @@ func (e *DateTimeField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := timeParse(d, formatDateTime)
if err != nil {
if err == nil {
e.Set(v)
}
return err
@ -350,9 +350,10 @@ func (e *FloatField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Float64()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<FloatField.SetRaw> unknown value `%s`", value)
}
@ -397,9 +398,10 @@ func (e *SmallIntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Int16()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<SmallIntegerField.SetRaw> unknown value `%s`", value)
}
@ -444,9 +446,10 @@ func (e *IntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Int32()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<IntegerField.SetRaw> unknown value `%s`", value)
}
@ -491,9 +494,10 @@ func (e *BigIntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Int64()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<BigIntegerField.SetRaw> unknown value `%s`", value)
}
@ -538,9 +542,10 @@ func (e *PositiveSmallIntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Uint16()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<PositiveSmallIntegerField.SetRaw> unknown value `%s`", value)
}
@ -585,9 +590,10 @@ func (e *PositiveIntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Uint32()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<PositiveIntegerField.SetRaw> unknown value `%s`", value)
}
@ -632,9 +638,10 @@ func (e *PositiveBigIntegerField) SetRaw(value interface{}) error {
e.Set(d)
case string:
v, err := StrTo(d).Uint64()
if err != nil {
if err == nil {
e.Set(v)
}
return err
default:
return fmt.Errorf("<PositiveBigIntegerField.SetRaw> unknown value `%s`", value)
}


@ -136,6 +136,7 @@ type fieldInfo struct {
decimals int
isFielder bool // implement Fielder interface
onDelete string
description string
}
// new field info
@ -300,6 +301,7 @@ checkType:
fi.sf = sf
fi.fullName = mi.fullName + mName + "." + sf.Name
fi.description = sf.Tag.Get("description")
fi.null = attrs["null"]
fi.index = attrs["index"]
fi.auto = attrs["auto"]


@ -75,7 +75,8 @@ func addModelFields(mi *modelInfo, ind reflect.Value, mName string, index []int)
break
}
//record current field index
fi.fieldIndex = append(index, i)
fi.fieldIndex = append(fi.fieldIndex, index...)
fi.fieldIndex = append(fi.fieldIndex, i)
fi.mi = mi
fi.inModel = true
if !mi.fields.Add(fi) {


@ -433,53 +433,57 @@ var (
dDbBaser dbBaser
)
var (
helpinfo = `need driver and source!
Default DB Drivers.
driver: url
mysql: https://github.com/go-sql-driver/mysql
sqlite3: https://github.com/mattn/go-sqlite3
postgres: https://github.com/lib/pq
tidb: https://github.com/pingcap/tidb
usage:
go get -u github.com/astaxie/beego/orm
go get -u github.com/go-sql-driver/mysql
go get -u github.com/mattn/go-sqlite3
go get -u github.com/lib/pq
go get -u github.com/pingcap/tidb
#### MySQL
mysql -u root -e 'create database orm_test;'
export ORM_DRIVER=mysql
export ORM_SOURCE="root:@/orm_test?charset=utf8"
go test -v github.com/astaxie/beego/orm
#### Sqlite3
export ORM_DRIVER=sqlite3
export ORM_SOURCE='file:memory_test?mode=memory'
go test -v github.com/astaxie/beego/orm
#### PostgreSQL
psql -c 'create database orm_test;' -U postgres
export ORM_DRIVER=postgres
export ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable"
go test -v github.com/astaxie/beego/orm
#### TiDB
export ORM_DRIVER=tidb
export ORM_SOURCE='memory://test/test'
go test -v github.com/astaxie/beego/orm
`
)
func init() {
Debug, _ = StrTo(DBARGS.Debug).Bool()
if DBARGS.Driver == "" || DBARGS.Source == "" {
fmt.Println(`need driver and source!
Default DB Drivers.
driver: url
mysql: https://github.com/go-sql-driver/mysql
sqlite3: https://github.com/mattn/go-sqlite3
postgres: https://github.com/lib/pq
tidb: https://github.com/pingcap/tidb
usage:
go get -u github.com/astaxie/beego/orm
go get -u github.com/go-sql-driver/mysql
go get -u github.com/mattn/go-sqlite3
go get -u github.com/lib/pq
go get -u github.com/pingcap/tidb
#### MySQL
mysql -u root -e 'create database orm_test;'
export ORM_DRIVER=mysql
export ORM_SOURCE="root:@/orm_test?charset=utf8"
go test -v github.com/astaxie/beego/orm
#### Sqlite3
export ORM_DRIVER=sqlite3
export ORM_SOURCE='file:memory_test?mode=memory'
go test -v github.com/astaxie/beego/orm
#### PostgreSQL
psql -c 'create database orm_test;' -U postgres
export ORM_DRIVER=postgres
export ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable"
go test -v github.com/astaxie/beego/orm
#### TiDB
export ORM_DRIVER=tidb
export ORM_SOURCE='memory://test/test'
go test -v github.com/astaxie/beego/orm
`)
fmt.Println(helpinfo)
os.Exit(2)
}


@ -109,7 +109,7 @@ func getTableUnique(val reflect.Value) [][]string {
func getColumnName(ft int, addrField reflect.Value, sf reflect.StructField, col string) string {
column := col
if col == "" {
column = snakeString(sf.Name)
column = nameStrategyMap[nameStrategy](sf.Name)
}
switch ft {
case RelForeignKey, RelOneToOne:


@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
// Package orm provide ORM for MySQL/PostgreSQL/sqlite
// Simple Usage
//
@ -52,6 +54,7 @@
package orm
import (
"context"
"database/sql"
"errors"
"fmt"
@ -422,7 +425,7 @@ func (o *orm) getRelQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
func (o *orm) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) {
var name string
if table, ok := ptrStructOrTableName.(string); ok {
name = snakeString(table)
name = nameStrategyMap[defaultNameStrategy](table)
if mi, ok := modelCache.get(name); ok {
qs = newQuerySet(o, mi)
}
@ -458,11 +461,15 @@ func (o *orm) Using(name string) error {
// begin transaction
func (o *orm) Begin() error {
return o.BeginTx(context.Background(), nil)
}
func (o *orm) BeginTx(ctx context.Context, opts *sql.TxOptions) error {
if o.isTx {
return ErrTxHasBegan
}
var tx *sql.Tx
tx, err := o.db.(txer).Begin()
tx, err := o.db.(txer).BeginTx(ctx, opts)
if err != nil {
return err
}
@ -541,6 +548,9 @@ func NewOrmWithDB(driverName, aliasName string, db *sql.DB) (Ormer, error) {
al.Name = aliasName
al.DriverName = driverName
al.DB = db
detectTZ(al)
o := new(orm)
o.alias = al
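A hedged sketch of handing an existing `*sql.DB` to the ORM, which now runs `detectTZ` instead of assuming UTC; the driver and DSN are illustrative:

```golang
package main

import (
	"database/sql"
	"fmt"

	"github.com/astaxie/beego/orm"
	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "root:@/orm_test?charset=utf8")
	if err != nil {
		panic(err)
	}
	// The alias timezone is detected from the database connection.
	o, err := orm.NewOrmWithDB("mysql", "default", db)
	fmt.Println(o, err)
}
```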


@ -31,6 +31,8 @@ type condValue struct {
isOr bool
isNot bool
isCond bool
isRaw bool
sql string
}
// Condition struct.
@ -45,6 +47,15 @@ func NewCondition() *Condition {
return c
}
// Raw add raw sql to condition
func (c Condition) Raw(expr string, sql string) *Condition {
if len(sql) == 0 {
panic(fmt.Errorf("<Condition.Raw> sql cannot empty"))
}
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), sql: sql, isRaw: true})
return &c
}
// And add expression to condition
func (c Condition) And(expr string, args ...interface{}) *Condition {
if expr == "" || len(args) == 0 {


@ -15,6 +15,7 @@
package orm
import (
"context"
"database/sql"
"fmt"
"io"
@ -122,6 +123,13 @@ func (d *dbQueryLog) Prepare(query string) (*sql.Stmt, error) {
return stmt, err
}
func (d *dbQueryLog) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
a := time.Now()
stmt, err := d.db.PrepareContext(ctx, query)
debugLogQueies(d.alias, "db.Prepare", query, a, err)
return stmt, err
}
func (d *dbQueryLog) Exec(query string, args ...interface{}) (sql.Result, error) {
a := time.Now()
res, err := d.db.Exec(query, args...)
@ -129,6 +137,13 @@ func (d *dbQueryLog) Exec(query string, args ...interface{}) (sql.Result, error)
return res, err
}
func (d *dbQueryLog) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
a := time.Now()
res, err := d.db.ExecContext(ctx, query, args...)
debugLogQueies(d.alias, "db.Exec", query, a, err, args...)
return res, err
}
func (d *dbQueryLog) Query(query string, args ...interface{}) (*sql.Rows, error) {
a := time.Now()
res, err := d.db.Query(query, args...)
@ -136,6 +151,13 @@ func (d *dbQueryLog) Query(query string, args ...interface{}) (*sql.Rows, error)
return res, err
}
func (d *dbQueryLog) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
a := time.Now()
res, err := d.db.QueryContext(ctx, query, args...)
debugLogQueies(d.alias, "db.Query", query, a, err, args...)
return res, err
}
func (d *dbQueryLog) QueryRow(query string, args ...interface{}) *sql.Row {
a := time.Now()
res := d.db.QueryRow(query, args...)
@ -143,6 +165,13 @@ func (d *dbQueryLog) QueryRow(query string, args ...interface{}) *sql.Row {
return res
}
func (d *dbQueryLog) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
a := time.Now()
res := d.db.QueryRowContext(ctx, query, args...)
debugLogQueies(d.alias, "db.QueryRow", query, a, nil, args...)
return res
}
func (d *dbQueryLog) Begin() (*sql.Tx, error) {
a := time.Now()
tx, err := d.db.(txer).Begin()
@ -150,6 +179,13 @@ func (d *dbQueryLog) Begin() (*sql.Tx, error) {
return tx, err
}
func (d *dbQueryLog) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
a := time.Now()
tx, err := d.db.(txer).BeginTx(ctx, opts)
debugLogQueies(d.alias, "db.BeginTx", "START TRANSACTION", a, err)
return tx, err
}
func (d *dbQueryLog) Commit() error {
a := time.Now()
err := d.db.(txEnder).Commit()


@ -15,6 +15,7 @@
package orm
import (
"context"
"fmt"
)
@ -55,16 +56,19 @@ func ColValue(opt operator, value interface{}) interface{} {
// real query struct
type querySet struct {
mi *modelInfo
cond *Condition
related []string
relDepth int
limit int64
offset int64
groups []string
orders []string
distinct bool
orm *orm
mi *modelInfo
cond *Condition
related []string
relDepth int
limit int64
offset int64
groups []string
orders []string
distinct bool
forupdate bool
orm *orm
ctx context.Context
forContext bool
}
var _ QuerySeter = new(querySet)
@ -78,6 +82,15 @@ func (o querySet) Filter(expr string, args ...interface{}) QuerySeter {
return &o
}
// add raw sql to querySeter.
func (o querySet) FilterRaw(expr string, sql string) QuerySeter {
if o.cond == nil {
o.cond = NewCondition()
}
o.cond = o.cond.Raw(expr, sql)
return &o
}
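A hedged sketch of the new raw-SQL condition helpers; the `user` table and column names are illustrative (they mirror the tests further down), and a registered "default" database is assumed:

```golang
package main

import (
	"fmt"

	"github.com/astaxie/beego/orm"
)

func main() {
	o := orm.NewOrm()

	// FilterRaw appends the raw fragment after the quoted column.
	num, err := o.QueryTable("user").
		FilterRaw("profile_id", "IN (SELECT id FROM user_profile WHERE age=30)").
		Count()
	fmt.Println(num, err)

	// The same fragment can be attached to a reusable Condition.
	cond := orm.NewCondition().Raw("user_name", "= 'slene'")
	num, err = o.QueryTable("user").SetCond(cond).Count()
	fmt.Println(num, err)
}
```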
// add NOT condition to querySeter.
func (o querySet) Exclude(expr string, args ...interface{}) QuerySeter {
if o.cond == nil {
@ -127,6 +140,12 @@ func (o querySet) Distinct() QuerySeter {
return &o
}
// add FOR UPDATE to SELECT
func (o querySet) ForUpdate() QuerySeter {
o.forupdate = true
return &o
}
// set relation model to query together.
// it will query relation models and assign to parent model.
func (o querySet) RelatedSel(params ...interface{}) QuerySeter {
@ -259,6 +278,13 @@ func (o *querySet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string)
panic(ErrNotImplement)
}
// set context to QuerySeter.
func (o querySet) WithContext(ctx context.Context) QuerySeter {
o.ctx = ctx
o.forContext = true
return &o
}
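A hedged sketch of the new `ForUpdate` and `WithContext` queryset options; the `User` model and filter are illustrative, a registered "default" database is assumed, and WithContext only affects the batch/count operations wired up in db.go above:

```golang
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/astaxie/beego/orm"
)

// User is a hypothetical registered model used only for illustration.
type User struct {
	Id     int
	Status int
}

func init() {
	orm.RegisterModel(new(User))
}

func main() {
	o := orm.NewOrm()
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var users []User
	// ForUpdate appends FOR UPDATE to the generated SELECT; WithContext makes
	// the query honour the ctx deadline.
	num, err := o.QueryTable("user").
		Filter("status", 1).
		ForUpdate().
		WithContext(ctx).
		All(&users)
	fmt.Println(num, err)
}
```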
// create new QuerySeter.
func newQuerySet(orm *orm, mi *modelInfo) QuerySeter {
o := new(querySet)


@ -358,7 +358,7 @@ func (o *rawSet) QueryRow(containers ...interface{}) error {
_, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
var col string
if col = tags["column"]; col == "" {
col = snakeString(fe.Name)
col = nameStrategyMap[nameStrategy](fe.Name)
}
if v, ok := columnsMp[col]; ok {
value := reflect.ValueOf(v).Elem().Interface()
@ -509,7 +509,7 @@ func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
_, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
var col string
if col = tags["column"]; col == "" {
col = snakeString(fe.Name)
col = nameStrategyMap[nameStrategy](fe.Name)
}
if v, ok := columnsMp[col]; ok {
value := reflect.ValueOf(v).Elem().Interface()


@ -12,10 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package orm
import (
"bytes"
"context"
"database/sql"
"fmt"
"io/ioutil"
@ -452,9 +455,9 @@ func TestNullDataTypes(t *testing.T) {
throwFail(t, AssertIs(*d.Float32Ptr, float32Ptr))
throwFail(t, AssertIs(*d.Float64Ptr, float64Ptr))
throwFail(t, AssertIs(*d.DecimalPtr, decimalPtr))
throwFail(t, AssertIs((*d.TimePtr).Format(testTime), timePtr.Format(testTime)))
throwFail(t, AssertIs((*d.DatePtr).Format(testDate), datePtr.Format(testDate)))
throwFail(t, AssertIs((*d.DateTimePtr).Format(testDateTime), dateTimePtr.Format(testDateTime)))
throwFail(t, AssertIs((*d.TimePtr).UTC().Format(testTime), timePtr.UTC().Format(testTime)))
throwFail(t, AssertIs((*d.DatePtr).UTC().Format(testDate), datePtr.UTC().Format(testDate)))
throwFail(t, AssertIs((*d.DateTimePtr).UTC().Format(testDateTime), dateTimePtr.UTC().Format(testDateTime)))
}
func TestDataCustomTypes(t *testing.T) {
@ -896,6 +899,18 @@ func TestOperators(t *testing.T) {
num, err = qs.Filter("id__between", []int{2, 3}).Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 2))
num, err = qs.FilterRaw("user_name", "= 'slene'").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 1))
num, err = qs.FilterRaw("status", "IN (1, 2)").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 2))
num, err = qs.FilterRaw("profile_id", "IN (SELECT id FROM user_profile WHERE age=30)").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 1))
}
func TestSetCond(t *testing.T) {
@ -921,6 +936,11 @@ func TestSetCond(t *testing.T) {
num, err = qs.SetCond(cond4).Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 3))
cond5 := cond.Raw("user_name", "= 'slene'").OrNotCond(cond.And("user_name", "slene"))
num, err = qs.SetCond(cond5).Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 3))
}
func TestLimit(t *testing.T) {
@ -1990,6 +2010,66 @@ func TestTransaction(t *testing.T) {
}
func TestTransactionIsolationLevel(t *testing.T) {
// this test works when the database supports transaction isolation levels
if IsSqlite {
return
}
o1 := NewOrm()
o2 := NewOrm()
// start two transaction with isolation level repeatable read
err := o1.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
throwFail(t, err)
err = o2.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
throwFail(t, err)
// o1 insert tag
var tag Tag
tag.Name = "test-transaction"
id, err := o1.Insert(&tag)
throwFail(t, err)
throwFail(t, AssertIs(id > 0, true))
// o2 query tag table, no result
num, err := o2.QueryTable("tag").Filter("name", "test-transaction").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 0))
// o1 commit
o1.Commit()
// o2 query tag table, still no result
num, err = o2.QueryTable("tag").Filter("name", "test-transaction").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 0))
// o2 commit and query tag table, get the result
o2.Commit()
num, err = o2.QueryTable("tag").Filter("name", "test-transaction").Count()
throwFail(t, err)
throwFail(t, AssertIs(num, 1))
num, err = o1.QueryTable("tag").Filter("name", "test-transaction").Delete()
throwFail(t, err)
throwFail(t, AssertIs(num, 1))
}
func TestBeginTxWithContextCanceled(t *testing.T) {
o := NewOrm()
ctx, cancel := context.WithCancel(context.Background())
o.BeginTx(ctx, nil)
id, err := o.Insert(&Tag{Name: "test-context"})
throwFail(t, err)
throwFail(t, AssertIs(id > 0, true))
// cancel the context before commit to make it error
cancel()
err = o.Commit()
throwFail(t, AssertIs(err, context.Canceled))
}
func TestReadOrCreate(t *testing.T) {
u := &User{
UserName: "Kyle",
@ -2260,6 +2340,7 @@ func TestIgnoreCaseTag(t *testing.T) {
throwFail(t, AssertIs(info.fields.GetByName("Name02").column, "Name"))
throwFail(t, AssertIs(info.fields.GetByName("Name03").column, "name"))
}
func TestInsertOrUpdate(t *testing.T) {
RegisterModel(new(User))
user := User{UserName: "unique_username133", Status: 1, Password: "o"}
@ -2297,6 +2378,11 @@ func TestInsertOrUpdate(t *testing.T) {
throwFailNow(t, AssertIs(user2.Status, test.Status))
throwFailNow(t, AssertIs(user2.Password, strings.TrimSpace(test.Password)))
}
// postgres ON CONFLICT DO UPDATE SET can't use expressions like col=col+values
if IsPostgres {
return
}
//test3 +
_, err = dORM.InsertOrUpdate(&user2, "user_name", "status=status+1")
if err != nil {

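The Condition.Raw call exercised in TestSetCond above can also be mixed with the other condition builders; a short sketch, with table and column names purely illustrative:

func countByRawCond(o orm.Ormer) (int64, error) {
    cond := orm.NewCondition().
        Raw("user_name", "= 'slene'").
        Or("status__in", 1, 2)
    return o.QueryTable("user").SetCond(cond).Count()
}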

@ -15,6 +15,7 @@
package orm
import (
"context"
"database/sql"
"reflect"
"time"
@ -106,6 +107,17 @@ type Ormer interface {
// ...
// err = o.Rollback()
Begin() error
// begin transaction with provided context and option
// the provided context is used until the transaction is committed or rolled back.
// if the context is canceled, the transaction will be rolled back.
// the provided TxOptions is optional and may be nil if defaults should be used.
// if a non-default isolation level is used that the driver doesn't support, an error will be returned.
// for example:
// o := NewOrm()
// err := o.BeginTx(context.Background(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
// ...
// err = o.Rollback()
BeginTx(ctx context.Context, opts *sql.TxOptions) error
// commit transaction
Commit() error
// rollback transaction
@ -135,6 +147,11 @@ type QuerySeter interface {
// // time compare
// qs.Filter("created", time.Now())
Filter(string, ...interface{}) QuerySeter
// add raw sql to querySeter.
// for example:
// qs.FilterRaw("user_id", "IN (SELECT id FROM profile WHERE age>=18)")
// //sql-> WHERE user_id IN (SELECT id FROM profile WHERE age>=18)
FilterRaw(string, string) QuerySeter
// add NOT condition to querySeter.
// have the same usage as Filter
Exclude(string, ...interface{}) QuerySeter
@ -190,6 +207,10 @@ type QuerySeter interface {
// Distinct().
// All(&permissions)
Distinct() QuerySeter
// set FOR UPDATE to query.
// for example:
// o.QueryTable("user").Filter("uid", uid).ForUpdate().All(&users)
ForUpdate() QuerySeter
// return QuerySeter execution result number
// for example:
// num, err = qs.Filter("profile__age__gt", 28).Count()
@ -374,16 +395,23 @@ type RawSeter interface {
type stmtQuerier interface {
Close() error
Exec(args ...interface{}) (sql.Result, error)
//ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error)
Query(args ...interface{}) (*sql.Rows, error)
//QueryContext(args ...interface{}) (*sql.Rows, error)
QueryRow(args ...interface{}) *sql.Row
//QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row
}
// db querier
type dbQuerier interface {
Prepare(query string) (*sql.Stmt, error)
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
Exec(query string, args ...interface{}) (sql.Result, error)
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
Query(query string, args ...interface{}) (*sql.Rows, error)
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
QueryRow(query string, args ...interface{}) *sql.Row
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
}
// type DB interface {
@ -397,6 +425,7 @@ type dbQuerier interface {
// transaction beginner
type txer interface {
Begin() (*sql.Tx, error)
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
}
// transaction ending


@ -23,6 +23,18 @@ import (
"time"
)
type fn func(string) string
var (
nameStrategyMap = map[string]fn{
defaultNameStrategy: snakeString,
SnakeAcronymNameStrategy: snakeStringWithAcronym,
}
defaultNameStrategy = "snakeString"
SnakeAcronymNameStrategy = "snakeStringWithAcronym"
nameStrategy = defaultNameStrategy
)
// StrTo is the target string
type StrTo string
@ -198,7 +210,28 @@ func ToInt64(value interface{}) (d int64) {
return
}
// snake string, XxYy to xx_yy , XxYY to xx_yy
func snakeStringWithAcronym(s string) string {
data := make([]byte, 0, len(s)*2)
num := len(s)
for i := 0; i < num; i++ {
d := s[i]
before := false
after := false
if i > 0 {
before = s[i-1] >= 'a' && s[i-1] <= 'z'
}
if i+1 < num {
after = s[i+1] >= 'a' && s[i+1] <= 'z'
}
if i > 0 && d >= 'A' && d <= 'Z' && (before || after) {
data = append(data, '_')
}
data = append(data, d)
}
return strings.ToLower(string(data[:]))
}
// snake string, XxYy to xx_yy , XxYY to xx_y_y
func snakeString(s string) string {
data := make([]byte, 0, len(s)*2)
j := false
@ -216,6 +249,14 @@ func snakeString(s string) string {
return strings.ToLower(string(data[:]))
}
// SetNameStrategy set different name strategy
func SetNameStrategy(s string) {
if SnakeAcronymNameStrategy != s {
nameStrategy = defaultNameStrategy
return
}
nameStrategy = s
}
// camel string, xx_yy to XxYy
func camelString(s string) string {
data := make([]byte, 0, len(s))

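A sketch of how the new strategy is selected (the Image model is hypothetical): with the default snakeString strategy a field named PicURL maps to column pic_u_r_l, while the acronym-aware strategy maps it to pic_url, matching the tests below. The strategy should be chosen before models are registered.

func init() {
    orm.SetNameStrategy(orm.SnakeAcronymNameStrategy)
    orm.RegisterModel(new(Image)) // Image has a PicURL field in this sketch
}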

@ -34,3 +34,37 @@ func TestCamelString(t *testing.T) {
}
}
}
func TestSnakeString(t *testing.T) {
camel := []string{"PicUrl", "HelloWorld", "HelloWorld", "HelLOWord", "PicUrl1", "XyXX"}
snake := []string{"pic_url", "hello_world", "hello_world", "hel_l_o_word", "pic_url1", "xy_x_x"}
answer := make(map[string]string)
for i, v := range camel {
answer[v] = snake[i]
}
for _, v := range camel {
res := snakeString(v)
if res != answer[v] {
t.Error("Unit Test Fail:", v, res, answer[v])
}
}
}
func TestSnakeStringWithAcronym(t *testing.T) {
camel := []string{"ID", "PicURL", "HelloWorld", "HelloWorld", "HelLOWord", "PicUrl1", "XyXX"}
snake := []string{"id", "pic_url", "hello_world", "hello_world", "hel_lo_word", "pic_url1", "xy_xx"}
answer := make(map[string]string)
for i, v := range camel {
answer[v] = snake[i]
}
for _, v := range camel {
res := snakeStringWithAcronym(v)
if res != answer[v] {
t.Error("Unit Test Fail:", v, res, answer[v])
}
}
}

parser.go

@ -39,7 +39,7 @@ var globalRouterTemplate = `package routers
import (
"github.com/astaxie/beego"
"github.com/astaxie/beego/context/param"
"github.com/astaxie/beego/context/param"{{.globalimport}}
)
func init() {
@ -52,6 +52,22 @@ var (
commentFilename string
pkgLastupdate map[string]int64
genInfoList map[string][]ControllerComments
routerHooks = map[string]int{
"beego.BeforeStatic": BeforeStatic,
"beego.BeforeRouter": BeforeRouter,
"beego.BeforeExec": BeforeExec,
"beego.AfterExec": AfterExec,
"beego.FinishRouter": FinishRouter,
}
routerHooksMapping = map[int]string{
BeforeStatic: "beego.BeforeStatic",
BeforeRouter: "beego.BeforeRouter",
BeforeExec: "beego.BeforeExec",
AfterExec: "beego.AfterExec",
FinishRouter: "beego.FinishRouter",
}
)
const commentPrefix = "commentsRouter_"
@ -102,6 +118,20 @@ type parsedComment struct {
routerPath string
methods []string
params map[string]parsedParam
filters []parsedFilter
imports []parsedImport
}
type parsedImport struct {
importPath string
importAlias string
}
type parsedFilter struct {
pattern string
pos int
filter string
params []bool
}
type parsedParam struct {
@ -114,24 +144,69 @@ type parsedParam struct {
func parserComments(f *ast.FuncDecl, controllerName, pkgpath string) error {
if f.Doc != nil {
parsedComment, err := parseComment(f.Doc.List)
parsedComments, err := parseComment(f.Doc.List)
if err != nil {
return err
}
if parsedComment.routerPath != "" {
key := pkgpath + ":" + controllerName
cc := ControllerComments{}
cc.Method = f.Name.String()
cc.Router = parsedComment.routerPath
cc.AllowHTTPMethods = parsedComment.methods
cc.MethodParams = buildMethodParams(f.Type.Params.List, parsedComment)
genInfoList[key] = append(genInfoList[key], cc)
for _, parsedComment := range parsedComments {
if parsedComment.routerPath != "" {
key := pkgpath + ":" + controllerName
cc := ControllerComments{}
cc.Method = f.Name.String()
cc.Router = parsedComment.routerPath
cc.AllowHTTPMethods = parsedComment.methods
cc.MethodParams = buildMethodParams(f.Type.Params.List, parsedComment)
cc.FilterComments = buildFilters(parsedComment.filters)
cc.ImportComments = buildImports(parsedComment.imports)
genInfoList[key] = append(genInfoList[key], cc)
}
}
}
return nil
}
func buildImports(pis []parsedImport) []*ControllerImportComments {
var importComments []*ControllerImportComments
for _, pi := range pis {
importComments = append(importComments, &ControllerImportComments{
ImportPath: pi.importPath,
ImportAlias: pi.importAlias,
})
}
return importComments
}
func buildFilters(pfs []parsedFilter) []*ControllerFilterComments {
var filterComments []*ControllerFilterComments
for _, pf := range pfs {
var (
returnOnOutput bool
resetParams bool
)
if len(pf.params) >= 1 {
returnOnOutput = pf.params[0]
}
if len(pf.params) >= 2 {
resetParams = pf.params[1]
}
filterComments = append(filterComments, &ControllerFilterComments{
Filter: pf.filter,
Pattern: pf.pattern,
Pos: pf.pos,
ReturnOnOutput: returnOnOutput,
ResetParams: resetParams,
})
}
return filterComments
}
func buildMethodParams(funcParams []*ast.Field, pc *parsedComment) []*param.MethodParam {
result := make([]*param.MethodParam, 0, len(funcParams))
for _, fparam := range funcParams {
@ -177,26 +252,15 @@ func paramInPath(name, route string) bool {
var routeRegex = regexp.MustCompile(`@router\s+(\S+)(?:\s+\[(\S+)\])?`)
func parseComment(lines []*ast.Comment) (pc *parsedComment, err error) {
pc = &parsedComment{}
func parseComment(lines []*ast.Comment) (pcs []*parsedComment, err error) {
pcs = []*parsedComment{}
params := map[string]parsedParam{}
filters := []parsedFilter{}
imports := []parsedImport{}
for _, c := range lines {
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
if strings.HasPrefix(t, "@router") {
matches := routeRegex.FindStringSubmatch(t)
if len(matches) == 3 {
pc.routerPath = matches[1]
methods := matches[2]
if methods == "" {
pc.methods = []string{"get"}
//pc.hasGet = true
} else {
pc.methods = strings.Split(methods, ",")
//pc.hasGet = strings.Contains(methods, "get")
}
} else {
return nil, errors.New("Router information is missing")
}
} else if strings.HasPrefix(t, "@Param") {
if strings.HasPrefix(t, "@Param") {
pv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Param")))
if len(pv) < 4 {
logs.Error("Invalid @Param format. Needs at least 4 parameters")
@ -217,10 +281,92 @@ func parseComment(lines []*ast.Comment) (pc *parsedComment, err error) {
p.defValue = pv[3]
p.required, _ = strconv.ParseBool(pv[4])
}
if pc.params == nil {
pc.params = map[string]parsedParam{}
params[funcParamName] = p
}
}
for _, c := range lines {
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
if strings.HasPrefix(t, "@Import") {
iv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Import")))
if len(iv) == 0 || len(iv) > 2 {
logs.Error("Invalid @Import format. Only accepts 1 or 2 parameters")
continue
}
p := parsedImport{}
p.importPath = iv[0]
if len(iv) == 2 {
p.importAlias = iv[1]
}
imports = append(imports, p)
}
}
filterLoop:
for _, c := range lines {
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
if strings.HasPrefix(t, "@Filter") {
fv := getparams(strings.TrimSpace(strings.TrimLeft(t, "@Filter")))
if len(fv) < 3 {
logs.Error("Invalid @Filter format. Needs at least 3 parameters")
continue filterLoop
}
p := parsedFilter{}
p.pattern = fv[0]
posName := fv[1]
if pos, exists := routerHooks[posName]; exists {
p.pos = pos
} else {
logs.Error("Invalid @Filter pos: ", posName)
continue filterLoop
}
p.filter = fv[2]
fvParams := fv[3:]
for _, fvParam := range fvParams {
switch fvParam {
case "true":
p.params = append(p.params, true)
case "false":
p.params = append(p.params, false)
default:
logs.Error("Invalid @Filter param: ", fvParam)
continue filterLoop
}
}
filters = append(filters, p)
}
}
for _, c := range lines {
var pc = &parsedComment{}
pc.params = params
pc.filters = filters
pc.imports = imports
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
if strings.HasPrefix(t, "@router") {
t := strings.TrimSpace(strings.TrimLeft(c.Text, "//"))
matches := routeRegex.FindStringSubmatch(t)
if len(matches) == 3 {
pc.routerPath = matches[1]
methods := matches[2]
if methods == "" {
pc.methods = []string{"get"}
//pc.hasGet = true
} else {
pc.methods = strings.Split(methods, ",")
//pc.hasGet = strings.Contains(methods, "get")
}
pcs = append(pcs, pc)
} else {
return nil, errors.New("Router information is missing")
}
pc.params[funcParamName] = p
}
}
return
@ -266,8 +412,9 @@ func genRouterCode(pkgRealpath string) {
os.Mkdir(getRouterDir(pkgRealpath), 0755)
logs.Info("generate router from comments")
var (
globalinfo string
sortKey []string
globalinfo string
globalimport string
sortKey []string
)
for k := range genInfoList {
sortKey = append(sortKey, k)
@ -285,6 +432,7 @@ func genRouterCode(pkgRealpath string) {
}
allmethod = strings.TrimRight(allmethod, ",") + "}"
}
params := "nil"
if len(c.Params) > 0 {
params = "[]map[string]string{"
@ -295,6 +443,7 @@ func genRouterCode(pkgRealpath string) {
}
params = strings.TrimRight(params, ",") + "}"
}
methodParams := "param.Make("
if len(c.MethodParams) > 0 {
lines := make([]string, 0, len(c.MethodParams))
@ -306,24 +455,66 @@ func genRouterCode(pkgRealpath string) {
",\n "
}
methodParams += ")"
imports := ""
if len(c.ImportComments) > 0 {
for _, i := range c.ImportComments {
if i.ImportAlias != "" {
imports += fmt.Sprintf(`
%s "%s"`, i.ImportAlias, i.ImportPath)
} else {
imports += fmt.Sprintf(`
"%s"`, i.ImportPath)
}
}
}
filters := ""
if len(c.FilterComments) > 0 {
for _, f := range c.FilterComments {
filters += fmt.Sprintf(` &beego.ControllerFilter{
Pattern: "%s",
Pos: %s,
Filter: %s,
ReturnOnOutput: %v,
ResetParams: %v,
},`, f.Pattern, routerHooksMapping[f.Pos], f.Filter, f.ReturnOnOutput, f.ResetParams)
}
}
if filters == "" {
filters = "nil"
} else {
filters = fmt.Sprintf(`[]*beego.ControllerFilter{
%s
}`, filters)
}
globalimport = imports
globalinfo = globalinfo + `
beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"],
beego.ControllerComments{
Method: "` + strings.TrimSpace(c.Method) + `",
` + "Router: `" + c.Router + "`" + `,
AllowHTTPMethods: ` + allmethod + `,
MethodParams: ` + methodParams + `,
Params: ` + params + `})
beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"],
beego.ControllerComments{
Method: "` + strings.TrimSpace(c.Method) + `",
` + "Router: `" + c.Router + "`" + `,
AllowHTTPMethods: ` + allmethod + `,
MethodParams: ` + methodParams + `,
Filters: ` + filters + `,
Params: ` + params + `})
`
}
}
if globalinfo != "" {
f, err := os.Create(filepath.Join(getRouterDir(pkgRealpath), commentFilename))
if err != nil {
panic(err)
}
defer f.Close()
f.WriteString(strings.Replace(globalRouterTemplate, "{{.globalinfo}}", globalinfo, -1))
content := strings.Replace(globalRouterTemplate, "{{.globalinfo}}", globalinfo, -1)
content = strings.Replace(content, "{{.globalimport}}", globalimport, -1)
f.WriteString(content)
}
}
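A hedged example of the annotations the parser above understands; the import path and the auth.CheckLogin filter are invented for illustration. @Import adds an extra (optionally aliased) import to the generated commentsRouter file, @Filter becomes an InsertFilter call with pattern, hook position, filter function and the optional returnOnOutput/resetParams booleans, and @router keeps its existing form.

// @Import github.com/example/project/filters auth
// @Filter /api/* beego.BeforeRouter auth.CheckLogin true false
// @router /api/object/:key [get,post]
func (c *ObjectController) Retrieve() {
    c.Ctx.WriteString("ok")
}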

router.go

@ -71,7 +71,7 @@ var (
// these beego.Controller's methods shouldn't reflect to AutoRouter
exceptMethod = []string{"Init", "Prepare", "Finish", "Render", "RenderString",
"RenderBytes", "Redirect", "Abort", "StopRun", "UrlFor", "ServeJSON", "ServeJSONP",
"ServeXML", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool",
"ServeYAML", "ServeXML", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool",
"GetFloat", "GetFile", "SaveToFile", "StartSession", "SetSession", "GetSession",
"DelSession", "SessionRegenerateID", "DestroySession", "IsAjax", "GetSecureCookie",
"SetSecureCookie", "XsrfToken", "CheckXsrfCookie", "XsrfFormHtml",
@ -133,14 +133,15 @@ type ControllerRegister struct {
// NewControllerRegister returns a new ControllerRegister.
func NewControllerRegister() *ControllerRegister {
cr := &ControllerRegister{
return &ControllerRegister{
routers: make(map[string]*Tree),
policies: make(map[string]*Tree),
pool: sync.Pool{
New: func() interface{} {
return beecontext.NewContext()
},
},
}
cr.pool.New = func() interface{} {
return beecontext.NewContext()
}
return cr
}
// Add controller handler and pattern rules to ControllerRegister.
@ -201,9 +202,12 @@ func (p *ControllerRegister) addWithMethodParams(pattern string, c ControllerInt
numOfFields := elemVal.NumField()
for i := 0; i < numOfFields; i++ {
fieldVal := elemVal.Field(i)
fieldType := elemType.Field(i)
execElem.FieldByName(fieldType.Name).Set(fieldVal)
elemField := execElem.FieldByName(fieldType.Name)
if elemField.CanSet() {
fieldVal := elemVal.Field(i)
elemField.Set(fieldVal)
}
}
return execController
@ -274,6 +278,10 @@ func (p *ControllerRegister) Include(cList ...ControllerInterface) {
key := t.PkgPath() + ":" + t.Name()
if comm, ok := GlobalControllerRouter[key]; ok {
for _, a := range comm {
for _, f := range a.Filters {
p.InsertFilter(f.Pattern, f.Pos, f.Filter, f.ReturnOnOutput, f.ResetParams)
}
p.addWithMethodParams(a.Router, c, a.MethodParams, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method)
}
}
@ -791,7 +799,7 @@ func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request)
if !isRunnable {
//Invoke the request handler
var execController ControllerInterface
if routerInfo.initialize != nil {
if routerInfo != nil && routerInfo.initialize != nil {
execController = routerInfo.initialize()
} else {
vc := reflect.New(runRouter)
@ -881,8 +889,11 @@ Admin:
statusCode = 200
}
logAccess(context, &startTime, statusCode)
timeDur := time.Since(startTime)
context.ResponseWriter.Elapsed = timeDur
if BConfig.Listen.EnableAdmin {
timeDur := time.Since(startTime)
pattern := ""
if routerInfo != nil {
pattern = routerInfo.pattern
@ -897,49 +908,29 @@ Admin:
}
}
if BConfig.RunMode == DEV || BConfig.Log.AccessLogs {
timeDur := time.Since(startTime)
if BConfig.RunMode == DEV && !BConfig.Log.AccessLogs {
var devInfo string
iswin := (runtime.GOOS == "windows")
statusColor := logs.ColorByStatus(iswin, statusCode)
methodColor := logs.ColorByMethod(iswin, r.Method)
resetColor := logs.ColorByMethod(iswin, "")
if BConfig.Log.AccessLogsFormat != "" {
record := &logs.AccessLogRecord{
RemoteAddr: context.Input.IP(),
RequestTime: startTime,
RequestMethod: r.Method,
Request: fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
ServerProtocol: r.Proto,
Host: r.Host,
Status: statusCode,
ElapsedTime: timeDur,
HTTPReferrer: r.Header.Get("Referer"),
HTTPUserAgent: r.Header.Get("User-Agent"),
RemoteUser: r.Header.Get("Remote-User"),
BodyBytesSent: 0, //@todo this one is missing!
}
logs.AccessLog(record, BConfig.Log.AccessLogsFormat)
} else {
if findRouter {
if routerInfo != nil {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s r:%s", context.Input.IP(), statusColor, statusCode,
resetColor, timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path,
routerInfo.pattern)
} else {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor,
timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path)
}
if findRouter {
if routerInfo != nil {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s r:%s", context.Input.IP(), statusColor, statusCode,
resetColor, timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path,
routerInfo.pattern)
} else {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor,
timeDur.String(), "nomatch", methodColor, r.Method, resetColor, r.URL.Path)
}
if iswin {
logs.W32Debug(devInfo)
} else {
logs.Debug(devInfo)
timeDur.String(), "match", methodColor, r.Method, resetColor, r.URL.Path)
}
} else {
devInfo = fmt.Sprintf("|%15s|%s %3d %s|%13s|%8s|%s %-7s %s %-3s", context.Input.IP(), statusColor, statusCode, resetColor,
timeDur.String(), "nomatch", methodColor, r.Method, resetColor, r.URL.Path)
}
if iswin {
logs.W32Debug(devInfo)
} else {
logs.Debug(devInfo)
}
}
// Call WriteHeader if status code has been set changed
@ -957,7 +948,7 @@ func (p *ControllerRegister) handleParamResponse(context *beecontext.Context, ex
context.RenderMethodResult(resultValue)
}
}
if !context.ResponseWriter.Started && context.Output.Status == 0 {
if !context.ResponseWriter.Started && len(results) > 0 && context.Output.Status == 0 {
context.Output.SetStatus(200)
}
}
@ -988,3 +979,38 @@ func toURL(params map[string]string) string {
}
return strings.TrimRight(u, "&")
}
func logAccess(ctx *beecontext.Context, startTime *time.Time, statusCode int) {
//Skip logging if AccessLogs config is false
if !BConfig.Log.AccessLogs {
return
}
//Skip logging static requests unless EnableStaticLogs config is true
if !BConfig.Log.EnableStaticLogs && DefaultAccessLogFilter.Filter(ctx) {
return
}
var (
requestTime time.Time
elapsedTime time.Duration
r = ctx.Request
)
if startTime != nil {
requestTime = *startTime
elapsedTime = time.Since(*startTime)
}
record := &logs.AccessLogRecord{
RemoteAddr: ctx.Input.IP(),
RequestTime: requestTime,
RequestMethod: r.Method,
Request: fmt.Sprintf("%s %s %s", r.Method, r.RequestURI, r.Proto),
ServerProtocol: r.Proto,
Host: r.Host,
Status: statusCode,
ElapsedTime: elapsedTime,
HTTPReferrer: r.Header.Get("Referer"),
HTTPUserAgent: r.Header.Get("User-Agent"),
RemoteUser: r.Header.Get("Remote-User"),
BodyBytesSent: 0, //@todo this one is missing!
}
logs.AccessLog(record, BConfig.Log.AccessLogsFormat)
}

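A configuration sketch for the access-log path that logAccess above serves; the format name is an assumption about the logs module rather than something introduced here. The per-request duration is now also available to custom filters via ctx.ResponseWriter.Elapsed.

package main

import "github.com/astaxie/beego"

func main() {
    beego.BConfig.Log.AccessLogs = true                 // emit one AccessLogRecord per request
    beego.BConfig.Log.EnableStaticLogs = false          // keep static file hits out of the access log
    beego.BConfig.Log.AccessLogsFormat = "JSON_FORMAT"  // assumed format name; empty falls back to the plain access line
    beego.Run()
}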

@ -695,3 +695,30 @@ func beegoResetParams(ctx *context.Context) {
func beegoHandleResetParams(ctx *context.Context) {
ctx.ResponseWriter.Header().Set("splat", ctx.Input.Param(":splat"))
}
// YAML
type YAMLController struct {
Controller
}
func (jc *YAMLController) Prepare() {
jc.Data["yaml"] = "prepare"
jc.ServeYAML()
}
func (jc *YAMLController) Get() {
jc.Data["Username"] = "astaxie"
jc.Ctx.Output.Body([]byte("ok"))
}
func TestYAMLPrepare(t *testing.T) {
r, _ := http.NewRequest("GET", "/yaml/list", nil)
w := httptest.NewRecorder()
handler := NewControllerRegister()
handler.Add("/yaml/list", &YAMLController{})
handler.ServeHTTP(w, r)
if strings.TrimSpace(w.Body.String()) != "prepare" {
t.Errorf(w.Body.String())
}
}
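Beyond the test above, a typical application handler uses the (now AutoRouter-excluded) ServeYAML helper like this sketch; controller and payload are illustrative.

type StatusController struct {
    beego.Controller
}

func (c *StatusController) Get() {
    c.Data["yaml"] = map[string]interface{}{"status": "ok", "version": 1}
    c.ServeYAML() // serializes c.Data["yaml"] as the response body
}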


@ -14,9 +14,9 @@
// Package redis for session provider
//
// depend on github.com/garyburd/redigo/redis
// depend on github.com/gomodule/redigo/redis
//
// go install github.com/garyburd/redigo/redis
// go install github.com/gomodule/redigo/redis
//
// Usage:
// import(
@ -24,10 +24,10 @@
// "github.com/astaxie/beego/session"
// )
//
// func init() {
// globalSessions, _ = session.NewManager("redis", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070"}``)
// go globalSessions.GC()
// }
// func init() {
// globalSessions, _ = session.NewManager("redis", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070"}``)
// go globalSessions.GC()
// }
//
// more docs: http://beego.me/docs/module/session.md
package redis
@ -37,10 +37,11 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/astaxie/beego/session"
"github.com/garyburd/redigo/redis"
"github.com/gomodule/redigo/redis"
)
var redispder = &Provider{}
@ -118,8 +119,8 @@ type Provider struct {
}
// SessionInit init redis session
// savepath like redis server addr,pool size,password,dbnum
// e.g. 127.0.0.1:6379,100,astaxie,0
// savepath like redis server addr,pool size,password,dbnum,IdleTimeout second
// e.g. 127.0.0.1:6379,100,astaxie,0,30
func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
rp.maxlifetime = maxlifetime
configs := strings.Split(savePath, ",")
@ -149,27 +150,39 @@ func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
} else {
rp.dbNum = 0
}
rp.poollist = redis.NewPool(func() (redis.Conn, error) {
c, err := redis.Dial("tcp", rp.savePath)
if err != nil {
return nil, err
var idleTimeout time.Duration = 0
if len(configs) > 4 {
timeout, err := strconv.Atoi(configs[4])
if err == nil && timeout > 0 {
idleTimeout = time.Duration(timeout) * time.Second
}
if rp.password != "" {
if _, err = c.Do("AUTH", rp.password); err != nil {
c.Close()
return nil, err
}
}
//some redis proxy such as twemproxy is not support select command
if rp.dbNum > 0 {
_, err = c.Do("SELECT", rp.dbNum)
}
rp.poollist = &redis.Pool{
Dial: func() (redis.Conn, error) {
c, err := redis.Dial("tcp", rp.savePath)
if err != nil {
c.Close()
return nil, err
}
}
return c, err
}, rp.poolsize)
if rp.password != "" {
if _, err = c.Do("AUTH", rp.password); err != nil {
c.Close()
return nil, err
}
}
// some redis proxies such as twemproxy do not support the SELECT command
if rp.dbNum > 0 {
_, err = c.Do("SELECT", rp.dbNum)
if err != nil {
c.Close()
return nil, err
}
}
return c, err
},
MaxIdle: rp.poolsize,
}
rp.poollist.IdleTimeout = idleTimeout
return rp.poollist.Get().Err()
}

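A minimal wiring sketch for the provider above (address and password are placeholders): the fifth comma-separated field of ProviderConfig is the new idle timeout, in seconds, applied to the redigo pool.

import (
    "github.com/astaxie/beego/session"
    _ "github.com/astaxie/beego/session/redis"
)

func newSessionManager() (*session.Manager, error) {
    return session.NewManager("redis", &session.ManagerConfig{
        CookieName:     "gosessionid",
        Gclifetime:     3600,
        ProviderConfig: "127.0.0.1:6379,100,astaxie,0,30", // addr,poolsize,password,dbnum,idle timeout (s)
    })
}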

@ -0,0 +1,220 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package redis for session provider
//
// depend on github.com/go-redis/redis
//
// go install github.com/go-redis/redis
//
// Usage:
// import(
// _ "github.com/astaxie/beego/session/redis_cluster"
// "github.com/astaxie/beego/session"
// )
//
// func init() {
// globalSessions, _ = session.NewManager("redis_cluster", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070;127.0.0.1:7071"}``)
// go globalSessions.GC()
// }
//
// more docs: http://beego.me/docs/module/session.md
package redis_cluster
import (
"net/http"
"strconv"
"strings"
"sync"
"github.com/astaxie/beego/session"
rediss "github.com/go-redis/redis"
"time"
)
var redispder = &Provider{}
// MaxPoolSize redis_cluster max pool size
var MaxPoolSize = 1000
// SessionStore redis_cluster session store
type SessionStore struct {
p *rediss.ClusterClient
sid string
lock sync.RWMutex
values map[interface{}]interface{}
maxlifetime int64
}
// Set value in redis_cluster session
func (rs *SessionStore) Set(key, value interface{}) error {
rs.lock.Lock()
defer rs.lock.Unlock()
rs.values[key] = value
return nil
}
// Get value in redis_cluster session
func (rs *SessionStore) Get(key interface{}) interface{} {
rs.lock.RLock()
defer rs.lock.RUnlock()
if v, ok := rs.values[key]; ok {
return v
}
return nil
}
// Delete value in redis_cluster session
func (rs *SessionStore) Delete(key interface{}) error {
rs.lock.Lock()
defer rs.lock.Unlock()
delete(rs.values, key)
return nil
}
// Flush clear all values in redis_cluster session
func (rs *SessionStore) Flush() error {
rs.lock.Lock()
defer rs.lock.Unlock()
rs.values = make(map[interface{}]interface{})
return nil
}
// SessionID get redis_cluster session id
func (rs *SessionStore) SessionID() string {
return rs.sid
}
// SessionRelease save session values to redis_cluster
func (rs *SessionStore) SessionRelease(w http.ResponseWriter) {
b, err := session.EncodeGob(rs.values)
if err != nil {
return
}
c := rs.p
c.Set(rs.sid, string(b), time.Duration(rs.maxlifetime) * time.Second)
}
// Provider redis_cluster session provider
type Provider struct {
maxlifetime int64
savePath string
poolsize int
password string
dbNum int
poollist *rediss.ClusterClient
}
// SessionInit init redis_cluster session
// savepath like redis server addr,pool size,password,dbnum
// e.g. 127.0.0.1:6379;127.0.0.1:6380,100,test,0
func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error {
rp.maxlifetime = maxlifetime
configs := strings.Split(savePath, ",")
if len(configs) > 0 {
rp.savePath = configs[0]
}
if len(configs) > 1 {
poolsize, err := strconv.Atoi(configs[1])
if err != nil || poolsize < 0 {
rp.poolsize = MaxPoolSize
} else {
rp.poolsize = poolsize
}
} else {
rp.poolsize = MaxPoolSize
}
if len(configs) > 2 {
rp.password = configs[2]
}
if len(configs) > 3 {
dbnum, err := strconv.Atoi(configs[3])
if err != nil || dbnum < 0 {
rp.dbNum = 0
} else {
rp.dbNum = dbnum
}
} else {
rp.dbNum = 0
}
rp.poollist = rediss.NewClusterClient(&rediss.ClusterOptions{
Addrs: strings.Split(rp.savePath, ";"),
Password: rp.password,
PoolSize: rp.poolsize,
})
return rp.poollist.Ping().Err()
}
// SessionRead read redis_cluster session by sid
func (rp *Provider) SessionRead(sid string) (session.Store, error) {
var kv map[interface{}]interface{}
kvs, err := rp.poollist.Get(sid).Result()
if err != nil && err != rediss.Nil {
return nil, err
}
if len(kvs) == 0 {
kv = make(map[interface{}]interface{})
} else {
if kv, err = session.DecodeGob([]byte(kvs)); err != nil {
return nil, err
}
}
rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime}
return rs, nil
}
// SessionExist check redis_cluster session exist by sid
func (rp *Provider) SessionExist(sid string) bool {
c := rp.poollist
if existed, err := c.Exists(sid).Result(); err != nil || existed == 0 {
return false
}
return true
}
// SessionRegenerate generate new sid for redis_cluster session
func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) {
c := rp.poollist
if existed, err := c.Exists(oldsid).Result(); err != nil || existed == 0 {
// oldsid doesn't exist, set the new sid directly
// ignore the error here: if it returns an error,
// the existed value will be 0
c.Set(sid, "", time.Duration(rp.maxlifetime) * time.Second)
} else {
c.Rename(oldsid, sid)
c.Expire(sid, time.Duration(rp.maxlifetime) * time.Second)
}
return rp.SessionRead(sid)
}
// SessionDestroy delete redis session by id
func (rp *Provider) SessionDestroy(sid string) error {
c := rp.poollist
c.Del(sid)
return nil
}
// SessionGC Implement method, not used.
func (rp *Provider) SessionGC() {
}
// SessionAll return all activeSession
func (rp *Provider) SessionAll() int {
return 0
}
func init() {
session.Register("redis_cluster", redispder)
}


@ -21,6 +21,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
)
@ -127,6 +128,9 @@ func (fp *FileProvider) SessionInit(maxlifetime int64, savePath string) error {
// if file is not exist, create it.
// the file path is generated from sid string.
func (fp *FileProvider) SessionRead(sid string) (Store, error) {
if strings.ContainsAny(sid, "./") {
return nil, nil
}
filepder.lock.Lock()
defer filepder.lock.Unlock()


@ -96,6 +96,7 @@ type ManagerConfig struct {
EnableSidInHTTPHeader bool `json:"EnableSidInHTTPHeader"`
SessionNameInHTTPHeader string `json:"SessionNameInHTTPHeader"`
EnableSidInURLQuery bool `json:"EnableSidInURLQuery"`
SessionIDPrefix string `json:"sessionIDPrefix"`
}
// Manager contains Provider and its configuration.
@ -153,6 +154,11 @@ func NewManager(provideName string, cf *ManagerConfig) (*Manager, error) {
}, nil
}
// GetProvider return current manager's provider
func (manager *Manager) GetProvider() Provider {
return manager.provider
}
// getSid retrieves session identifier from HTTP Request.
// First try to retrieve id by reading from cookie, session cookie name is configurable,
// if not exist, then retrieve id from querying parameters.
@ -331,7 +337,7 @@ func (manager *Manager) sessionID() (string, error) {
if n != len(b) || err != nil {
return "", fmt.Errorf("Could not successfully read from the system CSPRNG")
}
return hex.EncodeToString(b), nil
return manager.config.SessionIDPrefix + hex.EncodeToString(b), nil
}
// Set cookie with https.

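The new SessionIDPrefix knob combines with any provider; a short sketch with an illustrative prefix:

func initSessions() *session.Manager {
    m, _ := session.NewManager("memory", &session.ManagerConfig{
        CookieName:      "gosessionid",
        Gclifetime:      3600,
        SessionIDPrefix: "myapp-", // sessionID() above prepends this to the random hex id
    })
    go m.GC()
    return m
}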

@ -74,7 +74,7 @@ func serverStaticRouter(ctx *context.Context) {
if enableCompress {
acceptEncoding = context.ParseEncoding(ctx.Request)
}
b, n, sch, err := openFile(filePath, fileInfo, acceptEncoding)
b, n, sch, reader, err := openFile(filePath, fileInfo, acceptEncoding)
if err != nil {
if BConfig.RunMode == DEV {
logs.Warn("Can't compress the file:", filePath, err)
@ -89,47 +89,53 @@ func serverStaticRouter(ctx *context.Context) {
ctx.Output.Header("Content-Length", strconv.FormatInt(sch.size, 10))
}
http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, sch)
http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, reader)
}
type serveContentHolder struct {
*bytes.Reader
data []byte
modTime time.Time
size int64
encoding string
}
type serveContentReader struct {
*bytes.Reader
}
var (
staticFileMap = make(map[string]*serveContentHolder)
mapLock sync.RWMutex
)
func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, error) {
func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, *serveContentReader, error) {
mapKey := acceptEncoding + ":" + filePath
mapLock.RLock()
mapFile := staticFileMap[mapKey]
mapLock.RUnlock()
if isOk(mapFile, fi) {
return mapFile.encoding != "", mapFile.encoding, mapFile, nil
reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil
}
mapLock.Lock()
defer mapLock.Unlock()
if mapFile = staticFileMap[mapKey]; !isOk(mapFile, fi) {
file, err := os.Open(filePath)
if err != nil {
return false, "", nil, err
return false, "", nil, nil, err
}
defer file.Close()
var bufferWriter bytes.Buffer
_, n, err := context.WriteFile(acceptEncoding, &bufferWriter, file)
if err != nil {
return false, "", nil, err
return false, "", nil, nil, err
}
mapFile = &serveContentHolder{Reader: bytes.NewReader(bufferWriter.Bytes()), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), encoding: n}
mapFile = &serveContentHolder{data: bufferWriter.Bytes(), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), encoding: n}
staticFileMap[mapKey] = mapFile
}
return mapFile.encoding != "", mapFile.encoding, mapFile, nil
reader := &serveContentReader{Reader: bytes.NewReader(mapFile.data)}
return mapFile.encoding != "", mapFile.encoding, mapFile, reader, nil
}
func isOk(s *serveContentHolder, fi os.FileInfo) bool {
@ -172,7 +178,7 @@ func searchFile(ctx *context.Context) (string, os.FileInfo, error) {
if !strings.Contains(requestPath, prefix) {
continue
}
if len(requestPath) > len(prefix) && requestPath[len(prefix)] != '/' {
if prefix != "/" && len(requestPath) > len(prefix) && requestPath[len(prefix)] != '/' {
continue
}
filePath := path.Join(staticDir, requestPath[len(prefix):])

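The prefix check above now special-cases "/", so an application can serve its whole tree from one static directory; a sketch:

package main

import "github.com/astaxie/beego"

func main() {
    beego.SetStaticPath("/", "static") // "/download/file.zip" now resolves to static/download/file.zip
    beego.Run()
}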

@ -16,7 +16,7 @@ var licenseFile = filepath.Join(currentWorkDir, "LICENSE")
func testOpenFile(encoding string, content []byte, t *testing.T) {
fi, _ := os.Stat(licenseFile)
b, n, sch, err := openFile(licenseFile, fi, encoding)
b, n, sch, reader, err := openFile(licenseFile, fi, encoding)
if err != nil {
t.Log(err)
t.Fail()
@ -24,7 +24,7 @@ func testOpenFile(encoding string, content []byte, t *testing.T) {
t.Log("open static file encoding "+n, b)
assetOpenFileAndContent(sch, content, t)
assetOpenFileAndContent(sch, reader, content, t)
}
func TestOpenStaticFile_1(t *testing.T) {
file, _ := os.Open(licenseFile)
@ -53,13 +53,13 @@ func TestOpenStaticFileDeflate_1(t *testing.T) {
testOpenFile("deflate", content, t)
}
func assetOpenFileAndContent(sch *serveContentHolder, content []byte, t *testing.T) {
func assetOpenFileAndContent(sch *serveContentHolder, reader *serveContentReader, content []byte, t *testing.T) {
t.Log(sch.size, len(content))
if sch.size != int64(len(content)) {
t.Log("static content file size not same")
t.Fail()
}
bs, _ := ioutil.ReadAll(sch)
bs, _ := ioutil.ReadAll(reader)
for i, v := range content {
if v != bs[i] {
t.Log("content not same")


@ -122,6 +122,7 @@ type Schema struct {
Items *Schema `json:"items,omitempty" yaml:"items,omitempty"`
Properties map[string]Propertie `json:"properties,omitempty" yaml:"properties,omitempty"`
Enum []interface{} `json:"enum,omitempty" yaml:"enum,omitempty"`
Example interface{} `json:"example,omitempty" yaml:"example,omitempty"`
}
// Propertie are taken from the JSON Schema definition but their definitions were adjusted to the Swagger Specification
@ -131,7 +132,7 @@ type Propertie struct {
Description string `json:"description,omitempty" yaml:"description,omitempty"`
Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
Type string `json:"type,omitempty" yaml:"type,omitempty"`
Example string `json:"example,omitempty" yaml:"example,omitempty"`
Example interface{} `json:"example,omitempty" yaml:"example,omitempty"`
Required []string `json:"required,omitempty" yaml:"required,omitempty"`
Format string `json:"format,omitempty" yaml:"format,omitempty"`
ReadOnly bool `json:"readOnly,omitempty" yaml:"readOnly,omitempty"`

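With Example typed as interface{}, generated swagger properties can carry non-string examples; a small sketch assuming the github.com/astaxie/beego/swagger import:

prop := swagger.Propertie{
    Type:    "integer",
    Format:  "int64",
    Example: 42, // previously this had to be the string "42"
}
_ = prop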

@ -20,6 +20,7 @@ import (
"html/template"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"regexp"
@ -40,6 +41,7 @@ var (
beeTemplateExt = []string{"tpl", "html"}
// beeTemplatePreprocessors stores associations of extension -> preprocessor handler
beeTemplateEngines = map[string]templatePreProcessor{}
beeTemplateFS = defaultFSFunc
)
// ExecuteTemplate applies the template with name to the specified data object,
@ -181,12 +183,17 @@ func lockViewPaths() {
// BuildTemplate will build all template files in a directory.
// it makes beego can render any template file in view directory.
func BuildTemplate(dir string, files ...string) error {
if _, err := os.Stat(dir); err != nil {
var err error
fs := beeTemplateFS()
f, err := fs.Open(dir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.New("dir open err")
}
defer f.Close()
beeTemplates, ok := beeViewPathTemplates[dir]
if !ok {
panic("Unknown view path: " + dir)
@ -195,11 +202,11 @@ func BuildTemplate(dir string, files ...string) error {
root: dir,
files: make(map[string][]string),
}
err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {
err = Walk(fs, dir, func(path string, f os.FileInfo, err error) error {
return self.visit(path, f, err)
})
if err != nil {
fmt.Printf("filepath.Walk() returned %v\n", err)
fmt.Printf("Walk() returned %v\n", err)
return err
}
buildAllFiles := len(files) == 0
@ -210,14 +217,15 @@ func BuildTemplate(dir string, files ...string) error {
ext := filepath.Ext(file)
var t *template.Template
if len(ext) == 0 {
t, err = getTemplate(self.root, file, v...)
t, err = getTemplate(self.root, fs, file, v...)
} else if fn, ok := beeTemplateEngines[ext[1:]]; ok {
t, err = fn(self.root, file, beegoTplFuncMap)
} else {
t, err = getTemplate(self.root, file, v...)
t, err = getTemplate(self.root, fs, file, v...)
}
if err != nil {
logs.Error("parse template err:", file, err)
templatesLock.Unlock()
return err
}
beeTemplates[file] = t
@ -228,9 +236,10 @@ func BuildTemplate(dir string, files ...string) error {
return nil
}
func getTplDeep(root, file, parent string, t *template.Template) (*template.Template, [][]string, error) {
func getTplDeep(root string, fs http.FileSystem, file string, parent string, t *template.Template) (*template.Template, [][]string, error) {
var fileAbsPath string
var rParent string
var err error
if filepath.HasPrefix(file, "../") {
rParent = filepath.Join(filepath.Dir(parent), file)
fileAbsPath = filepath.Join(root, filepath.Dir(parent), file)
@ -238,10 +247,12 @@ func getTplDeep(root, file, parent string, t *template.Template) (*template.Temp
rParent = file
fileAbsPath = filepath.Join(root, file)
}
if e := utils.FileExists(fileAbsPath); !e {
f, err := fs.Open(fileAbsPath)
defer f.Close()
if err != nil {
panic("can't find template file:" + file)
}
data, err := ioutil.ReadFile(fileAbsPath)
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, [][]string{}, err
}
@ -260,7 +271,7 @@ func getTplDeep(root, file, parent string, t *template.Template) (*template.Temp
if !HasTemplateExt(m[1]) {
continue
}
_, _, err = getTplDeep(root, m[1], rParent, t)
_, _, err = getTplDeep(root, fs, m[1], rParent, t)
if err != nil {
return nil, [][]string{}, err
}
@ -269,14 +280,14 @@ func getTplDeep(root, file, parent string, t *template.Template) (*template.Temp
return t, allSub, nil
}
func getTemplate(root, file string, others ...string) (t *template.Template, err error) {
func getTemplate(root string, fs http.FileSystem, file string, others ...string) (t *template.Template, err error) {
t = template.New(file).Delims(BConfig.WebConfig.TemplateLeft, BConfig.WebConfig.TemplateRight).Funcs(beegoTplFuncMap)
var subMods [][]string
t, subMods, err = getTplDeep(root, file, "", t)
t, subMods, err = getTplDeep(root, fs, file, "", t)
if err != nil {
return nil, err
}
t, err = _getTemplate(t, root, subMods, others...)
t, err = _getTemplate(t, root, fs, subMods, others...)
if err != nil {
return nil, err
@ -284,7 +295,7 @@ func getTemplate(root, file string, others ...string) (t *template.Template, err
return
}
func _getTemplate(t0 *template.Template, root string, subMods [][]string, others ...string) (t *template.Template, err error) {
func _getTemplate(t0 *template.Template, root string, fs http.FileSystem, subMods [][]string, others ...string) (t *template.Template, err error) {
t = t0
for _, m := range subMods {
if len(m) == 2 {
@ -296,11 +307,11 @@ func _getTemplate(t0 *template.Template, root string, subMods [][]string, others
for _, otherFile := range others {
if otherFile == m[1] {
var subMods1 [][]string
t, subMods1, err = getTplDeep(root, otherFile, "", t)
t, subMods1, err = getTplDeep(root, fs, otherFile, "", t)
if err != nil {
logs.Trace("template parse file err:", err)
} else if len(subMods1) > 0 {
t, err = _getTemplate(t, root, subMods1, others...)
t, err = _getTemplate(t, root, fs, subMods1, others...)
}
break
}
@ -309,8 +320,16 @@ func _getTemplate(t0 *template.Template, root string, subMods [][]string, others
for _, otherFile := range others {
var data []byte
fileAbsPath := filepath.Join(root, otherFile)
data, err = ioutil.ReadFile(fileAbsPath)
f, err := fs.Open(fileAbsPath)
if err != nil {
f.Close()
logs.Trace("template file parse error, not success open file:", err)
continue
}
data, err = ioutil.ReadAll(f)
f.Close()
if err != nil {
logs.Trace("template file parse error, not success read file:", err)
continue
}
reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*define[ ]+\"([^\"]+)\"")
@ -318,11 +337,14 @@ func _getTemplate(t0 *template.Template, root string, subMods [][]string, others
for _, sub := range allSub {
if len(sub) == 2 && sub[1] == m[1] {
var subMods1 [][]string
t, subMods1, err = getTplDeep(root, otherFile, "", t)
t, subMods1, err = getTplDeep(root, fs, otherFile, "", t)
if err != nil {
logs.Trace("template parse file err:", err)
} else if len(subMods1) > 0 {
t, err = _getTemplate(t, root, subMods1, others...)
t, err = _getTemplate(t, root, fs, subMods1, others...)
if err != nil {
logs.Trace("template parse file err:", err)
}
}
break
}
@ -334,6 +356,17 @@ func _getTemplate(t0 *template.Template, root string, subMods [][]string, others
return
}
type templateFSFunc func() http.FileSystem
func defaultFSFunc() http.FileSystem {
return FileSystem{}
}
// SetTemplateFSFunc set default filesystem function
func SetTemplateFSFunc(fnt templateFSFunc) {
beeTemplateFS = fnt
}
// SetViewsPath sets view directory path in beego application.
func SetViewsPath(path string) *App {
BConfig.WebConfig.ViewsPath = path


@ -16,6 +16,9 @@ package beego
import (
"bytes"
"github.com/astaxie/beego/testdata"
"github.com/elazarl/go-bindata-assetfs"
"net/http"
"os"
"path/filepath"
"testing"
@ -256,3 +259,58 @@ func TestTemplateLayout(t *testing.T) {
}
os.RemoveAll(dir)
}
type TestingFileSystem struct {
assetfs *assetfs.AssetFS
}
func (d TestingFileSystem) Open(name string) (http.File, error) {
return d.assetfs.Open(name)
}
var outputBinData = `<!DOCTYPE html>
<html>
<head>
<title>beego welcome template</title>
</head>
<body>
<h1>Hello, blocks!</h1>
<h1>Hello, astaxie!</h1>
<h2>Hello</h2>
<p> This is SomeVar: val</p>
</body>
</html>
`
func TestFsBinData(t *testing.T) {
SetTemplateFSFunc(func() http.FileSystem {
return TestingFileSystem{&assetfs.AssetFS{Asset: testdata.Asset, AssetDir: testdata.AssetDir, AssetInfo: testdata.AssetInfo}}
})
dir := "views"
if err := AddViewPath("views"); err != nil {
t.Fatal(err)
}
beeTemplates := beeViewPathTemplates[dir]
if len(beeTemplates) != 3 {
t.Fatalf("should be 3 but got %v", len(beeTemplates))
}
if err := beeTemplates["index.tpl"].ExecuteTemplate(os.Stdout, "index.tpl", map[string]string{"Title": "Hello", "SomeVar": "val"}); err != nil {
t.Fatal(err)
}
out := bytes.NewBufferString("")
if err := beeTemplates["index.tpl"].ExecuteTemplate(out, "index.tpl", map[string]string{"Title": "Hello", "SomeVar": "val"}); err != nil {
t.Fatal(err)
}
if out.String() != outputBinData {
t.Log(out.String())
t.Fatal("Compare failed")
}
}

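Outside of tests, the same hook is normally installed at startup so templates load from an embedded filesystem instead of disk; a sketch assuming an assets package generated by go-bindata-assetfs:

package main

import (
    "net/http"

    "github.com/astaxie/beego"
    assetfs "github.com/elazarl/go-bindata-assetfs"

    "example.com/app/assets" // hypothetical generated package
)

func main() {
    beego.SetTemplateFSFunc(func() http.FileSystem {
        return &assetfs.AssetFS{Asset: assets.Asset, AssetDir: assets.AssetDir, AssetInfo: assets.AssetInfo}
    })
    beego.Run()
}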

@ -17,6 +17,7 @@ package beego
import (
"errors"
"fmt"
"html"
"html/template"
"net/url"
"reflect"
@ -84,24 +85,24 @@ func DateFormat(t time.Time, layout string) (datestring string) {
var datePatterns = []string{
// year
"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
"y", "06", //A two digit representation of a year Examples: 99 or 03
"y", "06", //A two digit representation of a year Examples: 99 or 03
// month
"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
"F", "January", // A full textual representation of a month, such as January or March January through December
// day
"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
"j", "2", // Day of the month without leading zeros 1 to 31
"j", "2", // Day of the month without leading zeros 1 to 31
// week
"D", "Mon", // A textual representation of a day, three letters Mon through Sun
"D", "Mon", // A textual representation of a day, three letters Mon through Sun
"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
// time
"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
@ -207,14 +208,12 @@ func Htmlquote(text string) string {
'&lt;&#39;&amp;&quot;&gt;'
*/
text = strings.Replace(text, "&", "&amp;", -1) // Must be done first!
text = strings.Replace(text, "<", "&lt;", -1)
text = strings.Replace(text, ">", "&gt;", -1)
text = strings.Replace(text, "'", "&#39;", -1)
text = strings.Replace(text, "\"", "&quot;", -1)
text = strings.Replace(text, "“", "&ldquo;", -1)
text = strings.Replace(text, "”", "&rdquo;", -1)
text = strings.Replace(text, " ", "&nbsp;", -1)
text = html.EscapeString(text)
text = strings.NewReplacer(
`“`, "&ldquo;",
`”`, "&rdquo;",
` `, "&nbsp;",
).Replace(text)
return strings.TrimSpace(text)
}
@ -228,17 +227,7 @@ func Htmlunquote(text string) string {
'<\\'&">'
*/
// strings.Replace(s, old, new, n)
// Replace old with new in s; n is the number of replacements, and n < 0 means replace all
text = strings.Replace(text, "&nbsp;", " ", -1)
text = strings.Replace(text, "&rdquo;", "”", -1)
text = strings.Replace(text, "&ldquo;", "“", -1)
text = strings.Replace(text, "&quot;", "\"", -1)
text = strings.Replace(text, "&#39;", "'", -1)
text = strings.Replace(text, "&gt;", ">", -1)
text = strings.Replace(text, "&lt;", "<", -1)
text = strings.Replace(text, "&amp;", "&", -1) // Must be done last!
text = html.UnescapeString(text)
return strings.TrimSpace(text)
}
@ -703,7 +692,7 @@ func ge(arg1, arg2 interface{}) (bool, error) {
// MapGet getting value from map by keys
// usage:
// Data["m"] = map[string]interface{} {
// Data["m"] = M{
// "a": 1,
// "1": map[string]float64{
// "c": 4,


@ -94,7 +94,7 @@ func TestCompareRelated(t *testing.T) {
}
func TestHtmlquote(t *testing.T) {
h := `&lt;&#39;&nbsp;&rdquo;&ldquo;&amp;&quot;&gt;`
h := `&lt;&#39;&nbsp;&rdquo;&ldquo;&amp;&#34;&gt;`
s := `<' ”“&">`
if Htmlquote(s) != h {
t.Error("should be equal")
@ -102,8 +102,8 @@ func TestHtmlquote(t *testing.T) {
}
func TestHtmlunquote(t *testing.T) {
h := `&lt;&#39;&nbsp;&rdquo;&ldquo;&amp;&quot;&gt;`
s := `<' ”“&">`
h := `&lt;&#39;&nbsp;&rdquo;&ldquo;&amp;&#34;&gt;`
s := `<' ”“&">`
if Htmlunquote(h) != s {
t.Error("should be equal")
}
@ -329,7 +329,7 @@ func TestMapGet(t *testing.T) {
}
// test 2 level map
m2 := map[string]interface{}{
m2 := M{
"1": map[string]float64{
"2": 3.5,
},
@ -344,11 +344,11 @@ func TestMapGet(t *testing.T) {
}
// test 5 level map
m5 := map[string]interface{}{
"1": map[string]interface{}{
"2": map[string]interface{}{
"3": map[string]interface{}{
"4": map[string]interface{}{
m5 := M{
"1": M{
"2": M{
"3": M{
"4": M{
"5": 1.2,
},
},

testdata/Makefile

@ -0,0 +1,2 @@
build_view:
$(GOPATH)/bin/go-bindata-assetfs -pkg testdata views/...

testdata/bindata.go

@ -0,0 +1,296 @@
// Code generated by go-bindata.
// sources:
// views/blocks/block.tpl
// views/header.tpl
// views/index.tpl
// DO NOT EDIT!
package testdata
import (
"bytes"
"compress/gzip"
"fmt"
"github.com/elazarl/go-bindata-assetfs"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _viewsBlocksBlockTpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xaa\xae\x4e\x49\x4d\xcb\xcc\x4b\x55\x50\x4a\xca\xc9\x4f\xce\x56\xaa\xad\xe5\xb2\xc9\x30\xb4\xf3\x48\xcd\xc9\xc9\xd7\x51\x00\x8b\x15\x2b\xda\xe8\x67\x18\xda\x71\x55\x57\xa7\xe6\xa5\xd4\xd6\x02\x02\x00\x00\xff\xff\xfd\xa1\x7a\xf6\x32\x00\x00\x00")
func viewsBlocksBlockTplBytes() ([]byte, error) {
return bindataRead(
_viewsBlocksBlockTpl,
"views/blocks/block.tpl",
)
}
func viewsBlocksBlockTpl() (*asset, error) {
bytes, err := viewsBlocksBlockTplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "views/blocks/block.tpl", size: 50, mode: os.FileMode(436), modTime: time.Unix(1541431067, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _viewsHeaderTpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xaa\xae\x4e\x49\x4d\xcb\xcc\x4b\x55\x50\xca\x48\x4d\x4c\x49\x2d\x52\xaa\xad\xe5\xb2\xc9\x30\xb4\xf3\x48\xcd\xc9\xc9\xd7\x51\x48\x2c\x2e\x49\xac\xc8\x4c\x55\xb4\xd1\xcf\x30\xb4\xe3\xaa\xae\x4e\xcd\x4b\xa9\xad\x05\x04\x00\x00\xff\xff\xe4\x12\x47\x01\x34\x00\x00\x00")
func viewsHeaderTplBytes() ([]byte, error) {
return bindataRead(
_viewsHeaderTpl,
"views/header.tpl",
)
}
func viewsHeaderTpl() (*asset, error) {
bytes, err := viewsHeaderTplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "views/header.tpl", size: 52, mode: os.FileMode(436), modTime: time.Unix(1541431067, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
var _viewsIndexTpl = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x8f\xbd\x8a\xc3\x30\x10\x84\x6b\xeb\x29\xe6\xfc\x00\x16\xb8\x3c\x16\x35\x77\xa9\x13\x88\x09\xa4\xf4\xcf\x12\x99\x48\x48\xd8\x82\x10\x84\xde\x3d\xc8\x8a\x8b\x90\x6a\xa4\xd9\x6f\xd8\x59\xfa\xf9\x3f\xfe\x75\xd7\xd3\x01\x3a\x58\xa3\x04\x15\x01\x48\x73\x3f\xe5\x07\x40\x61\x0e\x86\xd5\xc0\x7c\x73\x78\xb0\x19\x9d\x65\x04\xb6\xde\xf4\x81\x49\x96\x69\x8e\xc8\x3d\x43\x83\x9b\x9e\x4a\x88\x2a\xc6\x9d\x43\x3d\x18\x37\xde\xeb\x94\x3e\xdd\x1c\xe1\xe5\xcb\xde\xe0\x55\x6e\xd2\x04\x6f\x32\x20\x2a\xd2\xad\x8a\x11\x4d\x97\x57\x22\x25\x92\xba\x55\xa2\x22\xaf\xd0\xe9\x79\xc5\xbc\xe2\xec\x2c\x5f\xfa\xe5\x17\x99\x7b\x7f\x36\xd2\x97\x8a\xa5\x19\xc9\x72\xe7\x2b\x00\x00\xff\xff\xb2\x39\xca\x9f\xff\x00\x00\x00")
func viewsIndexTplBytes() ([]byte, error) {
return bindataRead(
_viewsIndexTpl,
"views/index.tpl",
)
}
func viewsIndexTpl() (*asset, error) {
bytes, err := viewsIndexTplBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "views/index.tpl", size: 255, mode: os.FileMode(436), modTime: time.Unix(1541434906, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"views/blocks/block.tpl": viewsBlocksBlockTpl,
"views/header.tpl": viewsHeaderTpl,
"views/index.tpl": viewsIndexTpl,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"views": &bintree{nil, map[string]*bintree{
"blocks": &bintree{nil, map[string]*bintree{
"block.tpl": &bintree{viewsBlocksBlockTpl, map[string]*bintree{}},
}},
"header.tpl": &bintree{viewsHeaderTpl, map[string]*bintree{}},
"index.tpl": &bintree{viewsIndexTpl, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
func assetFS() *assetfs.AssetFS {
assetInfo := func(path string) (os.FileInfo, error) {
return os.Stat(path)
}
for k := range _bintree.Children {
return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: assetInfo, Prefix: k}
}
panic("unreachable")
}
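The generated helpers above are ordinary Go, so the embedded test templates can be exercised without beego at all. A minimal sketch, assuming it sits in the same package as the generated bindata and imports html/template, os and strings; asset names are registered with the "views/" prefix stripped so that the {{template "blocks/block.tpl"}} reference in index.tpl below resolves:

func renderIndexSketch() error {
	root := template.New("root")
	for _, name := range AssetNames() {
		// register each asset under its path minus the "views/" prefix, matching
		// the names index.tpl uses in its {{template}} actions
		if _, err := root.New(strings.TrimPrefix(name, "views/")).Parse(string(MustAsset(name))); err != nil {
			return err
		}
	}
	data := map[string]interface{}{"Title": "Demo", "SomeVar": "value"}
	return root.ExecuteTemplate(os.Stdout, "index.tpl", data)
}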

testdata/views/blocks/block.tpl vendored Normal file
@@ -0,0 +1,3 @@
{{define "block"}}
<h1>Hello, blocks!</h1>
{{end}}

testdata/views/header.tpl vendored Normal file
@@ -0,0 +1,3 @@
{{define "header"}}
<h1>Hello, astaxie!</h1>
{{end}}

testdata/views/index.tpl vendored Normal file
@@ -0,0 +1,15 @@
<!DOCTYPE html>
<html>
<head>
<title>beego welcome template</title>
</head>
<body>
{{template "block"}}
{{template "header"}}
{{template "blocks/block.tpl"}}
<h2>{{ .Title }}</h2>
<p> This is SomeVar: {{ .SomeVar }}</p>
</body>
</html>

@@ -428,6 +428,9 @@ func run() {
continue
case <-changed:
now = time.Now().Local()
for _, t := range AdminTaskList {
t.SetNext(now)
}
continue
case <-stop:
return
@@ -446,6 +449,7 @@ func StopTask() {
// AddTask add task with name
func AddTask(taskname string, t Tasker) {
t.SetNext(time.Now().Local())
AdminTaskList[taskname] = t
if isstart {
changed <- true
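The two hunks above are what make runtime registration work: run() now recomputes every task's next run when woken through the changed channel, and AddTask both seeds SetNext and sends that wake-up if the scheduler is already started. A hedged sketch of the resulting usage, assuming the toolbox package's NewTask/StartTask/StopTask helpers and importing log; the task name and cron spec are illustrative only:

func main() {
	toolbox.StartTask()
	defer toolbox.StopTask()

	// registered after the scheduler is already running; the changed notification
	// added above lets run() pick it up without a restart
	tk := toolbox.NewTask("heartbeat", "0 30 * * * *", func() error { // sec min hour day month weekday
		log.Println("heartbeat")
		return nil
	})
	toolbox.AddTask("heartbeat", tk)

	select {} // keep the process alive for the demo
}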

@@ -26,6 +26,7 @@ func TestNewBeeMap(t *testing.T) {
}
func TestSet(t *testing.T) {
safeMap = NewBeeMap()
if ok := safeMap.Set("astaxie", 1); !ok {
t.Error("expected", true, "got", false)
}

@@ -245,7 +245,21 @@ func (v *Validation) ZipCode(obj interface{}, key string) *Result {
}
func (v *Validation) apply(chk Validator, obj interface{}) *Result {
if nil == obj {
if chk.IsSatisfied(obj) {
return &Result{Ok: true}
}
} else if reflect.TypeOf(obj).Kind() == reflect.Ptr {
if reflect.ValueOf(obj).IsNil() {
if chk.IsSatisfied(nil) {
return &Result{Ok: true}
}
} else {
if chk.IsSatisfied(reflect.ValueOf(obj).Elem().Interface()) {
return &Result{Ok: true}
}
}
} else if chk.IsSatisfied(obj) {
return &Result{Ok: true}
}
@@ -351,13 +365,24 @@ func (v *Validation) Valid(obj interface{}) (b bool, err error) {
return
}
var hasRequired bool
for _, vf := range vfs {
if vf.Name == "Required" {
hasRequired = true
}
currentField := objV.Field(i).Interface()
if objV.Field(i).Kind() == reflect.Ptr {
if objV.Field(i).IsNil() {
currentField = ""
} else {
currentField = objV.Field(i).Elem().Interface()
}
}
chk := Required{""}.IsSatisfied(currentField)
if !hasRequired && v.RequiredFirst && !chk {
if _, ok := CanSkipFuncs[vf.Name]; ok {
continue
}
@@ -414,3 +439,9 @@ func (v *Validation) RecursiveValid(objc interface{}) (bool, error) {
}
return pass, err
}
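// CanSkipAlso adds a validation function name to CanSkipFuncs, so it is also
// skipped for empty, non-Required fields when RequiredFirst is set.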
func (v *Validation) CanSkipAlso(skipFunc string) {
if _, ok := CanSkipFuncs[skipFunc]; !ok {
CanSkipFuncs[skipFunc] = struct{}{}
}
}

@@ -442,3 +442,122 @@ func TestSkipValid(t *testing.T) {
t.Fatal("validation should be passed")
}
}
func TestPointer(t *testing.T) {
type User struct {
ID int
Email *string `valid:"Email"`
ReqEmail *string `valid:"Required;Email"`
}
u := User{
ReqEmail: nil,
Email: nil,
}
valid := Validation{}
b, err := valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if b {
t.Fatal("validation should not be passed")
}
validEmail := "a@a.com"
u = User{
ReqEmail: &validEmail,
Email: nil,
}
valid = Validation{RequiredFirst: true}
b, err = valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if !b {
t.Fatal("validation should be passed")
}
u = User{
ReqEmail: &validEmail,
Email: nil,
}
valid = Validation{}
b, err = valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if b {
t.Fatal("validation should not be passed")
}
invalidEmail := "a@a"
u = User{
ReqEmail: &validEmail,
Email: &invalidEmail,
}
valid = Validation{RequiredFirst: true}
b, err = valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if b {
t.Fatal("validation should not be passed")
}
u = User{
ReqEmail: &validEmail,
Email: &invalidEmail,
}
valid = Validation{}
b, err = valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if b {
t.Fatal("validation should not be passed")
}
}
func TestCanSkipAlso(t *testing.T) {
type User struct {
ID int
Email string `valid:"Email"`
ReqEmail string `valid:"Required;Email"`
MatchRange int `valid:"Range(10, 20)"`
}
u := User{
ReqEmail: "a@a.com",
Email: "",
MatchRange: 0,
}
valid := Validation{RequiredFirst: true}
b, err := valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if b {
t.Fatal("validation should not be passed")
}
valid = Validation{RequiredFirst: true}
valid.CanSkipAlso("Range")
b, err = valid.Valid(u)
if err != nil {
t.Fatal(err)
}
if !b {
t.Fatal("validation should be passed")
}
}

vendor/golang.org/x/crypto/LICENSE generated vendored Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

vendor/golang.org/x/crypto/acme/acme.go generated vendored Normal file
@@ -0,0 +1,921 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package acme provides an implementation of the
// Automatic Certificate Management Environment (ACME) spec.
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02 for details.
//
// Most common scenarios will want to use autocert subdirectory instead,
// which provides automatic access to certificates from Let's Encrypt
// and any other ACME-based CA.
//
// This package is a work in progress and makes no API stability promises.
package acme
import (
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"net/http"
"strings"
"sync"
"time"
)
const (
// LetsEncryptURL is the Directory endpoint of Let's Encrypt CA.
LetsEncryptURL = "https://acme-v01.api.letsencrypt.org/directory"
// ALPNProto is the ALPN protocol name used by a CA server when validating
// tls-alpn-01 challenges.
//
// Package users must ensure their servers can negotiate the ACME ALPN
// in order for tls-alpn-01 challenge verifications to succeed.
ALPNProto = "acme-tls/1"
)
// idPeACMEIdentifierV1 is the OID for the ACME extension for the TLS-ALPN challenge.
var idPeACMEIdentifierV1 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 30, 1}
const (
maxChainLen = 5 // max depth and breadth of a certificate chain
maxCertSize = 1 << 20 // max size of a certificate, in bytes
// Max number of collected nonces kept in memory.
// Expect usual peak of 1 or 2.
maxNonces = 100
)
// Client is an ACME client.
// The only required field is Key. An example of creating a client with a new key
// is as follows:
//
// key, err := rsa.GenerateKey(rand.Reader, 2048)
// if err != nil {
// log.Fatal(err)
// }
// client := &Client{Key: key}
//
type Client struct {
// Key is the account key used to register with a CA and sign requests.
// Key.Public() must return a *rsa.PublicKey or *ecdsa.PublicKey.
Key crypto.Signer
// HTTPClient optionally specifies an HTTP client to use
// instead of http.DefaultClient.
HTTPClient *http.Client
// DirectoryURL points to the CA directory endpoint.
// If empty, LetsEncryptURL is used.
// Mutating this value after a successful call of Client's Discover method
// will have no effect.
DirectoryURL string
// RetryBackoff computes the duration after which the nth retry of a failed request
// should occur. The value of n for the first call on failure is 1.
// The values of r and resp are the request and response of the last failed attempt.
// If the returned value is negative or zero, no more retries are done and an error
// is returned to the caller of the original method.
//
// Requests which result in a 4xx client error are not retried,
// except for 400 Bad Request due to "bad nonce" errors and 429 Too Many Requests.
//
// If RetryBackoff is nil, a truncated exponential backoff algorithm
// with the ceiling of 10 seconds is used, where each subsequent retry n
// is done after either ("Retry-After" + jitter) or (2^n seconds + jitter),
// preferring the former if "Retry-After" header is found in the resp.
// The jitter is a random value up to 1 second.
RetryBackoff func(n int, r *http.Request, resp *http.Response) time.Duration
dirMu sync.Mutex // guards writes to dir
dir *Directory // cached result of Client's Discover method
noncesMu sync.Mutex
nonces map[string]struct{} // nonces collected from previous responses
}
// Discover performs ACME server discovery using c.DirectoryURL.
//
// It caches successful result. So, subsequent calls will not result in
// a network round-trip. This also means mutating c.DirectoryURL after successful call
// of this method will have no effect.
func (c *Client) Discover(ctx context.Context) (Directory, error) {
c.dirMu.Lock()
defer c.dirMu.Unlock()
if c.dir != nil {
return *c.dir, nil
}
dirURL := c.DirectoryURL
if dirURL == "" {
dirURL = LetsEncryptURL
}
res, err := c.get(ctx, dirURL, wantStatus(http.StatusOK))
if err != nil {
return Directory{}, err
}
defer res.Body.Close()
c.addNonce(res.Header)
var v struct {
Reg string `json:"new-reg"`
Authz string `json:"new-authz"`
Cert string `json:"new-cert"`
Revoke string `json:"revoke-cert"`
Meta struct {
Terms string `json:"terms-of-service"`
Website string `json:"website"`
CAA []string `json:"caa-identities"`
}
}
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return Directory{}, err
}
c.dir = &Directory{
RegURL: v.Reg,
AuthzURL: v.Authz,
CertURL: v.Cert,
RevokeURL: v.Revoke,
Terms: v.Meta.Terms,
Website: v.Meta.Website,
CAA: v.Meta.CAA,
}
return *c.dir, nil
}
// CreateCert requests a new certificate using the Certificate Signing Request csr encoded in DER format.
// The exp argument indicates the desired certificate validity duration. CA may issue a certificate
// with a different duration.
// If the bundle argument is true, the returned value will also contain the CA (issuer) certificate chain.
//
// In the case where CA server does not provide the issued certificate in the response,
// CreateCert will poll certURL using c.FetchCert, which will result in additional round-trips.
// In such a scenario, the caller can cancel the polling with ctx.
//
// CreateCert returns an error if the CA's response or chain was unreasonably large.
// Callers are encouraged to parse the returned value to ensure the certificate is valid and has the expected features.
func (c *Client) CreateCert(ctx context.Context, csr []byte, exp time.Duration, bundle bool) (der [][]byte, certURL string, err error) {
if _, err := c.Discover(ctx); err != nil {
return nil, "", err
}
req := struct {
Resource string `json:"resource"`
CSR string `json:"csr"`
NotBefore string `json:"notBefore,omitempty"`
NotAfter string `json:"notAfter,omitempty"`
}{
Resource: "new-cert",
CSR: base64.RawURLEncoding.EncodeToString(csr),
}
now := timeNow()
req.NotBefore = now.Format(time.RFC3339)
if exp > 0 {
req.NotAfter = now.Add(exp).Format(time.RFC3339)
}
res, err := c.post(ctx, c.Key, c.dir.CertURL, req, wantStatus(http.StatusCreated))
if err != nil {
return nil, "", err
}
defer res.Body.Close()
curl := res.Header.Get("Location") // cert permanent URL
if res.ContentLength == 0 {
// no cert in the body; poll until we get it
cert, err := c.FetchCert(ctx, curl, bundle)
return cert, curl, err
}
// slurp issued cert and CA chain, if requested
cert, err := c.responseCert(ctx, res, bundle)
return cert, curl, err
}
// FetchCert retrieves already issued certificate from the given url, in DER format.
// It retries the request until the certificate is successfully retrieved,
// context is cancelled by the caller or an error response is received.
//
// The returned value will also contain the CA (issuer) certificate if the bundle argument is true.
//
// FetchCert returns an error if the CA's response or chain was unreasonably large.
// Callers are encouraged to parse the returned value to ensure the certificate is valid
// and has expected features.
func (c *Client) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
if err != nil {
return nil, err
}
return c.responseCert(ctx, res, bundle)
}
// RevokeCert revokes a previously issued certificate cert, provided in DER format.
//
// The key argument, used to sign the request, must be authorized
// to revoke the certificate. It's up to the CA to decide which keys are authorized.
// For instance, the key pair of the certificate may be authorized.
// If the key is nil, c.Key is used instead.
func (c *Client) RevokeCert(ctx context.Context, key crypto.Signer, cert []byte, reason CRLReasonCode) error {
if _, err := c.Discover(ctx); err != nil {
return err
}
body := &struct {
Resource string `json:"resource"`
Cert string `json:"certificate"`
Reason int `json:"reason"`
}{
Resource: "revoke-cert",
Cert: base64.RawURLEncoding.EncodeToString(cert),
Reason: int(reason),
}
if key == nil {
key = c.Key
}
res, err := c.post(ctx, key, c.dir.RevokeURL, body, wantStatus(http.StatusOK))
if err != nil {
return err
}
defer res.Body.Close()
return nil
}
// AcceptTOS always returns true to indicate the acceptance of a CA's Terms of Service
// during account registration. See Register method of Client for more details.
func AcceptTOS(tosURL string) bool { return true }
// Register creates a new account registration by following the "new-reg" flow.
// It returns the registered account. The account is not modified.
//
// The registration may require the caller to agree to the CA's Terms of Service (TOS).
// If so, and the account has not indicated the acceptance of the terms (see Account for details),
// Register calls prompt with a TOS URL provided by the CA. Prompt should report
// whether the caller agrees to the terms. To always accept the terms, the caller can use AcceptTOS.
func (c *Client) Register(ctx context.Context, a *Account, prompt func(tosURL string) bool) (*Account, error) {
if _, err := c.Discover(ctx); err != nil {
return nil, err
}
var err error
if a, err = c.doReg(ctx, c.dir.RegURL, "new-reg", a); err != nil {
return nil, err
}
var accept bool
if a.CurrentTerms != "" && a.CurrentTerms != a.AgreedTerms {
accept = prompt(a.CurrentTerms)
}
if accept {
a.AgreedTerms = a.CurrentTerms
a, err = c.UpdateReg(ctx, a)
}
return a, err
}
// GetReg retrieves an existing registration.
// The url argument is an Account URI.
func (c *Client) GetReg(ctx context.Context, url string) (*Account, error) {
a, err := c.doReg(ctx, url, "reg", nil)
if err != nil {
return nil, err
}
a.URI = url
return a, nil
}
// UpdateReg updates an existing registration.
// It returns an updated account copy. The provided account is not modified.
func (c *Client) UpdateReg(ctx context.Context, a *Account) (*Account, error) {
uri := a.URI
a, err := c.doReg(ctx, uri, "reg", a)
if err != nil {
return nil, err
}
a.URI = uri
return a, nil
}
// Authorize performs the initial step in an authorization flow.
// The caller will then need to choose from and perform a set of returned
// challenges using c.Accept in order to successfully complete authorization.
//
// If an authorization has been previously granted, the CA may return
// a valid authorization (Authorization.Status is StatusValid). If so, the caller
// need not fulfill any challenge and can proceed to requesting a certificate.
func (c *Client) Authorize(ctx context.Context, domain string) (*Authorization, error) {
if _, err := c.Discover(ctx); err != nil {
return nil, err
}
type authzID struct {
Type string `json:"type"`
Value string `json:"value"`
}
req := struct {
Resource string `json:"resource"`
Identifier authzID `json:"identifier"`
}{
Resource: "new-authz",
Identifier: authzID{Type: "dns", Value: domain},
}
res, err := c.post(ctx, c.Key, c.dir.AuthzURL, req, wantStatus(http.StatusCreated))
if err != nil {
return nil, err
}
defer res.Body.Close()
var v wireAuthz
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
}
if v.Status != StatusPending && v.Status != StatusValid {
return nil, fmt.Errorf("acme: unexpected status: %s", v.Status)
}
return v.authorization(res.Header.Get("Location")), nil
}
// GetAuthorization retrieves an authorization identified by the given URL.
//
// If a caller needs to poll an authorization until its status is final,
// see the WaitAuthorization method.
func (c *Client) GetAuthorization(ctx context.Context, url string) (*Authorization, error) {
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
defer res.Body.Close()
var v wireAuthz
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
}
return v.authorization(url), nil
}
// RevokeAuthorization relinquishes an existing authorization identified
// by the given URL.
// The url argument is an Authorization.URI value.
//
// If successful, the caller will be required to obtain a new authorization
// using the Authorize method before being able to request a new certificate
// for the domain associated with the authorization.
//
// It does not revoke existing certificates.
func (c *Client) RevokeAuthorization(ctx context.Context, url string) error {
req := struct {
Resource string `json:"resource"`
Status string `json:"status"`
Delete bool `json:"delete"`
}{
Resource: "authz",
Status: "deactivated",
Delete: true,
}
res, err := c.post(ctx, c.Key, url, req, wantStatus(http.StatusOK))
if err != nil {
return err
}
defer res.Body.Close()
return nil
}
// WaitAuthorization polls an authorization at the given URL
// until it is in one of the final states, StatusValid or StatusInvalid,
// the ACME CA responded with a 4xx error code, or the context is done.
//
// It returns a non-nil Authorization only if its Status is StatusValid.
// In all other cases WaitAuthorization returns an error.
// If the Status is StatusInvalid, the returned error is of type *AuthorizationError.
func (c *Client) WaitAuthorization(ctx context.Context, url string) (*Authorization, error) {
for {
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
var raw wireAuthz
err = json.NewDecoder(res.Body).Decode(&raw)
res.Body.Close()
switch {
case err != nil:
// Skip and retry.
case raw.Status == StatusValid:
return raw.authorization(url), nil
case raw.Status == StatusInvalid:
return nil, raw.error(url)
}
// Exponential backoff is implemented in c.get above.
// This is just to prevent continuously hitting the CA
// while waiting for a final authorization status.
d := retryAfter(res.Header.Get("Retry-After"))
if d == 0 {
// Given that the fastest challenges TLS-SNI and HTTP-01
// require a CA to make at least 1 network round trip
// and most likely persist a challenge state,
// this default delay seems reasonable.
d = time.Second
}
t := time.NewTimer(d)
select {
case <-ctx.Done():
t.Stop()
return nil, ctx.Err()
case <-t.C:
// Retry.
}
}
}
// GetChallenge retrieves the current status of a challenge.
//
// A client typically polls a challenge status using this method.
func (c *Client) GetChallenge(ctx context.Context, url string) (*Challenge, error) {
res, err := c.get(ctx, url, wantStatus(http.StatusOK, http.StatusAccepted))
if err != nil {
return nil, err
}
defer res.Body.Close()
v := wireChallenge{URI: url}
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
}
return v.challenge(), nil
}
// Accept informs the server that the client accepts one of its challenges
// previously obtained with c.Authorize.
//
// The server will then perform the validation asynchronously.
func (c *Client) Accept(ctx context.Context, chal *Challenge) (*Challenge, error) {
auth, err := keyAuth(c.Key.Public(), chal.Token)
if err != nil {
return nil, err
}
req := struct {
Resource string `json:"resource"`
Type string `json:"type"`
Auth string `json:"keyAuthorization"`
}{
Resource: "challenge",
Type: chal.Type,
Auth: auth,
}
res, err := c.post(ctx, c.Key, chal.URI, req, wantStatus(
http.StatusOK, // according to the spec
http.StatusAccepted, // Let's Encrypt: see https://goo.gl/WsJ7VT (acme-divergences.md)
))
if err != nil {
return nil, err
}
defer res.Body.Close()
var v wireChallenge
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
}
return v.challenge(), nil
}
// DNS01ChallengeRecord returns a DNS record value for a dns-01 challenge response.
// A TXT record containing the returned value must be provisioned under
// "_acme-challenge" name of the domain being validated.
//
// The token argument is a Challenge.Token value.
func (c *Client) DNS01ChallengeRecord(token string) (string, error) {
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
return "", err
}
b := sha256.Sum256([]byte(ka))
return base64.RawURLEncoding.EncodeToString(b[:]), nil
}
// HTTP01ChallengeResponse returns the response for an http-01 challenge.
// Servers should respond with the value to HTTP requests at the URL path
// provided by HTTP01ChallengePath to validate the challenge and prove control
// over a domain name.
//
// The token argument is a Challenge.Token value.
func (c *Client) HTTP01ChallengeResponse(token string) (string, error) {
return keyAuth(c.Key.Public(), token)
}
// HTTP01ChallengePath returns the URL path at which the response for an http-01 challenge
// should be provided by the servers.
// The response value can be obtained with HTTP01ChallengeResponse.
//
// The token argument is a Challenge.Token value.
func (c *Client) HTTP01ChallengePath(token string) string {
return "/.well-known/acme-challenge/" + token
}
// TLSSNI01ChallengeCert creates a certificate for TLS-SNI-01 challenge response.
// Servers can present the certificate to validate the challenge and prove control
// over a domain name.
//
// The implementation is incomplete in that the returned value is a single certificate,
// computed only for Z0 of the key authorization. ACME CAs are expected to update
// their implementations to use the newer version, TLS-SNI-02.
// For more details on TLS-SNI-01 see https://tools.ietf.org/html/draft-ietf-acme-acme-01#section-7.3.
//
// The token argument is a Challenge.Token value.
// If a WithKey option is provided, its private part signs the returned cert,
// and the public part is used to specify the signee.
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name of the TLS ClientHello matches exactly the returned name value.
func (c *Client) TLSSNI01ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
return tls.Certificate{}, "", err
}
b := sha256.Sum256([]byte(ka))
h := hex.EncodeToString(b[:])
name = fmt.Sprintf("%s.%s.acme.invalid", h[:32], h[32:])
cert, err = tlsChallengeCert([]string{name}, opt)
if err != nil {
return tls.Certificate{}, "", err
}
return cert, name, nil
}
// TLSSNI02ChallengeCert creates a certificate for TLS-SNI-02 challenge response.
// Servers can present the certificate to validate the challenge and prove control
// over a domain name. For more details on TLS-SNI-02 see
// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-7.3.
//
// The token argument is a Challenge.Token value.
// If a WithKey option is provided, its private part signs the returned cert,
// and the public part is used to specify the signee.
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name in the TLS ClientHello matches exactly the returned name value.
func (c *Client) TLSSNI02ChallengeCert(token string, opt ...CertOption) (cert tls.Certificate, name string, err error) {
b := sha256.Sum256([]byte(token))
h := hex.EncodeToString(b[:])
sanA := fmt.Sprintf("%s.%s.token.acme.invalid", h[:32], h[32:])
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
return tls.Certificate{}, "", err
}
b = sha256.Sum256([]byte(ka))
h = hex.EncodeToString(b[:])
sanB := fmt.Sprintf("%s.%s.ka.acme.invalid", h[:32], h[32:])
cert, err = tlsChallengeCert([]string{sanA, sanB}, opt)
if err != nil {
return tls.Certificate{}, "", err
}
return cert, sanA, nil
}
// TLSALPN01ChallengeCert creates a certificate for TLS-ALPN-01 challenge response.
// Servers can present the certificate to validate the challenge and prove control
// over a domain name. For more details on TLS-ALPN-01 see
// https://tools.ietf.org/html/draft-shoemaker-acme-tls-alpn-00#section-3
//
// The token argument is a Challenge.Token value.
// If a WithKey option is provided, its private part signs the returned cert,
// and the public part is used to specify the signee.
// If no WithKey option is provided, a new ECDSA key is generated using P-256 curve.
//
// The returned certificate is valid for the next 24 hours and must be presented only when
// the server name in the TLS ClientHello matches the domain, and the special acme-tls/1 ALPN protocol
// has been specified.
func (c *Client) TLSALPN01ChallengeCert(token, domain string, opt ...CertOption) (cert tls.Certificate, err error) {
ka, err := keyAuth(c.Key.Public(), token)
if err != nil {
return tls.Certificate{}, err
}
shasum := sha256.Sum256([]byte(ka))
extValue, err := asn1.Marshal(shasum[:])
if err != nil {
return tls.Certificate{}, err
}
acmeExtension := pkix.Extension{
Id: idPeACMEIdentifierV1,
Critical: true,
Value: extValue,
}
tmpl := defaultTLSChallengeCertTemplate()
var newOpt []CertOption
for _, o := range opt {
switch o := o.(type) {
case *certOptTemplate:
t := *(*x509.Certificate)(o) // shallow copy is ok
tmpl = &t
default:
newOpt = append(newOpt, o)
}
}
tmpl.ExtraExtensions = append(tmpl.ExtraExtensions, acmeExtension)
newOpt = append(newOpt, WithTemplate(tmpl))
return tlsChallengeCert([]string{domain}, newOpt)
}
// doReg sends all types of registration requests.
// The type of request is identified by typ argument, which is a "resource"
// in the ACME spec terms.
//
// A non-nil acct argument indicates whether the intention is to mutate data
// of the Account. Only Contact and Agreement of its fields are used
// in such cases.
func (c *Client) doReg(ctx context.Context, url string, typ string, acct *Account) (*Account, error) {
req := struct {
Resource string `json:"resource"`
Contact []string `json:"contact,omitempty"`
Agreement string `json:"agreement,omitempty"`
}{
Resource: typ,
}
if acct != nil {
req.Contact = acct.Contact
req.Agreement = acct.AgreedTerms
}
res, err := c.post(ctx, c.Key, url, req, wantStatus(
http.StatusOK, // updates and deletes
http.StatusCreated, // new account creation
http.StatusAccepted, // Let's Encrypt divergent implementation
))
if err != nil {
return nil, err
}
defer res.Body.Close()
var v struct {
Contact []string
Agreement string
Authorizations string
Certificates string
}
if err := json.NewDecoder(res.Body).Decode(&v); err != nil {
return nil, fmt.Errorf("acme: invalid response: %v", err)
}
var tos string
if v := linkHeader(res.Header, "terms-of-service"); len(v) > 0 {
tos = v[0]
}
var authz string
if v := linkHeader(res.Header, "next"); len(v) > 0 {
authz = v[0]
}
return &Account{
URI: res.Header.Get("Location"),
Contact: v.Contact,
AgreedTerms: v.Agreement,
CurrentTerms: tos,
Authz: authz,
Authorizations: v.Authorizations,
Certificates: v.Certificates,
}, nil
}
// popNonce returns a nonce value previously stored with c.addNonce
// or fetches a fresh one from the given URL.
func (c *Client) popNonce(ctx context.Context, url string) (string, error) {
c.noncesMu.Lock()
defer c.noncesMu.Unlock()
if len(c.nonces) == 0 {
return c.fetchNonce(ctx, url)
}
var nonce string
for nonce = range c.nonces {
delete(c.nonces, nonce)
break
}
return nonce, nil
}
// clearNonces clears any stored nonces
func (c *Client) clearNonces() {
c.noncesMu.Lock()
defer c.noncesMu.Unlock()
c.nonces = make(map[string]struct{})
}
// addNonce stores a nonce value found in h (if any) for future use.
func (c *Client) addNonce(h http.Header) {
v := nonceFromHeader(h)
if v == "" {
return
}
c.noncesMu.Lock()
defer c.noncesMu.Unlock()
if len(c.nonces) >= maxNonces {
return
}
if c.nonces == nil {
c.nonces = make(map[string]struct{})
}
c.nonces[v] = struct{}{}
}
func (c *Client) fetchNonce(ctx context.Context, url string) (string, error) {
r, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return "", err
}
resp, err := c.doNoRetry(ctx, r)
if err != nil {
return "", err
}
defer resp.Body.Close()
nonce := nonceFromHeader(resp.Header)
if nonce == "" {
if resp.StatusCode > 299 {
return "", responseError(resp)
}
return "", errors.New("acme: nonce not found")
}
return nonce, nil
}
func nonceFromHeader(h http.Header) string {
return h.Get("Replay-Nonce")
}
func (c *Client) responseCert(ctx context.Context, res *http.Response, bundle bool) ([][]byte, error) {
b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1))
if err != nil {
return nil, fmt.Errorf("acme: response stream: %v", err)
}
if len(b) > maxCertSize {
return nil, errors.New("acme: certificate is too big")
}
cert := [][]byte{b}
if !bundle {
return cert, nil
}
// Append CA chain cert(s).
// At least one is required according to the spec:
// https://tools.ietf.org/html/draft-ietf-acme-acme-03#section-6.3.1
up := linkHeader(res.Header, "up")
if len(up) == 0 {
return nil, errors.New("acme: rel=up link not found")
}
if len(up) > maxChainLen {
return nil, errors.New("acme: rel=up link is too large")
}
for _, url := range up {
cc, err := c.chainCert(ctx, url, 0)
if err != nil {
return nil, err
}
cert = append(cert, cc...)
}
return cert, nil
}
// chainCert fetches CA certificate chain recursively by following "up" links.
// Each recursive call increments the depth by 1, resulting in an error
// if the recursion level reaches maxChainLen.
//
// First chainCert call starts with depth of 0.
func (c *Client) chainCert(ctx context.Context, url string, depth int) ([][]byte, error) {
if depth >= maxChainLen {
return nil, errors.New("acme: certificate chain is too deep")
}
res, err := c.get(ctx, url, wantStatus(http.StatusOK))
if err != nil {
return nil, err
}
defer res.Body.Close()
b, err := ioutil.ReadAll(io.LimitReader(res.Body, maxCertSize+1))
if err != nil {
return nil, err
}
if len(b) > maxCertSize {
return nil, errors.New("acme: certificate is too big")
}
chain := [][]byte{b}
uplink := linkHeader(res.Header, "up")
if len(uplink) > maxChainLen {
return nil, errors.New("acme: certificate chain is too large")
}
for _, up := range uplink {
cc, err := c.chainCert(ctx, up, depth+1)
if err != nil {
return nil, err
}
chain = append(chain, cc...)
}
return chain, nil
}
// linkHeader returns URI-Reference values of all Link headers
// with relation-type rel.
// See https://tools.ietf.org/html/rfc5988#section-5 for details.
func linkHeader(h http.Header, rel string) []string {
var links []string
for _, v := range h["Link"] {
parts := strings.Split(v, ";")
for _, p := range parts {
p = strings.TrimSpace(p)
if !strings.HasPrefix(p, "rel=") {
continue
}
if v := strings.Trim(p[4:], `"`); v == rel {
links = append(links, strings.Trim(parts[0], "<>"))
}
}
}
return links
}
// keyAuth generates a key authorization string for a given token.
func keyAuth(pub crypto.PublicKey, token string) (string, error) {
th, err := JWKThumbprint(pub)
if err != nil {
return "", err
}
return fmt.Sprintf("%s.%s", token, th), nil
}
// defaultTLSChallengeCertTemplate is a template used to create challenge certs for TLS challenges.
func defaultTLSChallengeCertTemplate() *x509.Certificate {
return &x509.Certificate{
SerialNumber: big.NewInt(1),
NotBefore: time.Now(),
NotAfter: time.Now().Add(24 * time.Hour),
BasicConstraintsValid: true,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
}
}
// tlsChallengeCert creates a temporary certificate for TLS-SNI challenges
// with the given SANs and auto-generated public/private key pair.
// The Subject Common Name is set to the first SAN to aid debugging.
// To create a cert with a custom key pair, specify WithKey option.
func tlsChallengeCert(san []string, opt []CertOption) (tls.Certificate, error) {
var key crypto.Signer
tmpl := defaultTLSChallengeCertTemplate()
for _, o := range opt {
switch o := o.(type) {
case *certOptKey:
if key != nil {
return tls.Certificate{}, errors.New("acme: duplicate key option")
}
key = o.key
case *certOptTemplate:
t := *(*x509.Certificate)(o) // shallow copy is ok
tmpl = &t
default:
// package's fault, if we let this happen:
panic(fmt.Sprintf("unsupported option type %T", o))
}
}
if key == nil {
var err error
if key, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil {
return tls.Certificate{}, err
}
}
tmpl.DNSNames = san
if len(san) > 0 {
tmpl.Subject.CommonName = san[0]
}
der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
if err != nil {
return tls.Certificate{}, err
}
return tls.Certificate{
Certificate: [][]byte{der},
PrivateKey: key,
}, nil
}
// encodePEM returns b encoded as PEM with block of type typ.
func encodePEM(typ string, b []byte) []byte {
pb := &pem.Block{Type: typ, Bytes: b}
return pem.EncodeToMemory(pb)
}
// timeNow is useful for testing for fixed current time.
var timeNow = time.Now
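For orientation, a rough sketch of the pre-RFC-8555 issuance flow these methods implement, using only calls defined above plus the package's Account/Authorization/Challenge types (imports context, crypto, errors and this acme package); the domain, account key and CSR are placeholders:

func obtainCertSketch(ctx context.Context, key crypto.Signer, csrDER []byte) ([][]byte, error) {
	client := &acme.Client{Key: key}
	// register the account key and accept the CA's terms (skip if already registered)
	if _, err := client.Register(ctx, &acme.Account{}, acme.AcceptTOS); err != nil {
		return nil, err
	}
	authz, err := client.Authorize(ctx, "example.com") // placeholder domain
	if err != nil {
		return nil, err
	}
	var chal *acme.Challenge
	for _, c := range authz.Challenges {
		if c.Type == "http-01" {
			chal = c
		}
	}
	if chal == nil {
		return nil, errors.New("no http-01 challenge offered")
	}
	body, err := client.HTTP01ChallengeResponse(chal.Token)
	if err != nil {
		return nil, err
	}
	_ = body // serve this value at client.HTTP01ChallengePath(chal.Token) over plain HTTP on port 80
	if _, err := client.Accept(ctx, chal); err != nil {
		return nil, err
	}
	if _, err := client.WaitAuthorization(ctx, authz.URI); err != nil {
		return nil, err
	}
	der, _, err := client.CreateCert(ctx, csrDER, 0, true) // bundle the issuer chain
	return der, err
}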

vendor/golang.org/x/crypto/acme/autocert/autocert.go generated vendored Normal file
File diff suppressed because it is too large

vendor/golang.org/x/crypto/acme/autocert/cache.go generated vendored Normal file
@@ -0,0 +1,130 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package autocert
import (
"context"
"errors"
"io/ioutil"
"os"
"path/filepath"
)
// ErrCacheMiss is returned when a certificate is not found in cache.
var ErrCacheMiss = errors.New("acme/autocert: certificate cache miss")
// Cache is used by Manager to store and retrieve previously obtained certificates
// and other account data as opaque blobs.
//
// Cache implementations should not rely on the key naming pattern. Keys can
// include any printable ASCII characters, except the following: \/:*?"<>|
type Cache interface {
// Get returns a certificate data for the specified key.
// If there's no such key, Get returns ErrCacheMiss.
Get(ctx context.Context, key string) ([]byte, error)
// Put stores the data in the cache under the specified key.
// Underlying implementations may use any data storage format,
// as long as the reverse operation, Get, results in the original data.
Put(ctx context.Context, key string, data []byte) error
// Delete removes a certificate data from the cache under the specified key.
// If there's no such key in the cache, Delete returns nil.
Delete(ctx context.Context, key string) error
}
// DirCache implements Cache using a directory on the local filesystem.
// If the directory does not exist, it will be created with 0700 permissions.
type DirCache string
// Get reads a certificate data from the specified file name.
func (d DirCache) Get(ctx context.Context, name string) ([]byte, error) {
name = filepath.Join(string(d), name)
var (
data []byte
err error
done = make(chan struct{})
)
go func() {
data, err = ioutil.ReadFile(name)
close(done)
}()
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-done:
}
if os.IsNotExist(err) {
return nil, ErrCacheMiss
}
return data, err
}
// Put writes the certificate data to the specified file name.
// The file will be created with 0600 permissions.
func (d DirCache) Put(ctx context.Context, name string, data []byte) error {
if err := os.MkdirAll(string(d), 0700); err != nil {
return err
}
done := make(chan struct{})
var err error
go func() {
defer close(done)
var tmp string
if tmp, err = d.writeTempFile(name, data); err != nil {
return
}
select {
case <-ctx.Done():
// Don't overwrite the file if the context was canceled.
default:
newName := filepath.Join(string(d), name)
err = os.Rename(tmp, newName)
}
}()
select {
case <-ctx.Done():
return ctx.Err()
case <-done:
}
return err
}
// Delete removes the specified file name.
func (d DirCache) Delete(ctx context.Context, name string) error {
name = filepath.Join(string(d), name)
var (
err error
done = make(chan struct{})
)
go func() {
err = os.Remove(name)
close(done)
}()
select {
case <-ctx.Done():
return ctx.Err()
case <-done:
}
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// writeTempFile writes b to a temporary file, closes the file and returns its path.
func (d DirCache) writeTempFile(prefix string, b []byte) (string, error) {
// TempFile uses 0600 permissions
f, err := ioutil.TempFile(string(d), prefix)
if err != nil {
return "", err
}
if _, err := f.Write(b); err != nil {
f.Close()
return "", err
}
return f.Name(), f.Close()
}
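The Cache interface above is easy to satisfy outside the package; a minimal in-memory sketch (imports context, sync and this autocert package), handy for tests where DirCache's disk writes are unwanted:

// memCache is a tiny in-memory Cache for tests; not part of the package.
type memCache struct {
	mu sync.Mutex
	m  map[string][]byte
}

func (c *memCache) Get(ctx context.Context, key string) ([]byte, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if b, ok := c.m[key]; ok {
		return append([]byte(nil), b...), nil // return a copy so callers can't mutate the cache
	}
	return nil, autocert.ErrCacheMiss
}

func (c *memCache) Put(ctx context.Context, key string, data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.m == nil {
		c.m = make(map[string][]byte)
	}
	c.m[key] = append([]byte(nil), data...)
	return nil
}

func (c *memCache) Delete(ctx context.Context, key string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.m, key)
	return nil
}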

vendor/golang.org/x/crypto/acme/autocert/listener.go generated vendored Normal file
@@ -0,0 +1,157 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package autocert
import (
"crypto/tls"
"log"
"net"
"os"
"path/filepath"
"runtime"
"time"
)
// NewListener returns a net.Listener that listens on the standard TLS
// port (443) on all interfaces and returns *tls.Conn connections with
// LetsEncrypt certificates for the provided domain or domains.
//
// It enables one-line HTTPS servers:
//
// log.Fatal(http.Serve(autocert.NewListener("example.com"), handler))
//
// NewListener is a convenience function for a common configuration.
// More complex or custom configurations can use the autocert.Manager
// type instead.
//
// Use of this function implies acceptance of the LetsEncrypt Terms of
// Service. If domains is not empty, the provided domains are passed
// to HostWhitelist. If domains is empty, the listener will do
// LetsEncrypt challenges for any requested domain, which is not
// recommended.
//
// Certificates are cached in a "golang-autocert" directory under an
// operating system-specific cache or temp directory. This may not
// be suitable for servers spanning multiple machines.
//
// The returned listener uses a *tls.Config that enables HTTP/2, and
// should only be used with servers that support HTTP/2.
//
// The returned Listener also enables TCP keep-alives on the accepted
// connections. The returned *tls.Conn are returned before their TLS
// handshake has completed.
func NewListener(domains ...string) net.Listener {
m := &Manager{
Prompt: AcceptTOS,
}
if len(domains) > 0 {
m.HostPolicy = HostWhitelist(domains...)
}
dir := cacheDir()
if err := os.MkdirAll(dir, 0700); err != nil {
log.Printf("warning: autocert.NewListener not using a cache: %v", err)
} else {
m.Cache = DirCache(dir)
}
return m.Listener()
}
// Listener listens on the standard TLS port (443) on all interfaces
// and returns a net.Listener returning *tls.Conn connections.
//
// The returned listener uses a *tls.Config that enables HTTP/2, and
// should only be used with servers that support HTTP/2.
//
// The returned Listener also enables TCP keep-alives on the accepted
// connections. The returned *tls.Conn are returned before their TLS
// handshake has completed.
//
// Unlike NewListener, it is the caller's responsibility to initialize
// the Manager m's Prompt, Cache, HostPolicy, and other desired options.
func (m *Manager) Listener() net.Listener {
ln := &listener{
m: m,
conf: m.TLSConfig(),
}
ln.tcpListener, ln.tcpListenErr = net.Listen("tcp", ":443")
return ln
}
type listener struct {
m *Manager
conf *tls.Config
tcpListener net.Listener
tcpListenErr error
}
func (ln *listener) Accept() (net.Conn, error) {
if ln.tcpListenErr != nil {
return nil, ln.tcpListenErr
}
conn, err := ln.tcpListener.Accept()
if err != nil {
return nil, err
}
tcpConn := conn.(*net.TCPConn)
// Because Listener is a convenience function, help out with
// this too. This is not possible for the caller to set once
// we return a *tcp.Conn wrapping an inaccessible net.Conn.
// If callers don't want this, they can do things the manual
// way and tweak as needed. But this is what net/http does
// itself, so copy that. If net/http changes, we can change
// here too.
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(3 * time.Minute)
return tls.Server(tcpConn, ln.conf), nil
}
func (ln *listener) Addr() net.Addr {
if ln.tcpListener != nil {
return ln.tcpListener.Addr()
}
// net.Listen failed. Return something non-nil in case callers
// call Addr before Accept:
return &net.TCPAddr{IP: net.IP{0, 0, 0, 0}, Port: 443}
}
func (ln *listener) Close() error {
if ln.tcpListenErr != nil {
return ln.tcpListenErr
}
return ln.tcpListener.Close()
}
func homeDir() string {
if runtime.GOOS == "windows" {
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
if h := os.Getenv("HOME"); h != "" {
return h
}
return "/"
}
func cacheDir() string {
const base = "golang-autocert"
switch runtime.GOOS {
case "darwin":
return filepath.Join(homeDir(), "Library", "Caches", base)
case "windows":
for _, ev := range []string{"APPDATA", "CSIDL_APPDATA", "TEMP", "TMP"} {
if v := os.Getenv(ev); v != "" {
return filepath.Join(v, base)
}
}
// Worst case:
return filepath.Join(homeDir(), base)
}
if xdg := os.Getenv("XDG_CACHE_HOME"); xdg != "" {
return filepath.Join(xdg, base)
}
return filepath.Join(homeDir(), ".cache", base)
}
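Beyond the one-line NewListener above, the comments describe a caller-managed setup; a hedged sketch of that wiring (imports net/http and this autocert package; the domains and cache path are placeholders):

func serveAutocertSketch(handler http.Handler) error {
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.com", "www.example.com"), // placeholder domains
		Cache:      autocert.DirCache("/var/lib/myapp/autocert"),             // illustrative path
	}
	srv := &http.Server{
		Addr:      ":443",
		Handler:   handler,
		TLSConfig: m.TLSConfig(),
	}
	// empty cert/key paths: certificates are supplied by the manager at handshake time
	return srv.ListenAndServeTLS("", "")
}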

vendor/golang.org/x/crypto/acme/autocert/renewal.go generated vendored Normal file
@@ -0,0 +1,141 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package autocert
import (
"context"
"crypto"
"sync"
"time"
)
// renewJitter is the maximum deviation from Manager.RenewBefore.
const renewJitter = time.Hour
// domainRenewal tracks the state used by the periodic timers
// renewing a single domain's cert.
type domainRenewal struct {
m *Manager
ck certKey
key crypto.Signer
timerMu sync.Mutex
timer *time.Timer
}
// start starts a cert renewal timer at the time
// defined by the certificate expiration time exp.
//
// If the timer is already started, calling start is a noop.
func (dr *domainRenewal) start(exp time.Time) {
dr.timerMu.Lock()
defer dr.timerMu.Unlock()
if dr.timer != nil {
return
}
dr.timer = time.AfterFunc(dr.next(exp), dr.renew)
}
// stop stops the cert renewal timer.
// If the timer is already stopped, calling stop is a noop.
func (dr *domainRenewal) stop() {
dr.timerMu.Lock()
defer dr.timerMu.Unlock()
if dr.timer == nil {
return
}
dr.timer.Stop()
dr.timer = nil
}
// renew is called periodically by a timer.
// The first renew call is kicked off by dr.start.
func (dr *domainRenewal) renew() {
dr.timerMu.Lock()
defer dr.timerMu.Unlock()
if dr.timer == nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
// TODO: rotate dr.key at some point?
next, err := dr.do(ctx)
if err != nil {
next = renewJitter / 2
next += time.Duration(pseudoRand.int63n(int64(next)))
}
dr.timer = time.AfterFunc(next, dr.renew)
testDidRenewLoop(next, err)
}
// updateState locks and replaces the relevant Manager.state item with the given
// state. It additionally updates dr.key with the given state's key.
func (dr *domainRenewal) updateState(state *certState) {
dr.m.stateMu.Lock()
defer dr.m.stateMu.Unlock()
dr.key = state.key
dr.m.state[dr.ck] = state
}
// do is similar to Manager.createCert but it doesn't lock a Manager.state item.
// Instead, it requests a new certificate independently and, upon success,
// replaces dr.m.state item with a new one and updates cache for the given domain.
//
// It may lock and update the Manager.state if the expiration date of the currently
// cached cert is far enough in the future.
//
// The returned value is a time interval after which the renewal should occur again.
func (dr *domainRenewal) do(ctx context.Context) (time.Duration, error) {
// a race is likely unavoidable in a distributed environment
// but we try nonetheless
if tlscert, err := dr.m.cacheGet(ctx, dr.ck); err == nil {
next := dr.next(tlscert.Leaf.NotAfter)
if next > dr.m.renewBefore()+renewJitter {
signer, ok := tlscert.PrivateKey.(crypto.Signer)
if ok {
state := &certState{
key: signer,
cert: tlscert.Certificate,
leaf: tlscert.Leaf,
}
dr.updateState(state)
return next, nil
}
}
}
der, leaf, err := dr.m.authorizedCert(ctx, dr.key, dr.ck)
if err != nil {
return 0, err
}
state := &certState{
key: dr.key,
cert: der,
leaf: leaf,
}
tlscert, err := state.tlscert()
if err != nil {
return 0, err
}
if err := dr.m.cachePut(ctx, dr.ck, tlscert); err != nil {
return 0, err
}
dr.updateState(state)
return dr.next(leaf.NotAfter), nil
}
func (dr *domainRenewal) next(expiry time.Time) time.Duration {
d := expiry.Sub(timeNow()) - dr.m.renewBefore()
// add a bit of randomness to renew deadline
n := pseudoRand.int63n(int64(renewJitter))
d -= time.Duration(n)
if d < 0 {
return 0
}
return d
}
var testDidRenewLoop = func(next time.Duration, err error) {}

vendor/golang.org/x/crypto/acme/http.go generated vendored Normal file
@@ -0,0 +1,281 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package acme
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net/http"
"strconv"
"strings"
"time"
)
// retryTimer encapsulates common logic for retrying unsuccessful requests.
// It is not safe for concurrent use.
type retryTimer struct {
// backoffFn provides backoff delay sequence for retries.
// See Client.RetryBackoff doc comment.
backoffFn func(n int, r *http.Request, res *http.Response) time.Duration
// n is the current retry attempt.
n int
}
func (t *retryTimer) inc() {
t.n++
}
// backoff pauses the current goroutine as described in Client.RetryBackoff.
func (t *retryTimer) backoff(ctx context.Context, r *http.Request, res *http.Response) error {
d := t.backoffFn(t.n, r, res)
if d <= 0 {
return fmt.Errorf("acme: no more retries for %s; tried %d time(s)", r.URL, t.n)
}
wakeup := time.NewTimer(d)
defer wakeup.Stop()
select {
case <-ctx.Done():
return ctx.Err()
case <-wakeup.C:
return nil
}
}
func (c *Client) retryTimer() *retryTimer {
f := c.RetryBackoff
if f == nil {
f = defaultBackoff
}
return &retryTimer{backoffFn: f}
}
// defaultBackoff provides default Client.RetryBackoff implementation
// using a truncated exponential backoff algorithm,
// as described in Client.RetryBackoff.
//
// The n argument is always bounded between 1 and 30.
// The returned value is always greater than 0.
func defaultBackoff(n int, r *http.Request, res *http.Response) time.Duration {
const max = 10 * time.Second
var jitter time.Duration
if x, err := rand.Int(rand.Reader, big.NewInt(1000)); err == nil {
// Set the minimum to 1ms to avoid a case where
// an invalid Retry-After value is parsed into 0 below,
// resulting in the 0 returned value which would unintentionally
// stop the retries.
jitter = (1 + time.Duration(x.Int64())) * time.Millisecond
}
if v, ok := res.Header["Retry-After"]; ok {
return retryAfter(v[0]) + jitter
}
if n < 1 {
n = 1
}
if n > 30 {
n = 30
}
d := time.Duration(1<<uint(n-1))*time.Second + jitter
if d > max {
return max
}
return d
}
// retryAfter parses a Retry-After HTTP header value,
// trying to convert v into an int (seconds) or use http.ParseTime otherwise.
// It returns zero value if v cannot be parsed.
func retryAfter(v string) time.Duration {
if i, err := strconv.Atoi(v); err == nil {
return time.Duration(i) * time.Second
}
t, err := http.ParseTime(v)
if err != nil {
return 0
}
return t.Sub(timeNow())
}
// resOkay is a function that reports whether the provided response is okay.
// It is expected to keep the response body unread.
type resOkay func(*http.Response) bool
// wantStatus returns a function which reports whether the code
// matches the status code of a response.
func wantStatus(codes ...int) resOkay {
return func(res *http.Response) bool {
for _, code := range codes {
if code == res.StatusCode {
return true
}
}
return false
}
}
// get issues an unsigned GET request to the specified URL.
// It returns a non-error value only when ok reports true.
//
// get retries unsuccessful attempts according to c.RetryBackoff
// until the context is done or a non-retriable error is received.
func (c *Client) get(ctx context.Context, url string, ok resOkay) (*http.Response, error) {
retry := c.retryTimer()
for {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
res, err := c.doNoRetry(ctx, req)
switch {
case err != nil:
return nil, err
case ok(res):
return res, nil
case isRetriable(res.StatusCode):
retry.inc()
resErr := responseError(res)
res.Body.Close()
// Ignore the error value from retry.backoff
// and return the one from last retry, as received from the CA.
if retry.backoff(ctx, req, res) != nil {
return nil, resErr
}
default:
defer res.Body.Close()
return nil, responseError(res)
}
}
}
// post issues a signed POST request in JWS format using the provided key
// to the specified URL.
// It returns a non-error value only when ok reports true.
//
// post retries unsuccessful attempts according to c.RetryBackoff
// until the context is done or a non-retriable error is received.
// It uses postNoRetry to make individual requests.
func (c *Client) post(ctx context.Context, key crypto.Signer, url string, body interface{}, ok resOkay) (*http.Response, error) {
retry := c.retryTimer()
for {
res, req, err := c.postNoRetry(ctx, key, url, body)
if err != nil {
return nil, err
}
if ok(res) {
return res, nil
}
resErr := responseError(res)
res.Body.Close()
switch {
// Check for bad nonce before isRetriable because it may have been returned
// with an unretriable response code such as 400 Bad Request.
case isBadNonce(resErr):
// Consider any previously stored nonce values to be invalid.
c.clearNonces()
case !isRetriable(res.StatusCode):
return nil, resErr
}
retry.inc()
// Ignore the error value from retry.backoff
// and return the one from last retry, as received from the CA.
if err := retry.backoff(ctx, req, res); err != nil {
return nil, resErr
}
}
}
// postNoRetry signs the body with the given key and POSTs it to the provided url.
// The body argument must be JSON-serializable.
// It is used by c.post to retry unsuccessful attempts.
func (c *Client) postNoRetry(ctx context.Context, key crypto.Signer, url string, body interface{}) (*http.Response, *http.Request, error) {
nonce, err := c.popNonce(ctx, url)
if err != nil {
return nil, nil, err
}
b, err := jwsEncodeJSON(body, key, nonce)
if err != nil {
return nil, nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewReader(b))
if err != nil {
return nil, nil, err
}
req.Header.Set("Content-Type", "application/jose+json")
res, err := c.doNoRetry(ctx, req)
if err != nil {
return nil, nil, err
}
c.addNonce(res.Header)
return res, req, nil
}
// doNoRetry issues a request req, replacing its context (if any) with ctx.
func (c *Client) doNoRetry(ctx context.Context, req *http.Request) (*http.Response, error) {
res, err := c.httpClient().Do(req.WithContext(ctx))
if err != nil {
select {
case <-ctx.Done():
// Prefer the unadorned context error.
// (The acme package had tests assuming this, previously from ctxhttp's
// behavior, predating net/http supporting contexts natively)
// TODO(bradfitz): reconsider this in the future. But for now this
// requires no test updates.
return nil, ctx.Err()
default:
return nil, err
}
}
return res, nil
}
func (c *Client) httpClient() *http.Client {
if c.HTTPClient != nil {
return c.HTTPClient
}
return http.DefaultClient
}
// isBadNonce reports whether err is an ACME "badnonce" error.
func isBadNonce(err error) bool {
// According to the spec badNonce is urn:ietf:params:acme:error:badNonce.
// However, ACME servers in the wild return their own versions of the error.
// See https://tools.ietf.org/html/draft-ietf-acme-acme-02#section-5.4
// and https://github.com/letsencrypt/boulder/blob/0e07eacb/docs/acme-divergences.md#section-66.
ae, ok := err.(*Error)
return ok && strings.HasSuffix(strings.ToLower(ae.ProblemType), ":badnonce")
}
// isRetriable reports whether a request can be retried
// based on the response status code.
//
// Note that a "bad nonce" error is returned with a non-retriable 400 Bad Request code.
// Callers should parse the response and check with isBadNonce.
func isRetriable(code int) bool {
return code <= 399 || code >= 500 || code == http.StatusTooManyRequests
}
// responseError creates an error of Error type from resp.
func responseError(resp *http.Response) error {
// don't care if ReadAll returns an error:
// json.Unmarshal will fail in that case anyway
b, _ := ioutil.ReadAll(resp.Body)
e := &wireError{Status: resp.StatusCode}
if err := json.Unmarshal(b, e); err != nil {
// this is not a regular error response:
// populate detail with anything we received,
// e.Status will already contain HTTP response code value
e.Detail = string(b)
if e.Detail == "" {
e.Detail = resp.Status
}
}
return e.error(resp.Header)
}

153
vendor/golang.org/x/crypto/acme/jws.go generated vendored Normal file

@ -0,0 +1,153 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package acme
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
_ "crypto/sha512" // need for EC keys
"encoding/base64"
"encoding/json"
"fmt"
"math/big"
)
// jwsEncodeJSON signs claimset using provided key and a nonce.
// The result is serialized in JSON format.
// See https://tools.ietf.org/html/rfc7515#section-7.
func jwsEncodeJSON(claimset interface{}, key crypto.Signer, nonce string) ([]byte, error) {
jwk, err := jwkEncode(key.Public())
if err != nil {
return nil, err
}
alg, sha := jwsHasher(key)
if alg == "" || !sha.Available() {
return nil, ErrUnsupportedKey
}
phead := fmt.Sprintf(`{"alg":%q,"jwk":%s,"nonce":%q}`, alg, jwk, nonce)
phead = base64.RawURLEncoding.EncodeToString([]byte(phead))
cs, err := json.Marshal(claimset)
if err != nil {
return nil, err
}
payload := base64.RawURLEncoding.EncodeToString(cs)
hash := sha.New()
hash.Write([]byte(phead + "." + payload))
sig, err := jwsSign(key, sha, hash.Sum(nil))
if err != nil {
return nil, err
}
enc := struct {
Protected string `json:"protected"`
Payload string `json:"payload"`
Sig string `json:"signature"`
}{
Protected: phead,
Payload: payload,
Sig: base64.RawURLEncoding.EncodeToString(sig),
}
return json.Marshal(&enc)
}
// jwkEncode encodes public part of an RSA or ECDSA key into a JWK.
// The result is also suitable for creating a JWK thumbprint.
// https://tools.ietf.org/html/rfc7517
func jwkEncode(pub crypto.PublicKey) (string, error) {
switch pub := pub.(type) {
case *rsa.PublicKey:
// https://tools.ietf.org/html/rfc7518#section-6.3.1
n := pub.N
e := big.NewInt(int64(pub.E))
// Field order is important.
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
return fmt.Sprintf(`{"e":"%s","kty":"RSA","n":"%s"}`,
base64.RawURLEncoding.EncodeToString(e.Bytes()),
base64.RawURLEncoding.EncodeToString(n.Bytes()),
), nil
case *ecdsa.PublicKey:
// https://tools.ietf.org/html/rfc7518#section-6.2.1
p := pub.Curve.Params()
n := p.BitSize / 8
if p.BitSize%8 != 0 {
n++
}
x := pub.X.Bytes()
if n > len(x) {
x = append(make([]byte, n-len(x)), x...)
}
y := pub.Y.Bytes()
if n > len(y) {
y = append(make([]byte, n-len(y)), y...)
}
// Field order is important.
// See https://tools.ietf.org/html/rfc7638#section-3.3 for details.
return fmt.Sprintf(`{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`,
p.Name,
base64.RawURLEncoding.EncodeToString(x),
base64.RawURLEncoding.EncodeToString(y),
), nil
}
return "", ErrUnsupportedKey
}
// jwsSign signs the digest using the given key.
// It returns ErrUnsupportedKey if the key type is unknown.
// The hash is used only for RSA keys.
func jwsSign(key crypto.Signer, hash crypto.Hash, digest []byte) ([]byte, error) {
switch key := key.(type) {
case *rsa.PrivateKey:
return key.Sign(rand.Reader, digest, hash)
case *ecdsa.PrivateKey:
r, s, err := ecdsa.Sign(rand.Reader, key, digest)
if err != nil {
return nil, err
}
rb, sb := r.Bytes(), s.Bytes()
size := key.Params().BitSize / 8
if size%8 > 0 {
size++
}
sig := make([]byte, size*2)
copy(sig[size-len(rb):], rb)
copy(sig[size*2-len(sb):], sb)
return sig, nil
}
return nil, ErrUnsupportedKey
}
// jwsHasher indicates suitable JWS algorithm name and a hash function
// to use for signing a digest with the provided key.
// It returns ("", 0) if the key is not supported.
func jwsHasher(key crypto.Signer) (string, crypto.Hash) {
switch key := key.(type) {
case *rsa.PrivateKey:
return "RS256", crypto.SHA256
case *ecdsa.PrivateKey:
switch key.Params().Name {
case "P-256":
return "ES256", crypto.SHA256
case "P-384":
return "ES384", crypto.SHA384
case "P-521":
return "ES512", crypto.SHA512
}
}
return "", 0
}
// JWKThumbprint creates a JWK thumbprint out of pub
// as specified in https://tools.ietf.org/html/rfc7638.
func JWKThumbprint(pub crypto.PublicKey) (string, error) {
jwk, err := jwkEncode(pub)
if err != nil {
return "", err
}
b := sha256.Sum256([]byte(jwk))
return base64.RawURLEncoding.EncodeToString(b[:]), nil
}

329
vendor/golang.org/x/crypto/acme/types.go generated vendored Normal file

@ -0,0 +1,329 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package acme
import (
"crypto"
"crypto/x509"
"errors"
"fmt"
"net/http"
"strings"
"time"
)
// ACME server response statuses used to describe Authorization and Challenge states.
const (
StatusUnknown = "unknown"
StatusPending = "pending"
StatusProcessing = "processing"
StatusValid = "valid"
StatusInvalid = "invalid"
StatusRevoked = "revoked"
)
// CRLReasonCode identifies the reason for a certificate revocation.
type CRLReasonCode int
// CRL reason codes as defined in RFC 5280.
const (
CRLReasonUnspecified CRLReasonCode = 0
CRLReasonKeyCompromise CRLReasonCode = 1
CRLReasonCACompromise CRLReasonCode = 2
CRLReasonAffiliationChanged CRLReasonCode = 3
CRLReasonSuperseded CRLReasonCode = 4
CRLReasonCessationOfOperation CRLReasonCode = 5
CRLReasonCertificateHold CRLReasonCode = 6
CRLReasonRemoveFromCRL CRLReasonCode = 8
CRLReasonPrivilegeWithdrawn CRLReasonCode = 9
CRLReasonAACompromise CRLReasonCode = 10
)
// ErrUnsupportedKey is returned when an unsupported key type is encountered.
var ErrUnsupportedKey = errors.New("acme: unknown key type; only RSA and ECDSA are supported")
// Error is an ACME error, defined in Problem Details for HTTP APIs doc
// http://tools.ietf.org/html/draft-ietf-appsawg-http-problem.
type Error struct {
// StatusCode is the HTTP status code generated by the origin server.
StatusCode int
// ProblemType is a URI reference that identifies the problem type,
// typically in a "urn:acme:error:xxx" form.
ProblemType string
// Detail is a human-readable explanation specific to this occurrence of the problem.
Detail string
// Header is the original server error response headers.
// It may be nil.
Header http.Header
}
func (e *Error) Error() string {
return fmt.Sprintf("%d %s: %s", e.StatusCode, e.ProblemType, e.Detail)
}
// AuthorizationError indicates that an authorization for an identifier
// did not succeed.
// It contains all errors from Challenge items of the failed Authorization.
type AuthorizationError struct {
// URI uniquely identifies the failed Authorization.
URI string
// Identifier is an AuthzID.Value of the failed Authorization.
Identifier string
// Errors is a collection of non-nil error values of Challenge items
// of the failed Authorization.
Errors []error
}
func (a *AuthorizationError) Error() string {
e := make([]string, len(a.Errors))
for i, err := range a.Errors {
e[i] = err.Error()
}
return fmt.Sprintf("acme: authorization error for %s: %s", a.Identifier, strings.Join(e, "; "))
}
// RateLimit reports whether err represents a rate limit error and
// any Retry-After duration returned by the server.
//
// See the following for more details on rate limiting:
// https://tools.ietf.org/html/draft-ietf-acme-acme-05#section-5.6
func RateLimit(err error) (time.Duration, bool) {
e, ok := err.(*Error)
if !ok {
return 0, false
}
// Some CA implementations may return incorrect values.
// Use case-insensitive comparison.
if !strings.HasSuffix(strings.ToLower(e.ProblemType), ":ratelimited") {
return 0, false
}
if e.Header == nil {
return 0, true
}
return retryAfter(e.Header.Get("Retry-After")), true
}
// Account is a user account. It is associated with a private key.
type Account struct {
// URI is the account unique ID, which is also a URL used to retrieve
// account data from the CA.
URI string
// Contact is a slice of contact info used during registration.
Contact []string
// The terms of service the user has agreed to.
// A value not matching CurrentTerms indicates that the user hasn't agreed
// to the actual Terms of Service of the CA.
AgreedTerms string
// The CA's actual terms of service.
CurrentTerms string
// Authz is the authorization URL used to initiate a new authz flow.
Authz string
// Authorizations is a URI from which a list of authorizations
// granted to this account can be fetched via a GET request.
Authorizations string
// Certificates is a URI from which a list of certificates
// issued for this account can be fetched via a GET request.
Certificates string
}
// Directory is ACME server discovery data.
type Directory struct {
// RegURL is an account endpoint URL, allowing for creating new
// and modifying existing accounts.
RegURL string
// AuthzURL is used to initiate Identifier Authorization flow.
AuthzURL string
// CertURL is a new certificate issuance endpoint URL.
CertURL string
// RevokeURL is used to initiate a certificate revocation flow.
RevokeURL string
// Terms is a URI identifying the current terms of service.
Terms string
// Website is an HTTP or HTTPS URL locating a website
// providing more information about the ACME server.
Website string
// CAA consists of lowercase hostname elements, which the ACME server
// recognises as referring to itself for the purposes of CAA record validation
// as defined in RFC6844.
CAA []string
}
// Challenge encodes a returned CA challenge.
// Its Error field may be non-nil if the challenge is part of an Authorization
// with StatusInvalid.
type Challenge struct {
// Type is the challenge type, e.g. "http-01", "tls-sni-02", "dns-01".
Type string
// URI is where a challenge response can be posted to.
URI string
// Token is a random value that uniquely identifies the challenge.
Token string
// Status identifies the status of this challenge.
Status string
// Error indicates the reason for an authorization failure
// when this challenge was used.
// The type of a non-nil value is *Error.
Error error
}
// Authorization encodes an authorization response.
type Authorization struct {
// URI uniquely identifies an authorization.
URI string
// Status identifies the status of an authorization.
Status string
// Identifier is what the account is authorized to represent.
Identifier AuthzID
// Challenges that the client needs to fulfill in order to prove possession
// of the identifier (for pending authorizations).
// For final authorizations, the challenges that were used.
Challenges []*Challenge
// A collection of sets of challenges, each of which would be sufficient
// to prove possession of the identifier.
// Clients must complete a set of challenges that covers at least one set.
// Challenges are identified by their indices in the challenges array.
// If this field is empty, the client needs to complete all challenges.
Combinations [][]int
}
// AuthzID is an identifier that an account is authorized to represent.
type AuthzID struct {
Type string // The type of identifier, e.g. "dns".
Value string // The identifier itself, e.g. "example.org".
}
// wireAuthz is ACME JSON representation of Authorization objects.
type wireAuthz struct {
Status string
Challenges []wireChallenge
Combinations [][]int
Identifier struct {
Type string
Value string
}
}
func (z *wireAuthz) authorization(uri string) *Authorization {
a := &Authorization{
URI: uri,
Status: z.Status,
Identifier: AuthzID{Type: z.Identifier.Type, Value: z.Identifier.Value},
Combinations: z.Combinations, // shallow copy
Challenges: make([]*Challenge, len(z.Challenges)),
}
for i, v := range z.Challenges {
a.Challenges[i] = v.challenge()
}
return a
}
func (z *wireAuthz) error(uri string) *AuthorizationError {
err := &AuthorizationError{
URI: uri,
Identifier: z.Identifier.Value,
}
for _, raw := range z.Challenges {
if raw.Error != nil {
err.Errors = append(err.Errors, raw.Error.error(nil))
}
}
return err
}
// wireChallenge is ACME JSON challenge representation.
type wireChallenge struct {
URI string `json:"uri"`
Type string
Token string
Status string
Error *wireError
}
func (c *wireChallenge) challenge() *Challenge {
v := &Challenge{
URI: c.URI,
Type: c.Type,
Token: c.Token,
Status: c.Status,
}
if v.Status == "" {
v.Status = StatusPending
}
if c.Error != nil {
v.Error = c.Error.error(nil)
}
return v
}
// wireError is a subset of fields of the Problem Details object
// as described in https://tools.ietf.org/html/rfc7807#section-3.1.
type wireError struct {
Status int
Type string
Detail string
}
func (e *wireError) error(h http.Header) *Error {
return &Error{
StatusCode: e.Status,
ProblemType: e.Type,
Detail: e.Detail,
Header: h,
}
}
// CertOption is an optional argument type for the TLS ChallengeCert methods for
// customizing a temporary certificate for TLS-based challenges.
type CertOption interface {
privateCertOpt()
}
// WithKey creates an option holding a private/public key pair.
// The private part signs a certificate, and the public part represents the signee.
func WithKey(key crypto.Signer) CertOption {
return &certOptKey{key}
}
type certOptKey struct {
key crypto.Signer
}
func (*certOptKey) privateCertOpt() {}
// WithTemplate creates an option for specifying a certificate template.
// See x509.CreateCertificate for template usage details.
//
// In TLS ChallengeCert methods, the template is also used as parent,
// resulting in a self-signed certificate.
// The DNSNames field of t is always overwritten for tls-sni challenge certs.
func WithTemplate(t *x509.Certificate) CertOption {
return (*certOptTemplate)(t)
}
type certOptTemplate x509.Certificate
func (*certOptTemplate) privateCertOpt() {}
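
RateLimit, defined earlier in this file, lets callers detect rate-limit errors and read the server's Retry-After hint. A brief hedged sketch follows; the hand-built error is fabricated solely for demonstration, whereas in real use it would come back from a failed client call.

```Go
package main

import (
	"log"

	"golang.org/x/crypto/acme"
)

func main() {
	// Construct an *acme.Error by hand purely for demonstration.
	err := &acme.Error{
		StatusCode:  429,
		ProblemType: "urn:ietf:params:acme:error:rateLimited",
		Detail:      "too many requests",
	}
	if d, ok := acme.RateLimit(err); ok {
		// d is zero here because the fabricated error carries no
		// Retry-After header; a real response may suggest a wait time.
		log.Printf("rate limited; suggested wait before retrying: %v", d)
	}
}
```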

77
vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go generated vendored Normal file

@ -0,0 +1,77 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
2898 / PKCS #5 v2.0.
A key derivation function is useful when encrypting data based on a password
or any other not-fully-random data. It uses a pseudorandom function to derive
a secure encryption key based on the password.
While v2.0 of the standard defines only one pseudorandom function to use,
HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
import (
"crypto/hmac"
"hash"
)
// Key derives a key from the password, salt and iteration count, returning a
// []byte of length keyLen that can be used as a cryptographic key. The key is
// derived based on the method described as PBKDF2 with the HMAC variant using
// the supplied hash function.
//
// For example, to use an HMAC-SHA-1 based PBKDF2 key derivation function, you
// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
// doing:
//
// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
//
// Remember to get a good random salt. At least 8 bytes is recommended by the
// RFC.
//
// Using a higher iteration count will increase the cost of an exhaustive
// search but will also make derivation proportionally slower.
func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
prf := hmac.New(h, password)
hashLen := prf.Size()
numBlocks := (keyLen + hashLen - 1) / hashLen
var buf [4]byte
dk := make([]byte, 0, numBlocks*hashLen)
U := make([]byte, hashLen)
for block := 1; block <= numBlocks; block++ {
// N.B.: || means concatenation, ^ means XOR
// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
// U_1 = PRF(password, salt || uint(i))
prf.Reset()
prf.Write(salt)
buf[0] = byte(block >> 24)
buf[1] = byte(block >> 16)
buf[2] = byte(block >> 8)
buf[3] = byte(block)
prf.Write(buf[:4])
dk = prf.Sum(dk)
T := dk[len(dk)-hashLen:]
copy(U, T)
// U_n = PRF(password, U_(n-1))
for n := 2; n <= iter; n++ {
prf.Reset()
prf.Write(U)
U = U[:0]
U = prf.Sum(U)
for x := range U {
T[x] ^= U[x]
}
}
}
return dk[:keyLen]
}
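
As the package comment notes, any SHA-2 constructor can be supplied in place of SHA-1. A brief hedged sketch deriving a 32-byte key with HMAC-SHA-256; the password, salt size, and iteration count are illustrative values, not recommendations.

```Go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"log"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// A random 16-byte salt; in practice it must be stored alongside the
	// derived key so the derivation can be repeated at verification time.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		log.Fatal(err)
	}
	// 4096 iterations, 32-byte output, HMAC-SHA-256 as the PRF.
	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Println("derived key:", hex.EncodeToString(dk))
}
```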

201
vendor/gopkg.in/yaml.v2/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

31
vendor/gopkg.in/yaml.v2/LICENSE.libyaml generated vendored Normal file

@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

13
vendor/gopkg.in/yaml.v2/NOTICE generated vendored Normal file

@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

133
vendor/gopkg.in/yaml.v2/README.md generated vendored Normal file

@ -0,0 +1,133 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
Installation and usage
----------------------
The import path for the package is *gopkg.in/yaml.v2*.
To install it, run:
go get gopkg.in/yaml.v2
API documentation
-----------------
If opened in a browser, the import path itself leads to the API documentation:
* [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```
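
Beyond plain struct decoding, the decoder (see callUnmarshaler in decode.go further below) hands control to any type that implements yaml.Unmarshaler. The following is a minimal hedged sketch; the Duration type and its "<n>s" format are invented for illustration only.

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// Duration is a hypothetical wrapper whose UnmarshalYAML accepts a string
// of the form "<seconds>s" instead of a bare integer.
type Duration struct {
	Seconds int
}

func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	var n int
	if _, err := fmt.Sscanf(s, "%ds", &n); err != nil {
		return fmt.Errorf("invalid duration %q: %v", s, err)
	}
	d.Seconds = n
	return nil
}

func main() {
	var cfg struct {
		Timeout Duration `yaml:"timeout"`
	}
	if err := yaml.Unmarshal([]byte("timeout: 90s\n"), &cfg); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println("timeout seconds:", cfg.Timeout.Seconds)
}
```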

739
vendor/gopkg.in/yaml.v2/apic.go generated vendored Normal file

@ -0,0 +1,739 @@
package yaml
import (
"io"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

775
vendor/gopkg.in/yaml.v2/decode.go generated vendored Normal file

@ -0,0 +1,775 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
// For an alias node, alias holds the resolved alias.
alias *node
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
doneInit bool
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
return &p
}
func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
yaml_parser_set_input_reader(&p.parser, r)
return &p
}
func (p *parser) init() {
if p.doneInit {
return
}
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}
// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't advance the line counter before returning the error
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
p.init()
switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + p.event.typ.String())
}
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.expect(yaml_DOCUMENT_START_EVENT)
n.children = append(n.children, p.parse())
p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
n.alias = p.doc.anchors[n.value]
if n.alias == nil {
failf("unknown anchor '%s' referenced", n.value)
}
p.expect(yaml_ALIAS_EVENT)
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.expect(yaml_MAPPING_END_EVENT)
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[*node]bool
mapType reflect.Type
terrors []string
strict bool
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[*node]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n] = true
good = d.unmarshal(n.alias, out)
delete(d.aliases, n)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == yaml_BINARY_TAG {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should bowl out any dubious values.
text = []byte(n.value)
}
err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
return true
}
if resolved != nil {
out.SetString(n.value)
return true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else if tag == yaml_TIMESTAMP_TAG {
// It looks like a timestamp but for backward compatibility
// reasons we set it as a string, so that code that unmarshals
// timestamp-like values into interface{} will continue to
// see a string and not a time.Time.
// TODO(v3) Drop this.
out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
return true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
return true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
return true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
return true
case int64:
out.SetFloat(float64(resolved))
return true
case uint64:
out.SetFloat(float64(resolved))
return true
case float64:
out.SetFloat(resolved)
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
return true
}
}
d.terror(n, tag, out)
return false
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
d.setMapIndex(n.children[i+1], out, k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
if d.strict && out.MapIndex(k) != zeroValue {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
return
}
out.SetMapIndex(k, v)
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
var doneFields []bool
if d.strict {
doneFields = make([]bool, len(sinfo.FieldsList))
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.strict {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
d.setMapIndex(n.children[i+1], inlineMap, name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
}
}
return true
}
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
an, ok := d.doc.anchors[n.value]
if ok && an.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
an, ok := d.doc.anchors[ni.value]
if ok && an.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
}
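
Note: decode.go defines only the parsing and decoding internals shown above; the exported entry points live in yaml.go, which is untouched by this diff. As a rough sketch (simplified, not the verbatim library source), the unmarshal path wires these pieces together approximately as follows:

func unmarshalSketch(in []byte, out interface{}, strict bool) (err error) {
    // Simplified sketch of the decode pipeline; the real entry point is in yaml.go.
    defer handleErr(&err) // convert internal failf panics into a returned error
    p := newParser(in)    // event parser over the input bytes
    defer p.destroy()
    d := newDecoder(strict)
    if node := p.parse(); node != nil {
        v := reflect.ValueOf(out)
        if v.Kind() == reflect.Ptr && !v.IsNil() {
            v = v.Elem()
        }
        d.unmarshal(node, v) // walk the node tree into the Go value
    }
    if len(d.terrors) > 0 {
        return &TypeError{d.terrors}
    }
    return nil
}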

vendor/gopkg.in/yaml.v2/emitterc.go (generated vendored file, 1685 lines)

File diff suppressed because it is too large.

vendor/gopkg.in/yaml.v2/encode.go (generated vendored file, 362 lines)

@@ -0,0 +1,362 @@
package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
// doneInit holds whether the initial stream_start_event has been
// emitted.
doneInit bool
}
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
switch m := iface.(type) {
case time.Time, *time.Time:
// Although time.Time implements TextMarshaler,
// we don't want to treat it as a string for YAML
// purposes because YAML has special support for
// timestamps.
case Marshaler:
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
case encoding.TextMarshaler:
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.Type() == ptrTimeType {
e.timev(tag, in.Elem())
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
if in.Type() == timeType {
e.timev(tag, in)
} else {
e.structv(tag, in)
}
case reflect.Slice, reflect.Array:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = yaml_BINARY_TAG
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}
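
As with the decoder, encode.go holds only the emitter-facing internals; the exported Marshal API sits in yaml.go outside this diff. A minimal sketch of driving the encoder above end to end could look like this (marshalSketch is an illustrative name, not the library's own):

func marshalSketch(in interface{}) (out []byte, err error) {
    // Simplified sketch; the real Marshal lives in yaml.go.
    defer handleErr(&err)                 // turn internal failf panics into an error
    e := newEncoder()                     // buffers emitted YAML in e.out
    defer e.destroy()
    e.marshalDoc("", reflect.ValueOf(in)) // stream/document start, node tree, document end
    e.finish()                            // stream end event
    return e.out, nil
}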

vendor/gopkg.in/yaml.v2/go.mod (generated vendored file, 5 lines)

@@ -0,0 +1,5 @@
module "gopkg.in/yaml.v2"
require (
"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)

vendor/gopkg.in/yaml.v2/parserc.go (generated vendored file, 1095 lines)

File diff suppressed because it is too large.

Some files were not shown because too many files have changed in this diff.