From 4fe72ebf69f560849d8f128de18901d872c6e3f9 Mon Sep 17 00:00:00 2001 From: bergquist Date: Wed, 13 Jan 2016 15:11:23 +0100 Subject: [PATCH] feat(macaron): upgrades macaron version --- Godeps/Godeps.json | 65 +- .../src/github.com/Unknwon/macaron/README.md | 94 -- .../Unknwon/macaron/context_test.go | 370 ----- .../src/github.com/Unknwon/macaron/gzip.go | 81 -- .../github.com/Unknwon/macaron/gzip_test.go | 65 - .../Unknwon/macaron/inject/README.md | 4 - .../Unknwon/macaron/inject/inject.goconvey | 1 - .../Unknwon/macaron/inject/inject_test.go | 174 --- .../github.com/Unknwon/macaron/logger_test.go | 67 - .../Unknwon/macaron/macaron_test.go | 218 --- .../Unknwon/macaron/recovery_test.go | 74 - .../github.com/Unknwon/macaron/render_test.go | 581 -------- .../Unknwon/macaron/response_writer_test.go | 188 --- .../Unknwon/macaron/return_handler_test.go | 69 - .../github.com/Unknwon/macaron/router_test.go | 199 --- .../github.com/Unknwon/macaron/static_test.go | 246 ---- .../src/github.com/Unknwon/macaron/tree.go | 421 ------ .../github.com/Unknwon/macaron/tree_test.go | 112 -- .../github.com/go-macaron/binding/.travis.yml | 14 + .../binding/LICENSE | 0 .../github.com/go-macaron/binding/README.md | 20 + .../binding/binding.go | 317 ++-- .../binding/errors.go | 5 +- .../github.com/go-macaron/gzip/.travis.yml | 14 + .../macaron => go-macaron/gzip}/LICENSE | 0 .../src/github.com/go-macaron/gzip/README.md | 20 + .../src/github.com/go-macaron/gzip/gzip.go | 118 ++ .../github.com/go-macaron/inject/.travis.yml | 14 + .../src/github.com/go-macaron/inject/LICENSE | 191 +++ .../github.com/go-macaron/inject/README.md | 11 + .../macaron => go-macaron}/inject/inject.go | 15 + .../session/.gitignore | 0 .../github.com/go-macaron/session/.travis.yml | 14 + .../session/LICENSE | 0 .../github.com/go-macaron/session/README.md | 20 + .../session/couchbase/couchbase.go | 4 +- .../session/file.go | 20 +- .../session/ledis/ledis.go | 4 +- .../session/ledis/ledis.goconvey | 0 
.../session/memcache/memcache.go | 4 +- .../session/memcache/memcache.goconvey | 0 .../session/memory.go | 429 +++--- .../session/mysql/mysql.go | 4 +- .../session/mysql/mysql.goconvey | 0 .../session/nodb/nodb.go | 4 +- .../session/nodb/nodb.goconvey | 0 .../session/postgres/postgres.go | 4 +- .../session/postgres/postgres.goconvey | 0 .../session/redis/redis.go | 46 +- .../session/redis/redis.goconvey | 0 .../session/session.go | 8 +- .../session/utils.go | 13 +- .../src/github.com/jtolds/gls/LICENSE | 18 - .../src/github.com/jtolds/gls/README.md | 64 - .../src/github.com/jtolds/gls/context.go | 150 -- .../src/github.com/jtolds/gls/context_test.go | 139 -- .../src/github.com/jtolds/gls/gen_sym.go | 13 - .../src/github.com/jtolds/gls/id_pool.go | 34 - .../src/github.com/jtolds/gls/stack_tags.go | 93 -- .../src/github.com/klauspost/compress/LICENSE | 27 + .../klauspost/compress/flate/copy.go | 32 + .../klauspost/compress/flate/crc32_amd64.go | 39 + .../klauspost/compress/flate/crc32_amd64.s | 212 +++ .../klauspost/compress/flate/crc32_noasm.go | 34 + .../klauspost/compress/flate/deflate.go | 1293 +++++++++++++++++ .../klauspost/compress/flate/fixedhuff.go | 78 + .../klauspost/compress/flate/gen.go | 265 ++++ .../compress/flate/huffman_bit_writer.go | 690 +++++++++ .../klauspost/compress/flate/huffman_code.go | 363 +++++ .../klauspost/compress/flate/inflate.go | 846 +++++++++++ .../klauspost/compress/flate/reverse_bits.go | 48 + .../klauspost/compress/flate/snappy.go | 97 ++ .../klauspost/compress/flate/token.go | 105 ++ .../klauspost/compress/gzip/gunzip.go | 342 +++++ .../klauspost/compress/gzip/gzip.go | 274 ++++ .../src/github.com/klauspost/cpuid/.gitignore | 24 + .../github.com/klauspost/cpuid/.travis.yml | 7 + .../src/github.com/klauspost/cpuid/LICENSE | 22 + .../src/github.com/klauspost/cpuid/README.md | 145 ++ .../src/github.com/klauspost/cpuid/cpuid.go | 1022 +++++++++++++ .../github.com/klauspost/cpuid/cpuid_386.s | 40 + 
.../github.com/klauspost/cpuid/cpuid_amd64.s | 40 + .../klauspost/cpuid/detect_intel.go | 17 + .../github.com/klauspost/cpuid/detect_ref.go | 23 + .../github.com/klauspost/cpuid/generate.go | 3 + .../github.com/klauspost/cpuid/private-gen.go | 476 ++++++ .../klauspost/cpuid/private/README.md | 6 + .../klauspost/cpuid/private/cpuid.go | 987 +++++++++++++ .../klauspost/cpuid/private/cpuid_386.s | 40 + .../klauspost/cpuid/private/cpuid_amd64.s | 40 + .../cpuid/private/cpuid_detect_intel.go | 17 + .../cpuid/private/cpuid_detect_ref.go | 23 + .../src/github.com/klauspost/crc32/.gitignore | 24 + .../github.com/klauspost/crc32/.travis.yml | 11 + .../src/github.com/klauspost/crc32/LICENSE | 28 + .../src/github.com/klauspost/crc32/README.md | 84 ++ .../src/github.com/klauspost/crc32/crc32.go | 182 +++ .../github.com/klauspost/crc32/crc32_amd64.go | 62 + .../github.com/klauspost/crc32/crc32_amd64.s | 237 +++ .../klauspost/crc32/crc32_amd64p32.go | 39 + .../klauspost/crc32/crc32_amd64p32.s | 67 + .../klauspost/crc32/crc32_generic.go | 28 + .../macaron-contrib/binding/README.md | 21 - .../macaron-contrib/binding/bind_test.go | 57 - .../macaron-contrib/binding/common_test.go | 115 -- .../binding/errorhandler_test.go | 162 --- .../macaron-contrib/binding/errors_test.go | 115 -- .../macaron-contrib/binding/file_test.go | 191 --- .../macaron-contrib/binding/form_test.go | 282 ---- .../macaron-contrib/binding/json_test.go | 222 --- .../macaron-contrib/binding/misc_test.go | 118 -- .../macaron-contrib/binding/multipart_test.go | 155 -- .../macaron-contrib/binding/validate_test.go | 370 ----- .../macaron-contrib/session/README.md | 21 - .../macaron-contrib/session/file_test.go | 34 - .../session/ledis/ledis_test.go | 105 -- .../session/memcache/memcache_test.go | 107 -- .../macaron-contrib/session/memory_test.go | 27 - .../session/mysql/mysql_test.go | 138 -- .../macaron-contrib/session/nodb/nodb_test.go | 105 -- .../session/postgres/postgres_test.go | 138 -- 
.../session/redis/redis_test.go | 107 -- .../macaron-contrib/session/session_test.go | 200 --- .../macaron.v1}/.gitignore | 0 .../src/gopkg.in/macaron.v1/.travis.yml | 13 + .../src/gopkg.in/macaron.v1/LICENSE | 191 +++ .../src/gopkg.in/macaron.v1/README.md | 94 ++ .../macaron.v1}/context.go | 50 +- .../fixtures/basic/admin/index.tmpl | 0 .../fixtures/basic/another_layout.tmpl | 0 .../macaron.v1}/fixtures/basic/content.tmpl | 0 .../fixtures/basic/current_layout.tmpl | 0 .../macaron.v1}/fixtures/basic/delims.tmpl | 0 .../macaron.v1}/fixtures/basic/hello.tmpl | 0 .../macaron.v1}/fixtures/basic/hypertext.html | 0 .../macaron.v1}/fixtures/basic/layout.tmpl | 0 .../macaron.v1}/fixtures/basic2/hello.tmpl | 0 .../macaron.v1}/fixtures/basic2/hello2.tmpl | 0 .../fixtures/custom_funcs/index.tmpl | 0 .../macaron => gopkg.in/macaron.v1}/logger.go | 2 +- .../macaron.v1}/macaron.go | 29 +- .../macaron.v1}/macaronlogo.png | Bin .../macaron.v1}/recovery.go | 4 +- .../macaron => gopkg.in/macaron.v1}/render.go | 97 +- .../macaron.v1}/response_writer.go | 0 .../macaron.v1}/return_handler.go | 29 +- .../macaron => gopkg.in/macaron.v1}/router.go | 166 ++- .../macaron => gopkg.in/macaron.v1}/static.go | 2 +- .../src/gopkg.in/macaron.v1/tree.go | 379 +++++ pkg/api/api.go | 4 +- pkg/api/common.go | 2 +- pkg/api/dataproxy.go | 2 +- pkg/api/static/static.go | 2 +- pkg/cmd/web.go | 2 +- pkg/middleware/auth.go | 2 +- pkg/middleware/logger.go | 2 +- pkg/middleware/middleware.go | 2 +- pkg/middleware/middleware_test.go | 2 +- pkg/middleware/quota.go | 2 +- pkg/middleware/session.go | 12 +- pkg/middleware/util.go | 5 +- pkg/middleware/validate_host.go | 2 +- pkg/setting/setting.go | 2 +- 163 files changed, 10334 insertions(+), 6847 deletions(-) delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/README.md delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go delete mode 100644 
Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go delete mode 100644 Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go create mode 100644 Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/binding/LICENSE (100%) create mode 100644 Godeps/_workspace/src/github.com/go-macaron/binding/README.md rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/binding/binding.go (70%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/binding/errors.go (97%) create mode 100644 Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml rename Godeps/_workspace/src/github.com/{Unknwon/macaron => go-macaron/gzip}/LICENSE (100%) create mode 100644 Godeps/_workspace/src/github.com/go-macaron/gzip/README.md create mode 100644 Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go create mode 100644 
Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE create mode 100644 Godeps/_workspace/src/github.com/go-macaron/inject/README.md rename Godeps/_workspace/src/github.com/{Unknwon/macaron => go-macaron}/inject/inject.go (89%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/.gitignore (100%) create mode 100644 Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/LICENSE (100%) create mode 100644 Godeps/_workspace/src/github.com/go-macaron/session/README.md rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/couchbase/couchbase.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/file.go (94%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/ledis/ledis.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/ledis/ledis.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/memcache/memcache.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/memcache/memcache.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/memory.go (93%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/mysql/mysql.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/mysql/mysql.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/nodb/nodb.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/nodb/nodb.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/postgres/postgres.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => 
go-macaron}/session/postgres/postgres.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/redis/redis.go (81%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/redis/redis.goconvey (100%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/session.go (98%) rename Godeps/_workspace/src/github.com/{macaron-contrib => go-macaron}/session/utils.go (80%) delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/README.md delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/context.go delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/context_test.go delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/gen_sym.go delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/id_pool.go delete mode 100644 Godeps/_workspace/src/github.com/jtolds/gls/stack_tags.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/LICENSE create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go create mode 100644 
Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/README.md create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml 
create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/README.md create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s create mode 100644 Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/file_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/form_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/json_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/misc_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/multipart_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/binding/validate_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/README.md delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/file_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go delete mode 100644 
Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/memory_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go delete mode 100644 Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/.gitignore (100%) create mode 100644 Godeps/_workspace/src/gopkg.in/macaron.v1/.travis.yml create mode 100644 Godeps/_workspace/src/gopkg.in/macaron.v1/LICENSE create mode 100644 Godeps/_workspace/src/gopkg.in/macaron.v1/README.md rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/context.go (92%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/admin/index.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/another_layout.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/content.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/current_layout.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/delims.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/hello.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic/hypertext.html (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => 
gopkg.in/macaron.v1}/fixtures/basic/layout.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic2/hello.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/basic2/hello2.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/fixtures/custom_funcs/index.tmpl (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/logger.go (97%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/macaron.go (92%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/macaronlogo.png (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/recovery.go (98%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/render.go (87%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/response_writer.go (100%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/return_handler.go (74%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/router.go (58%) rename Godeps/_workspace/src/{github.com/Unknwon/macaron => gopkg.in/macaron.v1}/static.go (99%) create mode 100644 Godeps/_workspace/src/gopkg.in/macaron.v1/tree.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index d8832be8fa4..00a151ffeb7 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -14,10 +14,6 @@ "ImportPath": "github.com/Unknwon/com", "Rev": "d9bcf409c8a368d06c9b347705c381e7c12d54df" }, - { - "ImportPath": "github.com/Unknwon/macaron", - "Rev": "93de4f3fad97bf246b838f828e2348f46f21f20a" - }, { "ImportPath": "github.com/aws/aws-sdk-go/aws", "Comment": "v1.0.0", @@ -68,6 +64,11 @@ "Comment": "v1.0.0", "Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f" }, + { + "ImportPath": "github.com/bradfitz/gomemcache/memcache", + "Comment": 
"release.r60-40-g72a6864", + "Rev": "72a68649ba712ee7c4b5b4a943a626bcd7d90eb8" + }, { "ImportPath": "github.com/davecgh/go-spew/spew", "Rev": "2df174808ee097f90d259e432cc04442cf60be21" @@ -82,6 +83,22 @@ "Comment": "v1-19-g83e6542", "Rev": "83e65426fd1c06626e88aa8a085e5bfed0208e29" }, + { + "ImportPath": "github.com/go-macaron/binding", + "Rev": "2502aaf4bce3a4e6451b4610847bfb8dffdb6266" + }, + { + "ImportPath": "github.com/go-macaron/gzip", + "Rev": "4938e9be6b279d8426cb1c89a6bcf7af70b0c21d" + }, + { + "ImportPath": "github.com/go-macaron/inject", + "Rev": "c5ab7bf3a307593cd44cb272d1a5beea473dd072" + }, + { + "ImportPath": "github.com/go-macaron/session", + "Rev": "66031fcb37a0fff002a1f028eb0b3a815c78306b" + }, { "ImportPath": "github.com/go-sql-driver/mysql", "Comment": "v1.2-26-g9543750", @@ -106,22 +123,26 @@ "Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9" }, { - "ImportPath": "github.com/jtolds/gls", - "Rev": "f1ac7f4f24f50328e6bc838ca4437d1612a0243c" + "ImportPath": "github.com/klauspost/compress/flate", + "Rev": "7b02889a2005228347aef0e76beeaee564d82f8c" + }, + { + "ImportPath": "github.com/klauspost/compress/gzip", + "Rev": "7b02889a2005228347aef0e76beeaee564d82f8c" + }, + { + "ImportPath": "github.com/klauspost/cpuid", + "Rev": "349c675778172472f5e8f3a3e0fe187e302e5a10" + }, + { + "ImportPath": "github.com/klauspost/crc32", + "Rev": "6834731faf32e62a2dd809d99fb24d1e4ae5a92d" }, { "ImportPath": "github.com/lib/pq", "Comment": "go1.0-cutoff-13-g19eeca3", "Rev": "19eeca3e30d2577b1761db471ec130810e67f532" }, - { - "ImportPath": "github.com/macaron-contrib/binding", - "Rev": "0fbe4b9707e6eb556ef843e5471592f55ce0a5e7" - }, - { - "ImportPath": "github.com/macaron-contrib/session", - "Rev": "31e841d95c7302b9ac456c830ea2d6dfcef4f84a" - }, { "ImportPath": "github.com/mattn/go-sqlite3", "Rev": "e28cd440fabdd39b9520344bc26829f61db40ece" @@ -130,11 +151,6 @@ "ImportPath": "github.com/rainycape/unidecode", "Rev": "836ef0a715aedf08a12d595ed73ec8ed5b288cac" }, - { - 
"ImportPath": "github.com/smartystreets/goconvey/convey", - "Comment": "1.5.0-356-gfbc0a1c", - "Rev": "fbc0a1c888f9f96263f9a559d1769905245f1123" - }, { "ImportPath": "github.com/streadway/amqp", "Rev": "150b7f24d6ad507e6026c13d85ce1f1391ac7400" @@ -162,15 +178,14 @@ "Comment": "v0-16-g1772191", "Rev": "177219109c97e7920c933e21c9b25f874357b237" }, + { + "ImportPath": "gopkg.in/macaron.v1", + "Rev": "1c6dd87797ae9319b4658cbd48d1d0420b279fd5" + }, { "ImportPath": "gopkg.in/redis.v2", "Comment": "v2.3.2", "Rev": "e6179049628164864e6e84e973cfb56335748dea" - }, - { - "ImportPath": "github.com/bradfitz/gomemcache/memcache", - "Comment": "release.r60-40-g72a6864", - "Rev": "72a68649ba712ee7c4b5b4a943a626bcd7d90eb8" - } + } ] } diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/README.md b/Godeps/_workspace/src/github.com/Unknwon/macaron/README.md deleted file mode 100644 index 8b201624f61..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/README.md +++ /dev/null @@ -1,94 +0,0 @@ -Macaron [![Build Status](https://drone.io/github.com/Unknwon/macaron/status.png)](https://drone.io/github.com/Unknwon/macaron/latest) [![](http://gocover.io/_badge/github.com/Unknwon/macaron)](http://gocover.io/github.com/Unknwon/macaron) -======================= - -![Macaron Logo](https://raw.githubusercontent.com/Unknwon/macaron/master/macaronlogo.png) - -Package macaron is a high productive and modular design web framework in Go. - -##### Current version: 0.5.4 - -## Getting Started - -To install Macaron: - - go get github.com/Unknwon/macaron - -The very basic usage of Macaron: - -```go -package main - -import "github.com/Unknwon/macaron" - -func main() { - m := macaron.Classic() - m.Get("/", func() string { - return "Hello world!" - }) - m.Run() -} -``` - -## Features - -- Powerful routing with suburl. -- Flexible routes combinations. -- Unlimited nested group routers. -- Directly integrate with existing services. -- Dynamically change template files at runtime. 
-- Allow to use in-memory template and static files. -- Easy to plugin/unplugin features with modular design. -- Handy dependency injection powered by [inject](https://github.com/codegangsta/inject). -- Better router layer and less reflection make faster speed. - -## Middlewares - -Middlewares allow you easily plugin/unplugin features for your Macaron applications. - -There are already many [middlewares](https://github.com/macaron-contrib) to simplify your work: - -- gzip - Gzip compression to all requests -- render - Go template engine -- static - Serves static files -- [binding](https://github.com/macaron-contrib/binding) - Request data binding and validation -- [i18n](https://github.com/macaron-contrib/i18n) - Internationalization and Localization -- [cache](https://github.com/macaron-contrib/cache) - Cache manager -- [session](https://github.com/macaron-contrib/session) - Session manager -- [csrf](https://github.com/macaron-contrib/csrf) - Generates and validates csrf tokens -- [captcha](https://github.com/macaron-contrib/captcha) - Captcha service -- [pongo2](https://github.com/macaron-contrib/pongo2) - Pongo2 template engine support -- [sockets](https://github.com/macaron-contrib/sockets) - WebSockets channels binding -- [bindata](https://github.com/macaron-contrib/bindata) - Embed binary data as static and template files -- [toolbox](https://github.com/macaron-contrib/toolbox) - Health check, pprof, profile and statistic services -- [oauth2](https://github.com/macaron-contrib/oauth2) - OAuth 2.0 backend -- [switcher](https://github.com/macaron-contrib/switcher) - Multiple-site support -- [method](https://github.com/macaron-contrib/method) - HTTP method override -- [permissions2](https://github.com/xyproto/permissions2) - Cookies, users and permissions -- [renders](https://github.com/macaron-contrib/renders) - Beego-like render engine(Macaron has built-in template engine, this is another option) - -## Use Cases - -- [Gogs](https://github.com/gogits/gogs): Go 
Git Service -- [Gogs Web](https://github.com/gogits/gogsweb): Gogs official website -- [Go Walker](https://gowalker.org): Go online API documentation -- [Switch](https://github.com/gpmgo/switch): Gopm registry -- [YouGam](http://yougam.com): Online Forum -- [Car Girl](http://qcnl.gzsy.com/): Online campaign -- [Critical Stack Intel](https://intel.criticalstack.com/): A 100% free intel marketplace from Critical Stack, Inc. - -## Getting Help - -- [API Reference](https://gowalker.org/github.com/Unknwon/macaron) -- [Documentation](http://macaron.gogs.io) -- [FAQs](http://macaron.gogs.io/docs/faqs) -- [![Join the chat at https://gitter.im/Unknwon/macaron](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Unknwon/macaron?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -## Credits - -- Basic design of [Martini](https://github.com/go-martini/martini). -- Router layer of [beego](https://github.com/astaxie/beego). -- Logo is modified by [@insionng](https://github.com/insionng) based on [Tribal Dragon](http://xtremeyamazaki.deviantart.com/art/Tribal-Dragon-27005087). - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go deleted file mode 100644 index c4b4752e12f..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "bytes" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" - - "github.com/Unknwon/com" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_Context(t *testing.T) { - Convey("Do advanced encapsulation operations", t, func() { - m := Classic() - m.Use(Renderers(RenderOptions{ - Directory: "fixtures/basic", - }, "fixtures/basic2")) - - Convey("Get request body", func() { - m.Get("/body1", func(ctx *Context) { - data, err := ioutil.ReadAll(ctx.Req.Body().ReadCloser()) - So(err, ShouldBeNil) - So(string(data), ShouldEqual, "This is my request body") - }) - m.Get("/body2", func(ctx *Context) { - data, err := ctx.Req.Body().Bytes() - So(err, ShouldBeNil) - So(string(data), ShouldEqual, "This is my request body") - }) - m.Get("/body3", func(ctx *Context) { - data, err := ctx.Req.Body().String() - So(err, ShouldBeNil) - So(data, ShouldEqual, "This is my request body") - }) - - for i := 1; i <= 3; i++ { - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/body"+com.ToStr(i), nil) - req.Body = ioutil.NopCloser(bytes.NewBufferString("This is my request body")) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - } - }) - - Convey("Get remote IP address", func() { - m.Get("/remoteaddr", func(ctx *Context) string { - return ctx.RemoteAddr() - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/remoteaddr", nil) - req.RemoteAddr = "127.0.0.1:3333" - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, 
"127.0.0.1") - }) - - Convey("Render HTML", func() { - - Convey("Normal HTML", func() { - m.Get("/html", func(ctx *Context) { - ctx.HTML(304, "hello", "Unknwon") // 304 for logger test. - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/html", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "

Hello Unknwon

") - }) - - Convey("HTML template set", func() { - m.Get("/html2", func(ctx *Context) { - ctx.Data["Name"] = "Unknwon" - ctx.HTMLSet(200, "basic2", "hello2") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/html2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "

Hello Unknwon

") - }) - - Convey("With layout", func() { - m.Get("/layout", func(ctx *Context) { - ctx.HTML(200, "hello", "Unknwon", HTMLOptions{"layout"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/layout", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "head

Hello Unknwon

foot") - }) - }) - - Convey("Parse from and query", func() { - m.Get("/query", func(ctx *Context) string { - var buf bytes.Buffer - buf.WriteString(ctx.QueryTrim("name") + " ") - buf.WriteString(ctx.QueryEscape("name") + " ") - buf.WriteString(com.ToStr(ctx.QueryInt("int")) + " ") - buf.WriteString(com.ToStr(ctx.QueryInt64("int64")) + " ") - buf.WriteString(com.ToStr(ctx.QueryFloat64("float64")) + " ") - return buf.String() - }) - m.Get("/query2", func(ctx *Context) string { - var buf bytes.Buffer - buf.WriteString(strings.Join(ctx.QueryStrings("list"), ",") + " ") - buf.WriteString(strings.Join(ctx.QueryStrings("404"), ",") + " ") - return buf.String() - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/query?name=Unknwon&int=12&int64=123&float64=1.25", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Unknwon Unknwon 12 123 1.25 ") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/query2?list=item1&list=item2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "item1,item2 ") - }) - - Convey("URL parameter", func() { - m.Get("/:name/:int/:int64/:float64", func(ctx *Context) string { - var buf bytes.Buffer - ctx.SetParams("name", ctx.Params("name")) - buf.WriteString(ctx.Params("")) - buf.WriteString(ctx.Params(":name") + " ") - buf.WriteString(ctx.ParamsEscape(":name") + " ") - buf.WriteString(com.ToStr(ctx.ParamsInt(":int")) + " ") - buf.WriteString(com.ToStr(ctx.ParamsInt64(":int64")) + " ") - buf.WriteString(com.ToStr(ctx.ParamsFloat64(":float64")) + " ") - return buf.String() - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/user/1/13/1.24", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "user user 1 13 1.24 ") - }) - - Convey("Get file", func() { - m.Get("/getfile", func(ctx *Context) { - ctx.GetFile("hi") - }) - - resp := httptest.NewRecorder() - req, err 
:= http.NewRequest("GET", "/getfile", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - - Convey("Set and get cookie", func() { - m.Get("/set", func(ctx *Context) { - ctx.SetCookie("user", "Unknwon", 1, "/", "localhost", true, true) - ctx.SetCookie("user", "Unknwon", int32(1), "/", "localhost", 1) - ctx.SetCookie("user", "Unknwon", int64(1)) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/set", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Header().Get("Set-Cookie"), ShouldEqual, "user=Unknwon; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure") - - m.Get("/get", func(ctx *Context) string { - ctx.GetCookie("404") - So(ctx.GetCookieInt("uid"), ShouldEqual, 1) - So(ctx.GetCookieInt64("uid"), ShouldEqual, 1) - So(ctx.GetCookieFloat64("balance"), ShouldEqual, 1.25) - return ctx.GetCookie("user") - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "user=Unknwon; uid=1; balance=1.25") - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Unknwon") - }) - - Convey("Set and get secure cookie", func() { - m.SetDefaultCookieSecret("macaron") - m.Get("/set", func(ctx *Context) { - ctx.SetSecureCookie("user", "Unknwon", 1) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/set", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - m.Get("/get", func(ctx *Context) string { - name, ok := ctx.GetSecureCookie("user") - So(ok, ShouldBeTrue) - return name - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Unknwon") - }) - - Convey("Serve files", func() { - m.Get("/file", func(ctx *Context) { - ctx.ServeFile("fixtures/custom_funcs/index.tmpl") - }) - - resp := httptest.NewRecorder() - req, err := 
http.NewRequest("GET", "/file", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") - - m.Get("/file2", func(ctx *Context) { - ctx.ServeFile("fixtures/custom_funcs/index.tmpl", "ok.tmpl") - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/file2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") - }) - - Convey("Serve file content", func() { - m.Get("/file", func(ctx *Context) { - ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/file", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") - - m.Get("/file2", func(ctx *Context) { - ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl", "ok.tmpl") - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/file2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") - - m.Get("/file3", func(ctx *Context) { - ctx.ServeFileContent("404.tmpl") - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/file3", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "open 404.tmpl: no such file or directory\n") - So(resp.Code, ShouldEqual, 500) - }) - - Convey("Serve content", func() { - m.Get("/content", func(ctx *Context) { - ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!"))) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/content", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Hello world!") - - m.Get("/content2", func(ctx *Context) { - ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!")), time.Now()) - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/content2", 
nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Hello world!") - }) - }) -} - -func Test_Context_Render(t *testing.T) { - Convey("Invalid render", t, func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - - m := New() - m.Get("/", func(ctx *Context) { - ctx.HTML(200, "hey") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) -} - -func Test_Context_Redirect(t *testing.T) { - Convey("Context with default redirect", t, func() { - url, err := url.Parse("http://localhost/path/one") - So(err, ShouldBeNil) - resp := httptest.NewRecorder() - req := http.Request{ - Method: "GET", - URL: url, - } - ctx := &Context{ - Req: Request{&req}, - Resp: NewResponseWriter(resp), - Data: make(map[string]interface{}), - } - ctx.Redirect("two") - - So(resp.Code, ShouldEqual, http.StatusFound) - So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two") - }) - - Convey("Context with custom redirect", t, func() { - url, err := url.Parse("http://localhost/path/one") - So(err, ShouldBeNil) - resp := httptest.NewRecorder() - req := http.Request{ - Method: "GET", - URL: url, - } - ctx := &Context{ - Req: Request{&req}, - Resp: NewResponseWriter(resp), - Data: make(map[string]interface{}), - } - ctx.Redirect("two", 307) - - So(resp.Code, ShouldEqual, http.StatusTemporaryRedirect) - So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two") - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go deleted file mode 100644 index 2e935f35076..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "bufio" - "compress/gzip" - "fmt" - "net" - "net/http" - "strings" -) - -const ( - HeaderAcceptEncoding = "Accept-Encoding" - HeaderContentEncoding = "Content-Encoding" - HeaderContentLength = "Content-Length" - HeaderContentType = "Content-Type" - HeaderVary = "Vary" -) - -// Gziper returns a Handler that adds gzip compression to all requests. -// Make sure to include the Gzip middleware above other middleware -// that alter the response body (like the render middleware). -func Gziper() Handler { - return func(ctx *Context) { - if !strings.Contains(ctx.Req.Header.Get(HeaderAcceptEncoding), "gzip") { - return - } - - headers := ctx.Resp.Header() - headers.Set(HeaderContentEncoding, "gzip") - headers.Set(HeaderVary, HeaderAcceptEncoding) - - gz := gzip.NewWriter(ctx.Resp) - defer gz.Close() - - gzw := gzipResponseWriter{gz, ctx.Resp} - ctx.Resp = gzw - ctx.MapTo(gzw, (*http.ResponseWriter)(nil)) - - ctx.Next() - - // delete content length after we know we have been written to - gzw.Header().Del("Content-Length") - } -} - -type gzipResponseWriter struct { - w *gzip.Writer - ResponseWriter -} - -func (grw gzipResponseWriter) Write(p []byte) (int, error) { - if len(grw.Header().Get(HeaderContentType)) == 0 { - grw.Header().Set(HeaderContentType, http.DetectContentType(p)) - } - - return grw.w.Write(p) -} - -func (grw gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := grw.ResponseWriter.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the 
Hijacker interface") - } - return hijacker.Hijack() -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go deleted file mode 100644 index 565eed66453..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_Gzip(t *testing.T) { - Convey("Gzip response content", t, func() { - before := false - - m := New() - m.Use(Gziper()) - m.Use(func(r http.ResponseWriter) { - r.(ResponseWriter).Before(func(rw ResponseWriter) { - before = true - }) - }) - m.Get("/", func() string { return "hello wolrd!" }) - - // Not yet gzip. - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - _, ok := resp.HeaderMap[HeaderContentEncoding] - So(ok, ShouldBeFalse) - - ce := resp.Header().Get(HeaderContentEncoding) - So(strings.EqualFold(ce, "gzip"), ShouldBeFalse) - - // Gzip now. 
- resp = httptest.NewRecorder() - req.Header.Set(HeaderAcceptEncoding, "gzip") - m.ServeHTTP(resp, req) - - _, ok = resp.HeaderMap[HeaderContentEncoding] - So(ok, ShouldBeTrue) - - ce = resp.Header().Get(HeaderContentEncoding) - So(strings.EqualFold(ce, "gzip"), ShouldBeTrue) - - So(before, ShouldBeTrue) - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md b/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md deleted file mode 100644 index 1721ab4a892..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md +++ /dev/null @@ -1,4 +0,0 @@ -inject -====== - -Dependency injection for go diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey b/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey deleted file mode 100644 index 8485e986e45..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey +++ /dev/null @@ -1 +0,0 @@ -ignore \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go deleted file mode 100644 index 748572ac2bf..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package inject_test - -import ( - "fmt" - "reflect" - "testing" - - "github.com/Unknwon/macaron/inject" -) - -type SpecialString interface { -} - -type TestStruct struct { - Dep1 string `inject:"t" json:"-"` - Dep2 SpecialString `inject` - Dep3 string -} - -type Greeter struct { - Name string -} - -func (g *Greeter) String() string { - return "Hello, My name is" + g.Name -} - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func Test_InjectorInvoke(t *testing.T) { - injector := inject.New() - expect(t, injector == nil, false) - - dep := "some dependency" - injector.Map(dep) - dep2 := "another dep" - injector.MapTo(dep2, (*SpecialString)(nil)) - dep3 := make(chan *SpecialString) - dep4 := make(chan *SpecialString) - typRecv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(dep3).Elem()) - typSend := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(dep4).Elem()) - injector.Set(typRecv, reflect.ValueOf(dep3)) - injector.Set(typSend, reflect.ValueOf(dep4)) - - _, err := injector.Invoke(func(d1 string, d2 SpecialString, d3 <-chan *SpecialString, d4 chan<- *SpecialString) { - expect(t, d1, dep) - expect(t, d2, dep2) - expect(t, reflect.TypeOf(d3).Elem(), reflect.TypeOf(dep3).Elem()) - expect(t, reflect.TypeOf(d4).Elem(), reflect.TypeOf(dep4).Elem()) - expect(t, reflect.TypeOf(d3).ChanDir(), reflect.RecvDir) - expect(t, reflect.TypeOf(d4).ChanDir(), reflect.SendDir) - }) - - expect(t, err, nil) -} - -func Test_InjectorInvokeReturnValues(t *testing.T) { - injector := inject.New() - expect(t, injector == nil, false) - - dep := "some dependency" - injector.Map(dep) - dep2 := "another dep" - injector.MapTo(dep2, (*SpecialString)(nil)) - - 
result, err := injector.Invoke(func(d1 string, d2 SpecialString) string { - expect(t, d1, dep) - expect(t, d2, dep2) - return "Hello world" - }) - - expect(t, result[0].String(), "Hello world") - expect(t, err, nil) -} - -func Test_InjectorApply(t *testing.T) { - injector := inject.New() - - injector.Map("a dep").MapTo("another dep", (*SpecialString)(nil)) - - s := TestStruct{} - err := injector.Apply(&s) - expect(t, err, nil) - - expect(t, s.Dep1, "a dep") - expect(t, s.Dep2, "another dep") -} - -func Test_InterfaceOf(t *testing.T) { - iType := inject.InterfaceOf((*SpecialString)(nil)) - expect(t, iType.Kind(), reflect.Interface) - - iType = inject.InterfaceOf((**SpecialString)(nil)) - expect(t, iType.Kind(), reflect.Interface) - - // Expecting nil - defer func() { - rec := recover() - refute(t, rec, nil) - }() - iType = inject.InterfaceOf((*testing.T)(nil)) -} - -func Test_InjectorSet(t *testing.T) { - injector := inject.New() - typ := reflect.TypeOf("string") - typSend := reflect.ChanOf(reflect.SendDir, typ) - typRecv := reflect.ChanOf(reflect.RecvDir, typ) - - // instantiating unidirectional channels is not possible using reflect - // http://golang.org/src/pkg/reflect/value.go?s=60463:60504#L2064 - chanRecv := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) - chanSend := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) - - injector.Set(typSend, chanSend) - injector.Set(typRecv, chanRecv) - - expect(t, injector.GetVal(typSend).IsValid(), true) - expect(t, injector.GetVal(typRecv).IsValid(), true) - expect(t, injector.GetVal(chanSend.Type()).IsValid(), false) -} - -func Test_InjectorGet(t *testing.T) { - injector := inject.New() - - injector.Map("some dependency") - - expect(t, injector.GetVal(reflect.TypeOf("string")).IsValid(), true) - expect(t, injector.GetVal(reflect.TypeOf(11)).IsValid(), false) -} - -func Test_InjectorSetParent(t *testing.T) { - injector := inject.New() - injector.MapTo("another dep", (*SpecialString)(nil)) - - injector2 
:= inject.New() - injector2.SetParent(injector) - - expect(t, injector2.GetVal(inject.InterfaceOf((*SpecialString)(nil))).IsValid(), true) -} - -func TestInjectImplementors(t *testing.T) { - injector := inject.New() - g := &Greeter{"Jeremy"} - injector.Map(g) - - expect(t, injector.GetVal(inject.InterfaceOf((*fmt.Stringer)(nil))).IsValid(), true) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go deleted file mode 100644 index c81f70237f9..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/com" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Logger(t *testing.T) { - Convey("Global logger", t, func() { - buf := bytes.NewBufferString("") - m := New() - m.Map(log.New(buf, "[Macaron] ", 0)) - m.Use(Logger()) - m.Use(func(res http.ResponseWriter) { - res.WriteHeader(http.StatusNotFound) - }) - m.Get("/", func() {}) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusNotFound) - So(len(buf.String()), ShouldBeGreaterThan, 0) - }) - - if ColorLog { - Convey("Color console output", t, func() { - m := Classic() - m.Get("/:code:int", func(ctx *Context) (int, string) { - return ctx.ParamsInt(":code"), "" - }) - - // Just for testing if logger would capture. - codes := []int{200, 201, 202, 301, 302, 304, 401, 403, 404, 500} - for _, code := range codes { - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/"+com.ToStr(code), nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, code) - } - }) - } -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go deleted file mode 100644 index 35cc8f8a218..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_Version(t *testing.T) { - Convey("Get version", t, func() { - So(Version(), ShouldEqual, _VERSION) - }) -} - -func Test_New(t *testing.T) { - Convey("Initialize a new instance", t, func() { - So(New(), ShouldNotBeNil) - }) - - Convey("Just test that Run doesn't bomb", t, func() { - go New().Run() - time.Sleep(1 * time.Second) - os.Setenv("PORT", "4001") - go New().Run("0.0.0.0") - go New().Run(4002) - go New().Run("0.0.0.0", 4003) - }) -} - -func Test_Macaron_Before(t *testing.T) { - Convey("Register before handlers", t, func() { - m := New() - m.Before(func(rw http.ResponseWriter, req *http.Request) bool { - return false - }) - m.Before(func(rw http.ResponseWriter, req *http.Request) bool { - return true - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) -} - -func Test_Macaron_ServeHTTP(t *testing.T) { - Convey("Serve HTTP requests", t, func() { - result := "" - m := New() - m.Use(func(c *Context) { - result += "foo" - c.Next() - result += "ban" - }) - m.Use(func(c *Context) { - result += "bar" - c.Next() - result += "baz" - }) - m.Get("/", func() {}) - m.Action(func(res http.ResponseWriter, req *http.Request) { - result += "bat" - res.WriteHeader(http.StatusBadRequest) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(result, ShouldEqual, "foobarbatbazban") - So(resp.Code, ShouldEqual, http.StatusBadRequest) - }) -} - -func Test_Macaron_Handlers(t *testing.T) { - Convey("Add custom handlers", t, func() { - result := "" - batman := func(c *Context) { - result += "batman!" 
- } - - m := New() - m.Use(func(c *Context) { - result += "foo" - c.Next() - result += "ban" - }) - m.Handlers( - batman, - batman, - batman, - ) - - Convey("Add not callable function", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - m.Use("shit") - }) - - m.Get("/", func() {}) - m.Action(func(res http.ResponseWriter, req *http.Request) { - result += "bat" - res.WriteHeader(http.StatusBadRequest) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(result, ShouldEqual, "batman!batman!batman!bat") - So(resp.Code, ShouldEqual, http.StatusBadRequest) - }) -} - -func Test_Macaron_EarlyWrite(t *testing.T) { - Convey("Write early content to response", t, func() { - result := "" - m := New() - m.Use(func(res http.ResponseWriter) { - result += "foobar" - res.Write([]byte("Hello world")) - }) - m.Use(func() { - result += "bat" - }) - m.Get("/", func() {}) - m.Action(func(res http.ResponseWriter) { - result += "baz" - res.WriteHeader(http.StatusBadRequest) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(result, ShouldEqual, "foobar") - So(resp.Code, ShouldEqual, http.StatusOK) - }) -} - -func Test_Macaron_Written(t *testing.T) { - Convey("Written sign", t, func() { - resp := httptest.NewRecorder() - m := New() - m.Handlers(func(res http.ResponseWriter) { - res.WriteHeader(http.StatusOK) - }) - - ctx := m.createContext(resp, &http.Request{Method: "GET"}) - So(ctx.Written(), ShouldBeFalse) - - ctx.run() - So(ctx.Written(), ShouldBeTrue) - }) -} - -func Test_Macaron_Basic_NoRace(t *testing.T) { - Convey("Make sure no race between requests", t, func() { - m := New() - handlers := []Handler{func() {}, func() {}} - // Ensure append will not realloc to trigger the race condition - m.handlers = handlers[:1] - m.Get("/", func() {}) - req, _ := http.NewRequest("GET", "/", nil) - for i := 0; 
i < 2; i++ { - go func() { - resp := httptest.NewRecorder() - m.ServeHTTP(resp, req) - }() - } - }) -} - -func Test_SetENV(t *testing.T) { - Convey("Get and save environment variable", t, func() { - tests := []struct { - in string - out string - }{ - {"", "development"}, - {"not_development", "not_development"}, - } - - for _, test := range tests { - setENV(test.in) - So(Env, ShouldEqual, test.out) - } - }) -} - -func Test_Config(t *testing.T) { - Convey("Set and get configuration object", t, func() { - So(Config(), ShouldNotBeNil) - cfg, err := SetConfig([]byte("")) - So(err, ShouldBeNil) - So(cfg, ShouldNotBeNil) - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go deleted file mode 100644 index dc9b341f361..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Recovery(t *testing.T) { - Convey("Recovery from panic", t, func() { - buf := bytes.NewBufferString("") - setENV(DEV) - - m := New() - m.Map(log.New(buf, "[Macaron] ", 0)) - m.Use(func(res http.ResponseWriter, req *http.Request) { - res.Header().Set("Content-Type", "unpredictable") - }) - m.Use(Recovery()) - m.Use(func(res http.ResponseWriter, req *http.Request) { - panic("here is a panic!") - }) - m.Get("/", func() {}) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusInternalServerError) - So(resp.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html") - So(buf.String(), ShouldNotBeEmpty) - }) - - Convey("Revocery panic to another response writer", t, func() { - resp := httptest.NewRecorder() - resp2 := httptest.NewRecorder() - setENV(DEV) - - m := New() - m.Use(Recovery()) - m.Use(func(c *Context) { - c.MapTo(resp2, (*http.ResponseWriter)(nil)) - panic("here is a panic!") - }) - m.Get("/", func() {}) - - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp2.Code, ShouldEqual, http.StatusInternalServerError) - So(resp2.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html") - So(resp2.Body.Len(), ShouldBeGreaterThan, 0) - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go deleted file mode 100644 index 16318733ec8..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go +++ /dev/null @@ -1,581 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "encoding/xml" - "html/template" - "net/http" - "net/http/httptest" - "testing" - "time" - - . "github.com/smartystreets/goconvey/convey" -) - -type Greeting struct { - One string `json:"one"` - Two string `json:"two"` -} - -type GreetingXML struct { - XMLName xml.Name `xml:"greeting"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func Test_Render_JSON(t *testing.T) { - Convey("Render JSON", t, func() { - m := Classic() - m.Use(Renderer()) - m.Get("/foobar", func(r Render) { - r.JSON(300, Greeting{"hello", "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`) - }) - - Convey("Render JSON with prefix", t, func() { - m := Classic() - prefix := ")]}',\n" - m.Use(Renderer(RenderOptions{ - PrefixJSON: []byte(prefix), - })) - m.Get("/foobar", func(r Render) { - r.JSON(300, Greeting{"hello", "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, prefix+`{"one":"hello","two":"world"}`) - }) - - Convey("Render Indented JSON", t, 
func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - IndentJSON: true, - })) - m.Get("/foobar", func(r Render) { - r.JSON(300, Greeting{"hello", "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, `{ - "one": "hello", - "two": "world" -}`) - }) - - Convey("Render JSON and return string", t, func() { - m := Classic() - m.Use(Renderer()) - m.Get("/foobar", func(r Render) { - result, err := r.JSONString(Greeting{"hello", "world"}) - So(err, ShouldBeNil) - So(result, ShouldEqual, `{"one":"hello","two":"world"}`) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - - Convey("Render with charset JSON", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Charset: "foobar", - })) - m.Get("/foobar", func(r Render) { - r.JSON(300, Greeting{"hello", "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=foobar") - So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`) - }) -} - -func Test_Render_XML(t *testing.T) { - Convey("Render XML", t, func() { - m := Classic() - m.Use(Renderer()) - m.Get("/foobar", func(r Render) { - r.XML(300, GreetingXML{One: "hello", Two: "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8") - 
So(resp.Body.String(), ShouldEqual, ``) - }) - - Convey("Render XML with prefix", t, func() { - m := Classic() - prefix := ")]}',\n" - m.Use(Renderer(RenderOptions{ - PrefixXML: []byte(prefix), - })) - m.Get("/foobar", func(r Render) { - r.XML(300, GreetingXML{One: "hello", Two: "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, prefix+``) - }) - - Convey("Render Indented XML", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - IndentXML: true, - })) - m.Get("/foobar", func(r Render) { - r.XML(300, GreetingXML{One: "hello", Two: "world"}) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusMultipleChoices) - So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, ``) - }) -} - -func Test_Render_HTML(t *testing.T) { - Convey("Render HTML", t, func() { - m := Classic() - m.Use(Renderers(RenderOptions{ - Directory: "fixtures/basic", - }, "fixtures/basic2")) - m.Get("/foobar", func(r Render) { - r.HTML(200, "hello", "jeremy") - r.SetTemplatePath("", "fixtures/basic2") - }) - m.Get("/foobar2", func(r Render) { - if r.HasTemplateSet("basic2") { - r.HTMLSet(200, "basic2", "hello", "jeremy") - } - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

Hello jeremy

") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/foobar2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

What's up, jeremy

") - - Convey("Change render templates path", func() { - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

What's up, jeremy

") - }) - }) - - Convey("Render HTML and return string", t, func() { - m := Classic() - m.Use(Renderers(RenderOptions{ - Directory: "fixtures/basic", - }, "basic2:fixtures/basic2")) - m.Get("/foobar", func(r Render) { - result, err := r.HTMLString("hello", "jeremy") - So(err, ShouldBeNil) - So(result, ShouldEqual, "

Hello jeremy

") - }) - m.Get("/foobar2", func(r Render) { - result, err := r.HTMLSetString("basic2", "hello", "jeremy") - So(err, ShouldBeNil) - So(result, ShouldEqual, "

What's up, jeremy

") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/foobar2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - - Convey("Render with nested HTML", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "admin/index", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

Admin jeremy

") - }) - - Convey("Render bad HTML", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "nope", nil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusInternalServerError) - So(resp.Body.String(), ShouldEqual, "html/template: \"nope\" is undefined\n") - }) - - Convey("Invalid template set", t, func() { - Convey("Empty template set argument", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - m := Classic() - m.Use(Renderers(RenderOptions{ - Directory: "fixtures/basic", - }, "")) - }) - - Convey("Bad template set path", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - m := Classic() - m.Use(Renderers(RenderOptions{ - Directory: "fixtures/basic", - }, "404")) - }) - }) -} - -func Test_Render_XHTML(t *testing.T) { - Convey("Render XHTML", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - HTMLContentType: ContentXHTML, - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "hello", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentXHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

Hello jeremy

") - }) -} - -func Test_Render_Extensions(t *testing.T) { - Convey("Render with extensions", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - Extensions: []string{".tmpl", ".html"}, - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "hypertext", nil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "Hypertext!") - }) -} - -func Test_Render_Funcs(t *testing.T) { - Convey("Render with functions", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/custom_funcs", - Funcs: []template.FuncMap{ - { - "myCustomFunc": func() string { - return "My custom function" - }, - }, - }, - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "index", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Body.String(), ShouldEqual, "My custom function") - }) -} - -func Test_Render_Layout(t *testing.T) { - Convey("Render with layout", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - Layout: "layout", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "content", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Body.String(), ShouldEqual, "head

jeremy

foot") - }) - - Convey("Render with current layout", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - Layout: "current_layout", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "content", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Body.String(), ShouldEqual, "content head

jeremy

content foot") - }) - - Convey("Render with override layout", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - Layout: "layout", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "content", "jeremy", HTMLOptions{ - Layout: "another_layout", - }) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "another head

jeremy

another foot") - }) -} - -func Test_Render_Delimiters(t *testing.T) { - Convey("Render with delimiters", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Delims: Delims{"{[{", "}]}"}, - Directory: "fixtures/basic", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "delims", "jeremy") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8") - So(resp.Body.String(), ShouldEqual, "

Hello jeremy

") - }) -} - -func Test_Render_BinaryData(t *testing.T) { - Convey("Render binary data", t, func() { - m := Classic() - m.Use(Renderer()) - m.Get("/foobar", func(r Render) { - r.RawData(200, []byte("hello there")) - }) - m.Get("/foobar2", func(r Render) { - r.RenderData(200, []byte("hello there")) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, ContentBinary) - So(resp.Body.String(), ShouldEqual, "hello there") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/foobar2", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, CONTENT_PLAIN) - So(resp.Body.String(), ShouldEqual, "hello there") - }) - - Convey("Render binary data with mime type", t, func() { - m := Classic() - m.Use(Renderer()) - m.Get("/foobar", func(r Render) { - r.RW().Header().Set(ContentType, "image/jpeg") - r.RawData(200, []byte("..jpeg data..")) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/foobar", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get(ContentType), ShouldEqual, "image/jpeg") - So(resp.Body.String(), ShouldEqual, "..jpeg data..") - }) -} - -func Test_Render_Status(t *testing.T) { - Convey("Render with status 204", t, func() { - resp := httptest.NewRecorder() - r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()} - r.Status(204) - So(resp.Code, ShouldEqual, http.StatusNoContent) - }) - - Convey("Render with status 404", t, func() { - resp := httptest.NewRecorder() - r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()} - r.Error(404) - So(resp.Code, ShouldEqual, http.StatusNotFound) - }) - - Convey("Render with status 500", t, func() { - resp 
:= httptest.NewRecorder() - r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()} - r.Error(500) - So(resp.Code, ShouldEqual, http.StatusInternalServerError) - }) -} - -func Test_Render_NoRace(t *testing.T) { - Convey("Make sure render has no race", t, func() { - m := Classic() - m.Use(Renderer(RenderOptions{ - Directory: "fixtures/basic", - })) - m.Get("/foobar", func(r Render) { - r.HTML(200, "hello", "world") - }) - - done := make(chan bool) - doreq := func() { - resp := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/foobar", nil) - m.ServeHTTP(resp, req) - done <- true - } - // Run two requests to check there is no race condition - go doreq() - go doreq() - <-done - <-done - }) -} - -func Test_GetExt(t *testing.T) { - Convey("Get extension", t, func() { - So(GetExt("test"), ShouldBeBlank) - So(GetExt("test.tmpl"), ShouldEqual, ".tmpl") - So(GetExt("test.go.tmpl"), ShouldEqual, ".go.tmpl") - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go deleted file mode 100644 index 322396b62d0..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package macaron - -import ( - "bufio" - "io" - "net" - "net/http" - "net/http/httptest" - "testing" - "time" - - . "github.com/smartystreets/goconvey/convey" -) - -type closeNotifyingRecorder struct { - *httptest.ResponseRecorder - closed chan bool -} - -func newCloseNotifyingRecorder() *closeNotifyingRecorder { - return &closeNotifyingRecorder{ - httptest.NewRecorder(), - make(chan bool, 1), - } -} - -func (c *closeNotifyingRecorder) close() { - c.closed <- true -} - -func (c *closeNotifyingRecorder) CloseNotify() <-chan bool { - return c.closed -} - -type hijackableResponse struct { - Hijacked bool -} - -func newHijackableResponse() *hijackableResponse { - return &hijackableResponse{} -} - -func (h *hijackableResponse) Header() http.Header { return nil } -func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil } -func (h *hijackableResponse) WriteHeader(code int) {} -func (h *hijackableResponse) Flush() {} -func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h.Hijacked = true - return nil, nil, nil -} - -func Test_ResponseWriter(t *testing.T) { - Convey("Write string to response writer", t, func() { - resp := httptest.NewRecorder() - rw := NewResponseWriter(resp) - rw.Write([]byte("Hello world")) - - So(resp.Code, ShouldEqual, rw.Status()) - So(resp.Body.String(), ShouldEqual, "Hello world") - So(rw.Status(), ShouldEqual, http.StatusOK) - So(rw.Size(), ShouldEqual, 11) - So(rw.Written(), ShouldBeTrue) - }) - - Convey("Write strings to response writer", t, func() { - resp := httptest.NewRecorder() - rw := NewResponseWriter(resp) - rw.Write([]byte("Hello world")) - rw.Write([]byte("foo bar bat baz")) - - So(resp.Code, ShouldEqual, rw.Status()) - So(resp.Body.String(), ShouldEqual, "Hello worldfoo bar bat baz") - So(rw.Status(), ShouldEqual, http.StatusOK) - So(rw.Size(), ShouldEqual, 26) - So(rw.Written(), ShouldBeTrue) - }) - - Convey("Write header to response writer", t, func() { - resp := httptest.NewRecorder() - 
rw := NewResponseWriter(resp) - rw.WriteHeader(http.StatusNotFound) - - So(resp.Code, ShouldEqual, rw.Status()) - So(resp.Body.String(), ShouldBeBlank) - So(rw.Status(), ShouldEqual, http.StatusNotFound) - So(rw.Size(), ShouldEqual, 0) - }) - - Convey("Write before response write", t, func() { - result := "" - resp := httptest.NewRecorder() - rw := NewResponseWriter(resp) - rw.Before(func(ResponseWriter) { - result += "foo" - }) - rw.Before(func(ResponseWriter) { - result += "bar" - }) - rw.WriteHeader(http.StatusNotFound) - - So(resp.Code, ShouldEqual, rw.Status()) - So(resp.Body.String(), ShouldBeBlank) - So(rw.Status(), ShouldEqual, http.StatusNotFound) - So(rw.Size(), ShouldEqual, 0) - So(result, ShouldEqual, "barfoo") - }) - - Convey("Response writer with Hijack", t, func() { - hijackable := newHijackableResponse() - rw := NewResponseWriter(hijackable) - hijacker, ok := rw.(http.Hijacker) - So(ok, ShouldBeTrue) - _, _, err := hijacker.Hijack() - So(err, ShouldBeNil) - So(hijackable.Hijacked, ShouldBeTrue) - }) - - Convey("Response writer with bad Hijack", t, func() { - hijackable := new(http.ResponseWriter) - rw := NewResponseWriter(*hijackable) - hijacker, ok := rw.(http.Hijacker) - So(ok, ShouldBeTrue) - _, _, err := hijacker.Hijack() - So(err, ShouldNotBeNil) - }) - - Convey("Response writer with close notify", t, func() { - resp := newCloseNotifyingRecorder() - rw := NewResponseWriter(resp) - closed := false - notifier := rw.(http.CloseNotifier).CloseNotify() - resp.close() - select { - case <-notifier: - closed = true - case <-time.After(time.Second): - } - So(closed, ShouldBeTrue) - }) - - Convey("Response writer with flusher", t, func() { - resp := httptest.NewRecorder() - rw := NewResponseWriter(resp) - _, ok := rw.(http.Flusher) - So(ok, ShouldBeTrue) - }) - - Convey("Response writer with flusher handler", t, func() { - m := Classic() - m.Get("/events", func(w http.ResponseWriter, r *http.Request) { - f, ok := w.(http.Flusher) - So(ok, ShouldBeTrue) - 
- w.Header().Set("Content-Type", "text/event-stream") - w.Header().Set("Cache-Control", "no-cache") - w.Header().Set("Connection", "keep-alive") - - for i := 0; i < 2; i++ { - time.Sleep(10 * time.Millisecond) - io.WriteString(w, "data: Hello\n\n") - f.Flush() - } - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/events", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Body.String(), ShouldEqual, "data: Hello\n\ndata: Hello\n\n") - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go deleted file mode 100644 index 02325b218b0..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "net/http" - "net/http/httptest" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Return_Handler(t *testing.T) { - Convey("Return with status and body", t, func() { - m := Classic() - m.Get("/", func() (int, string) { - return 418, "i'm a teapot" - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusTeapot) - So(resp.Body.String(), ShouldEqual, "i'm a teapot") - }) - - Convey("Return with pointer", t, func() { - m := Classic() - m.Get("/", func() *string { - str := "hello world" - return &str - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Body.String(), ShouldEqual, "hello world") - }) - - Convey("Return with byte slice", t, func() { - m := Classic() - m.Get("/", func() []byte { - return []byte("hello world") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Body.String(), ShouldEqual, "hello world") - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go deleted file mode 100644 index f4044965bb3..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "net/http" - "net/http/httptest" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_Router_Handle(t *testing.T) { - Convey("Register all HTTP methods routes", t, func() { - m := Classic() - m.Get("/get", func() string { - return "GET" - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "GET") - - m.Patch("/patch", func() string { - return "PATCH" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("PATCH", "/patch", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "PATCH") - - m.Post("/post", func() string { - return "POST" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("POST", "/post", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "POST") - - m.Put("/put", func() string { - return "PUT" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("PUT", "/put", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "PUT") - - m.Delete("/delete", func() string { - return "DELETE" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("DELETE", "/delete", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "DELETE") - - m.Options("/options", func() string { - return "OPTIONS" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("OPTIONS", "/options", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "OPTIONS") - - m.Head("/head", func() string { - return "HEAD" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("HEAD", "/head", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), 
ShouldEqual, "HEAD") - - m.Any("/any", func() string { - return "ANY" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/any", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "ANY") - - m.Route("/route", "GET,POST", func() string { - return "ROUTE" - }) - resp = httptest.NewRecorder() - req, err = http.NewRequest("POST", "/route", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "ROUTE") - }) - - Convey("Register all HTTP methods routes with combo", t, func() { - m := Classic() - m.SetURLPrefix("/prefix") - m.Use(Renderer()) - m.Combo("/", func(ctx *Context) { - ctx.Data["prefix"] = "Prefix_" - }). - Get(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "GET" }). - Patch(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PATCH" }). - Post(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "POST" }). - Put(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PUT" }). - Delete(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "DELETE" }). - Options(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "OPTIONS" }). 
- Head(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "HEAD" }) - - for name := range _HTTP_METHODS { - resp := httptest.NewRecorder() - req, err := http.NewRequest(name, "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Prefix_"+name) - } - - defer func() { - So(recover(), ShouldNotBeNil) - }() - m.Combo("/").Get(func() {}).Get(nil) - }) - - Convey("Register duplicated routes", t, func() { - r := NewRouter() - r.Get("/") - r.Get("/") - }) - - Convey("Register invalid HTTP method", t, func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - r := NewRouter() - r.Handle("404", "/", nil) - }) -} - -func Test_Router_Group(t *testing.T) { - Convey("Register route group", t, func() { - m := Classic() - m.Group("/api", func() { - m.Group("/v1", func() { - m.Get("/list", func() string { - return "Well done!" - }) - }) - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/api/v1/list", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Well done!") - }) -} - -func Test_Router_NotFound(t *testing.T) { - Convey("Custom not found handler", t, func() { - m := Classic() - m.Get("/", func() {}) - m.NotFound(func() string { - return "Custom not found" - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/404", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "Custom not found") - }) -} - -func Test_Router_splat(t *testing.T) { - Convey("Register router with glob", t, func() { - m := Classic() - m.Get("/*", func(ctx *Context) string { - return ctx.Params("*") - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/hahaha", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Body.String(), ShouldEqual, "hahaha") - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go 
b/Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go deleted file mode 100644 index ecef35236c6..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2013 Martini Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - "bytes" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "path" - "strings" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -var currentRoot, _ = os.Getwd() - -func Test_Static(t *testing.T) { - Convey("Serve static files", t, func() { - m := New() - m.Use(Static("./")) - - resp := httptest.NewRecorder() - resp.Body = new(bytes.Buffer) - req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get("Expires"), ShouldBeBlank) - So(resp.Body.Len(), ShouldBeGreaterThan, 0) - - Convey("Change static path", func() { - m.Get("/", func(ctx *Context) { - ctx.ChangeStaticPath("./", "inject") - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - resp = httptest.NewRecorder() - resp.Body = new(bytes.Buffer) - req, err = http.NewRequest("GET", "http://localhost:4000/inject.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusOK) 
- So(resp.Header().Get("Expires"), ShouldBeBlank) - So(resp.Body.Len(), ShouldBeGreaterThan, 0) - }) - }) - - Convey("Serve static files with local path", t, func() { - Root = os.TempDir() - f, err := ioutil.TempFile(Root, "static_content") - So(err, ShouldBeNil) - f.WriteString("Expected Content") - f.Close() - - m := New() - m.Use(Static(".")) - - resp := httptest.NewRecorder() - resp.Body = new(bytes.Buffer) - req, err := http.NewRequest("GET", "http://localhost:4000/"+path.Base(strings.Replace(f.Name(), "\\", "/", -1)), nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Header().Get("Expires"), ShouldBeBlank) - So(resp.Body.String(), ShouldEqual, "Expected Content") - }) - - Convey("Serve static files with head", t, func() { - m := New() - m.Use(Static(currentRoot)) - - resp := httptest.NewRecorder() - resp.Body = new(bytes.Buffer) - req, err := http.NewRequest("HEAD", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusOK) - So(resp.Body.Len(), ShouldEqual, 0) - }) - - Convey("Serve static files as post", t, func() { - m := New() - m.Use(Static(currentRoot)) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("POST", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldEqual, http.StatusNotFound) - }) - - Convey("Serve static files with bad directory", t, func() { - m := Classic() - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - So(resp.Code, ShouldNotEqual, http.StatusOK) - }) -} - -func Test_Static_Options(t *testing.T) { - Convey("Serve static files with options logging", t, func() { - var buf bytes.Buffer - m := NewWithLogger(&buf) - opt := StaticOptions{} - m.Use(Static(currentRoot, opt)) - - resp := httptest.NewRecorder() - req, err := 
http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") - - // Not disable logging. - m.Handlers() - buf.Reset() - opt.SkipLogging = true - m.Use(Static(currentRoot, opt)) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.Len(), ShouldEqual, 0) - }) - - Convey("Serve static files with options serve index", t, func() { - var buf bytes.Buffer - m := NewWithLogger(&buf) - opt := StaticOptions{IndexFile: "macaron.go"} - m.Use(Static(currentRoot, opt)) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") - }) - - Convey("Serve static files with options prefix", t, func() { - var buf bytes.Buffer - m := NewWithLogger(&buf) - opt := StaticOptions{Prefix: "public"} - m.Use(Static(currentRoot, opt)) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/public/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") - }) - - Convey("Serve static files with options expires", t, func() { - var buf bytes.Buffer - m := NewWithLogger(&buf) - opt := StaticOptions{Expires: func() string { return "46" }} - m.Use(Static(currentRoot, opt)) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Header().Get("Expires"), ShouldEqual, "46") - }) -} - -func Test_Static_Redirect(t *testing.T) { - Convey("Serve static files with redirect", t, func() { - m := New() - 
m.Use(Static(currentRoot, StaticOptions{Prefix: "/public"})) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/public", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusFound) - So(resp.Header().Get("Location"), ShouldEqual, "/public/") - }) -} - -func Test_Statics(t *testing.T) { - Convey("Serve multiple static routers", t, func() { - Convey("Register empty directory", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - - m := New() - m.Use(Statics(StaticOptions{})) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - - Convey("Serve normally", func() { - var buf bytes.Buffer - m := NewWithLogger(&buf) - m.Use(Statics(StaticOptions{}, currentRoot, currentRoot+"/inject")) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "http://localhost:4000/inject/inject.go", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - So(resp.Code, ShouldEqual, http.StatusOK) - So(buf.String(), ShouldEndWith, "[Macaron] [Static] Serving /inject/inject.go\n") - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go deleted file mode 100644 index 7bde5add69d..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright 2013 Beego Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -// NOTE: last sync 0c93364 on Dec 19, 2014. - -import ( - "path" - "regexp" - "strings" - - "github.com/Unknwon/com" -) - -type leafInfo struct { - // Names of wildcards that lead to this leaf. - // eg, ["id" "name"] for the wildcard ":id" and ":name". - wildcards []string - // Not nil if the leaf is regexp. - regexps *regexp.Regexp - handle Handle -} - -func (leaf *leafInfo) match(wildcardValues []string) (ok bool, params Params) { - if leaf.regexps == nil { - if len(wildcardValues) == 0 && len(leaf.wildcards) > 0 { - if com.IsSliceContainsStr(leaf.wildcards, ":") { - params = make(map[string]string) - j := 0 - for _, v := range leaf.wildcards { - if v == ":" { - continue - } - params[v] = "" - j += 1 - } - return true, params - } - return false, nil - } else if len(wildcardValues) == 0 { - return true, nil // Static path. - } - - // Match * - if len(leaf.wildcards) == 1 && leaf.wildcards[0] == ":splat" { - params = make(map[string]string) - params[":splat"] = path.Join(wildcardValues...) - return true, params - } - - // Match *.* - if len(leaf.wildcards) == 3 && leaf.wildcards[0] == "." { - params = make(map[string]string) - lastone := wildcardValues[len(wildcardValues)-1] - strs := strings.SplitN(lastone, ".", 2) - if len(strs) == 2 { - params[":ext"] = strs[1] - } else { - params[":ext"] = "" - } - params[":path"] = path.Join(wildcardValues[:len(wildcardValues)-1]...) 
+ "/" + strs[0] - return true, params - } - - // Match :id - params = make(map[string]string) - j := 0 - for _, v := range leaf.wildcards { - if v == ":" { - continue - } - if v == "." { - lastone := wildcardValues[len(wildcardValues)-1] - strs := strings.SplitN(lastone, ".", 2) - if len(strs) == 2 { - params[":ext"] = strs[1] - } else { - params[":ext"] = "" - } - if len(wildcardValues[j:]) == 1 { - params[":path"] = strs[0] - } else { - params[":path"] = path.Join(wildcardValues[j:]...) + "/" + strs[0] - } - return true, params - } - if len(wildcardValues) <= j { - return false, nil - } - params[v] = wildcardValues[j] - j++ - } - if len(params) != len(wildcardValues) { - return false, nil - } - return true, params - } - - if !leaf.regexps.MatchString(path.Join(wildcardValues...)) { - return false, nil - } - params = make(map[string]string) - matches := leaf.regexps.FindStringSubmatch(path.Join(wildcardValues...)) - for i, match := range matches[1:] { - params[leaf.wildcards[i]] = match - } - return true, params -} - -// Tree represents a router tree for Macaron instance. -type Tree struct { - fixroutes map[string]*Tree - wildcard *Tree - leaves []*leafInfo -} - -// NewTree initializes and returns a router tree. -func NewTree() *Tree { - return &Tree{ - fixroutes: make(map[string]*Tree), - } -} - -// splitPath splites patthen into parts. -// -// Examples: -// "/" -> [] -// "/admin" -> ["admin"] -// "/admin/" -> ["admin"] -// "/admin/users" -> ["admin", "users"] -func splitPath(pattern string) []string { - if len(pattern) == 0 { - return []string{} - } - - elements := strings.Split(pattern, "/") - if elements[0] == "" { - elements = elements[1:] - } - if elements[len(elements)-1] == "" { - elements = elements[:len(elements)-1] - } - return elements -} - -// AddRouter adds a new route to router tree. -func (t *Tree) AddRouter(pattern string, handle Handle) { - t.addSegments(splitPath(pattern), handle, nil, "") -} - -// splitSegment splits segment into parts. 
-// -// Examples: -// "admin" -> false, nil, "" -// ":id" -> true, [:id], "" -// "?:id" -> true, [: :id], "" : meaning can empty -// ":id:int" -> true, [:id], ([0-9]+) -// ":name:string" -> true, [:name], ([\w]+) -// ":id([0-9]+)" -> true, [:id], ([0-9]+) -// ":id([0-9]+)_:name" -> true, [:id :name], ([0-9]+)_(.+) -// "cms_:id_:page.html" -> true, [:id :page], cms_(.+)_(.+).html -// "*" -> true, [:splat], "" -// "*.*" -> true,[. :path :ext], "" . meaning separator -func splitSegment(key string) (bool, []string, string) { - if strings.HasPrefix(key, "*") { - if key == "*.*" { - return true, []string{".", ":path", ":ext"}, "" - } else { - return true, []string{":splat"}, "" - } - } - if strings.ContainsAny(key, ":") { - var paramsNum int - var out []rune - var start bool - var startexp bool - var param []rune - var expt []rune - var skipnum int - params := []string{} - reg := regexp.MustCompile(`[a-zA-Z0-9]+`) - for i, v := range key { - if skipnum > 0 { - skipnum -= 1 - continue - } - if start { - //:id:int and :name:string - if v == ':' { - if len(key) >= i+4 { - if key[i+1:i+4] == "int" { - out = append(out, []rune("([0-9]+)")...) - params = append(params, ":"+string(param)) - start = false - startexp = false - skipnum = 3 - param = make([]rune, 0) - paramsNum += 1 - continue - } - } - if len(key) >= i+7 { - if key[i+1:i+7] == "string" { - out = append(out, []rune(`([\w]+)`)...) - params = append(params, ":"+string(param)) - paramsNum += 1 - start = false - startexp = false - skipnum = 6 - param = make([]rune, 0) - continue - } - } - } - // params only support a-zA-Z0-9 - if reg.MatchString(string(v)) { - param = append(param, v) - continue - } - if v != '(' { - out = append(out, []rune(`(.+)`)...) 
- params = append(params, ":"+string(param)) - param = make([]rune, 0) - paramsNum += 1 - start = false - startexp = false - } - } - if startexp { - if v != ')' { - expt = append(expt, v) - continue - } - } - if v == ':' { - param = make([]rune, 0) - start = true - } else if v == '(' { - startexp = true - start = false - params = append(params, ":"+string(param)) - paramsNum += 1 - expt = make([]rune, 0) - expt = append(expt, '(') - } else if v == ')' { - startexp = false - expt = append(expt, ')') - out = append(out, expt...) - param = make([]rune, 0) - } else if v == '?' { - params = append(params, ":") - } else { - out = append(out, v) - } - } - if len(param) > 0 { - if paramsNum > 0 { - out = append(out, []rune(`(.+)`)...) - } - params = append(params, ":"+string(param)) - } - return true, params, string(out) - } else { - return false, nil, "" - } -} - -// addSegments add segments to the router tree. -func (t *Tree) addSegments(segments []string, handle Handle, wildcards []string, reg string) { - // Fixed root route. - if len(segments) == 0 { - if reg != "" { - filterCards := make([]string, 0, len(wildcards)) - for _, v := range wildcards { - if v == ":" || v == "." { - continue - } - filterCards = append(filterCards, v) - } - t.leaves = append(t.leaves, &leafInfo{ - handle: handle, - wildcards: filterCards, - regexps: regexp.MustCompile("^" + reg + "$"), - }) - } else { - t.leaves = append(t.leaves, &leafInfo{ - handle: handle, - wildcards: wildcards, - }) - } - return - } - - seg := segments[0] - iswild, params, regexpStr := splitSegment(seg) - //for the router /login/*/access match /login/2009/11/access - if !iswild && com.IsSliceContainsStr(wildcards, ":splat") { - iswild = true - regexpStr = seg - } - if seg == "*" && len(wildcards) > 0 && reg == "" { - iswild = true - regexpStr = "(.+)" - } - if iswild { - if t.wildcard == nil { - t.wildcard = NewTree() - } - if regexpStr != "" { - if reg == "" { - rr := "" - for _, w := range wildcards { - if w == "." 
|| w == ":" { - continue - } - if w == ":splat" { - rr = rr + "(.+)/" - } else { - rr = rr + "([^/]+)/" - } - } - regexpStr = rr + regexpStr - } else { - regexpStr = "/" + regexpStr - } - } else if reg != "" { - if seg == "*.*" { - regexpStr = "/([^.]+).(.+)" - } else { - for _, w := range params { - if w == "." || w == ":" { - continue - } - regexpStr = "/([^/]+)" + regexpStr - } - } - } - t.wildcard.addSegments(segments[1:], handle, append(wildcards, params...), reg+regexpStr) - } else { - subTree, ok := t.fixroutes[seg] - if !ok { - subTree = NewTree() - t.fixroutes[seg] = subTree - } - subTree.addSegments(segments[1:], handle, wildcards, reg) - } -} - -func (t *Tree) match(segments []string, wildcardValues []string) (handle Handle, params Params) { - // Handle leaf nodes. - if len(segments) == 0 { - for _, l := range t.leaves { - if ok, pa := l.match(wildcardValues); ok { - return l.handle, pa - } - } - if t.wildcard != nil { - for _, l := range t.wildcard.leaves { - if ok, pa := l.match(wildcardValues); ok { - return l.handle, pa - } - } - - } - return nil, nil - } - - seg, segs := segments[0], segments[1:] - - subTree, ok := t.fixroutes[seg] - if ok { - handle, params = subTree.match(segs, wildcardValues) - } else if len(segs) == 0 { //.json .xml - if subindex := strings.LastIndex(seg, "."); subindex != -1 { - subTree, ok = t.fixroutes[seg[:subindex]] - if ok { - handle, params = subTree.match(segs, wildcardValues) - if handle != nil { - if params == nil { - params = make(map[string]string) - } - params[":ext"] = seg[subindex+1:] - return handle, params - } - } - } - } - if handle == nil && t.wildcard != nil { - handle, params = t.wildcard.match(segs, append(wildcardValues, seg)) - } - if handle == nil { - for _, l := range t.leaves { - if ok, pa := l.match(append(wildcardValues, segments...)); ok { - return l.handle, pa - } - } - } - return handle, params -} - -// Match returns Handle and params if any route is matched. 
-func (t *Tree) Match(pattern string) (Handle, Params) { - if len(pattern) == 0 || pattern[0] != '/' { - return nil, nil - } - - return t.match(splitPath(pattern), nil) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go b/Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go deleted file mode 100644 index c8144160e8c..00000000000 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package macaron - -import ( - // "net/http" - "strings" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_splitSegment(t *testing.T) { - type result struct { - Ok bool - Parts []string - Regex string - } - cases := map[string]result{ - "admin": result{false, nil, ""}, - ":id": result{true, []string{":id"}, ""}, - "?:id": result{true, []string{":", ":id"}, ""}, - ":id:int": result{true, []string{":id"}, "([0-9]+)"}, - ":name:string": result{true, []string{":name"}, `([\w]+)`}, - ":id([0-9]+)": result{true, []string{":id"}, "([0-9]+)"}, - ":id([0-9]+)_:name": result{true, []string{":id", ":name"}, "([0-9]+)_(.+)"}, - "cms_:id_:page.html": result{true, []string{":id", ":page"}, "cms_(.+)_(.+).html"}, - "*": result{true, []string{":splat"}, ""}, - "*.*": result{true, []string{".", ":path", ":ext"}, ""}, - } - Convey("Splits segment into parts", t, func() { - for key, result := range cases { - ok, parts, regex := splitSegment(key) - So(ok, ShouldEqual, result.Ok) - if result.Parts == nil { - So(parts, ShouldBeNil) - } else { - So(parts, ShouldNotBeNil) - So(strings.Join(parts, " "), ShouldEqual, strings.Join(result.Parts, " ")) - } - So(regex, ShouldEqual, result.Regex) - } - }) -} - -func Test_Tree_Match(t *testing.T) { - type result struct { - pattern string - reqUrl string - params map[string]string - } - - cases := []result{ - {"/:id", "/123", map[string]string{":id": "123"}}, - {"/hello/?:id", "/hello", map[string]string{":id": ""}}, - {"/", "/", nil}, - {"", "", nil}, - {"/customer/login", "/customer/login", nil}, - {"/customer/login", "/customer/login.json", map[string]string{":ext": "json"}}, - {"/*", "/customer/123", map[string]string{":splat": "customer/123"}}, - {"/*", "/customer/2009/12/11", map[string]string{":splat": "customer/2009/12/11"}}, - {"/aa/*/bb", "/aa/2009/bb", map[string]string{":splat": "2009"}}, - {"/cc/*/dd", "/cc/2009/11/dd", map[string]string{":splat": "2009/11"}}, - {"/ee/:year/*/ff", "/ee/2009/11/ff", map[string]string{":year": "2009", ":splat": "11"}}, - 
{"/thumbnail/:size/uploads/*", "/thumbnail/100x100/uploads/items/2014/04/20/dPRCdChkUd651t1Hvs18.jpg", - map[string]string{":size": "100x100", ":splat": "items/2014/04/20/dPRCdChkUd651t1Hvs18.jpg"}}, - {"/*.*", "/nice/api.json", map[string]string{":path": "nice/api", ":ext": "json"}}, - {"/:name/*.*", "/nice/api.json", map[string]string{":name": "nice", ":path": "api", ":ext": "json"}}, - {"/:name/test/*.*", "/nice/test/api.json", map[string]string{":name": "nice", ":path": "api", ":ext": "json"}}, - {"/dl/:width:int/:height:int/*.*", "/dl/48/48/05ac66d9bda00a3acf948c43e306fc9a.jpg", - map[string]string{":width": "48", ":height": "48", ":ext": "jpg", ":path": "05ac66d9bda00a3acf948c43e306fc9a"}}, - {"/v1/shop/:id:int", "/v1/shop/123", map[string]string{":id": "123"}}, - {"/:year:int/:month:int/:id/:endid", "/1111/111/aaa/aaa", map[string]string{":year": "1111", ":month": "111", ":id": "aaa", ":endid": "aaa"}}, - {"/v1/shop/:id/:name", "/v1/shop/123/nike", map[string]string{":id": "123", ":name": "nike"}}, - {"/v1/shop/:id/account", "/v1/shop/123/account", map[string]string{":id": "123"}}, - {"/v1/shop/:name:string", "/v1/shop/nike", map[string]string{":name": "nike"}}, - {"/v1/shop/:id([0-9]+)", "/v1/shop//123", map[string]string{":id": "123"}}, - {"/v1/shop/:id([0-9]+)_:name", "/v1/shop/123_nike", map[string]string{":id": "123", ":name": "nike"}}, - {"/v1/shop/:id(.+)_cms.html", "/v1/shop/123_cms.html", map[string]string{":id": "123"}}, - {"/v1/shop/cms_:id(.+)_:page(.+).html", "/v1/shop/cms_123_1.html", map[string]string{":id": "123", ":page": "1"}}, - {"/v1/:v/cms/aaa_:id(.+)_:page(.+).html", "/v1/2/cms/aaa_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}}, - {"/v1/:v/cms_:id(.+)_:page(.+).html", "/v1/2/cms_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}}, - {"/v1/:v(.+)_cms/ttt_:id(.+)_:page(.+).html", "/v1/2_cms/ttt_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}}, - } - - Convey("Match routers 
in tree", t, func() { - for _, c := range cases { - t := NewTree() - t.AddRouter(c.pattern, nil) - _, params := t.Match(c.reqUrl) - if params != nil { - for k, v := range c.params { - vv, ok := params[k] - So(ok, ShouldBeTrue) - So(vv, ShouldEqual, v) - } - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml b/Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml new file mode 100644 index 00000000000..2774fb35d50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/LICENSE b/Godeps/_workspace/src/github.com/go-macaron/binding/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/binding/LICENSE rename to Godeps/_workspace/src/github.com/go-macaron/binding/LICENSE diff --git a/Godeps/_workspace/src/github.com/go-macaron/binding/README.md b/Godeps/_workspace/src/github.com/go-macaron/binding/README.md new file mode 100644 index 00000000000..a6748b57a1d --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/binding/README.md @@ -0,0 +1,20 @@ +# binding [![Build Status](https://travis-ci.org/go-macaron/binding.svg?branch=master)](https://travis-ci.org/go-macaron/binding) [![](http://gocover.io/_badge/github.com/go-macaron/binding)](http://gocover.io/github.com/go-macaron/binding) + +Middleware binding provides request data binding and validation for [Macaron](https://github.com/go-macaron/macaron). 
+ +### Installation + + go get github.com/go-macaron/binding + +## Getting Help + +- [API Reference](https://gowalker.org/github.com/go-macaron/binding) +- [Documentation](http://go-macaron.com/docs/middlewares/binding) + +## Credits + +This package is a modified version of [martini-contrib/binding](https://github.com/martini-contrib/binding). + +## License + +This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/binding.go b/Godeps/_workspace/src/github.com/go-macaron/binding/binding.go similarity index 70% rename from Godeps/_workspace/src/github.com/macaron-contrib/binding/binding.go rename to Godeps/_workspace/src/github.com/go-macaron/binding/binding.go index 651d6660156..4b958ec964d 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/binding.go +++ b/Godeps/_workspace/src/github.com/go-macaron/binding/binding.go @@ -1,5 +1,5 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -29,12 +29,10 @@ import ( "unicode/utf8" "github.com/Unknwon/com" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" ) -// NOTE: last sync 1928ed2 on Aug 26, 2014. - -const _VERSION = "0.0.4" +const _VERSION = "0.2.0" func Version() string { return _VERSION @@ -58,6 +56,7 @@ func bind(ctx *macaron.Context, obj interface{}, ifacePtr ...interface{}) { errors.Add([]string{}, ERR_CONTENT_TYPE, "Unsupported Content-Type") } ctx.Map(errors) + ctx.Map(obj) // Map a fake struct so handler won't panic. 
} } else { ctx.Invoke(Form(obj, ifacePtr...)) @@ -175,6 +174,14 @@ func MultipartForm(formStruct interface{}, ifacePtr ...interface{}) macaron.Hand if parseErr != nil { errors.Add([]string{}, ERR_DESERIALIZATION, parseErr.Error()) } + + if ctx.Req.Form == nil { + ctx.Req.ParseForm() + } + for k, v := range form.Value { + ctx.Req.Form[k] = append(ctx.Req.Form[k], v...) + } + ctx.Req.MultipartForm = form } } @@ -310,122 +317,162 @@ func validateStruct(errors Errors, obj interface{}) Errors { field.Type.Elem().Kind() == reflect.Struct) { errors = validateStruct(errors, fieldValue) } + errors = validateField(errors, zero, field, fieldVal, fieldValue) + } + return errors +} - VALIDATE_RULES: - for _, rule := range strings.Split(field.Tag.Get("binding"), ";") { - if len(rule) == 0 { - continue +func validateField(errors Errors, zero interface{}, field reflect.StructField, fieldVal reflect.Value, fieldValue interface{}) Errors { + if fieldVal.Kind() == reflect.Slice { + for i := 0; i < fieldVal.Len(); i++ { + sliceVal := fieldVal.Index(i) + if sliceVal.Kind() == reflect.Ptr { + sliceVal = sliceVal.Elem() } - switch { - case rule == "Required": - if reflect.DeepEqual(zero, fieldValue) { - errors.Add([]string{field.Name}, ERR_REQUIRED, "Required") + sliceValue := sliceVal.Interface() + zero := reflect.Zero(sliceVal.Type()).Interface() + if sliceVal.Kind() == reflect.Struct || + (sliceVal.Kind() == reflect.Ptr && !reflect.DeepEqual(zero, sliceValue) && + sliceVal.Elem().Kind() == reflect.Struct) { + errors = validateStruct(errors, sliceValue) + } + /* Apply validation rules to each item in a slice. 
ISSUE #3 + else { + errors = validateField(errors, zero, field, sliceVal, sliceValue) + }*/ + } + } + +VALIDATE_RULES: + for _, rule := range strings.Split(field.Tag.Get("binding"), ";") { + if len(rule) == 0 { + continue + } + + switch { + case rule == "OmitEmpty": + if reflect.DeepEqual(zero, fieldValue) { + break VALIDATE_RULES + } + case rule == "Required": + if reflect.DeepEqual(zero, fieldValue) { + errors.Add([]string{field.Name}, ERR_REQUIRED, "Required") + break VALIDATE_RULES + } + case rule == "AlphaDash": + if alphaDashPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { + errors.Add([]string{field.Name}, ERR_ALPHA_DASH, "AlphaDash") + break VALIDATE_RULES + } + case rule == "AlphaDashDot": + if alphaDashDotPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { + errors.Add([]string{field.Name}, ERR_ALPHA_DASH_DOT, "AlphaDashDot") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "Size("): + size, _ := strconv.Atoi(rule[5 : len(rule)-1]) + if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) != size { + errors.Add([]string{field.Name}, ERR_SIZE, "Size") + break VALIDATE_RULES + } + v := reflect.ValueOf(fieldValue) + if v.Kind() == reflect.Slice && v.Len() != size { + errors.Add([]string{field.Name}, ERR_SIZE, "Size") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "MinSize("): + min, _ := strconv.Atoi(rule[8 : len(rule)-1]) + if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) < min { + errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize") + break VALIDATE_RULES + } + v := reflect.ValueOf(fieldValue) + if v.Kind() == reflect.Slice && v.Len() < min { + errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "MaxSize("): + max, _ := strconv.Atoi(rule[8 : len(rule)-1]) + if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) > max { + errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize") + break VALIDATE_RULES + } + v := 
reflect.ValueOf(fieldValue) + if v.Kind() == reflect.Slice && v.Len() > max { + errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "Range("): + nums := strings.Split(rule[6:len(rule)-1], ",") + if len(nums) != 2 { + break VALIDATE_RULES + } + val := com.StrTo(fmt.Sprintf("%v", fieldValue)).MustInt() + if val < com.StrTo(nums[0]).MustInt() || val > com.StrTo(nums[1]).MustInt() { + errors.Add([]string{field.Name}, ERR_RANGE, "Range") + break VALIDATE_RULES + } + case rule == "Email": + if !emailPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { + errors.Add([]string{field.Name}, ERR_EMAIL, "Email") + break VALIDATE_RULES + } + case rule == "Url": + str := fmt.Sprintf("%v", fieldValue) + if len(str) == 0 { + continue + } else if !urlPattern.MatchString(str) { + errors.Add([]string{field.Name}, ERR_URL, "Url") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "In("): + if !in(fieldValue, rule[3:len(rule)-1]) { + errors.Add([]string{field.Name}, ERR_IN, "In") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "NotIn("): + if in(fieldValue, rule[6:len(rule)-1]) { + errors.Add([]string{field.Name}, ERR_NOT_INT, "NotIn") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "Include("): + if !strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) { + errors.Add([]string{field.Name}, ERR_INCLUDE, "Include") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "Exclude("): + if strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) { + errors.Add([]string{field.Name}, ERR_EXCLUDE, "Exclude") + break VALIDATE_RULES + } + case strings.HasPrefix(rule, "Default("): + if reflect.DeepEqual(zero, fieldValue) { + if fieldVal.CanAddr() { + setWithProperType(field.Type.Kind(), rule[8:len(rule)-1], fieldVal, field.Tag.Get("form"), errors) + } else { + errors.Add([]string{field.Name}, ERR_EXCLUDE, "Default") break VALIDATE_RULES } - case rule == "AlphaDash": - if 
alphaDashPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { - errors.Add([]string{field.Name}, ERR_ALPHA_DASH, "AlphaDash") + } + default: + // Apply custom validation rules. + for i := range ruleMapper { + if ruleMapper[i].IsMatch(rule) && !ruleMapper[i].IsValid(errors, field.Name, fieldValue) { break VALIDATE_RULES } - case rule == "AlphaDashDot": - if alphaDashDotPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { - errors.Add([]string{field.Name}, ERR_ALPHA_DASH_DOT, "AlphaDashDot") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "MinSize("): - min, _ := strconv.Atoi(rule[8 : len(rule)-1]) - if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) < min { - errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize") - break VALIDATE_RULES - } - v := reflect.ValueOf(fieldValue) - if v.Kind() == reflect.Slice && v.Len() < min { - errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "MaxSize("): - max, _ := strconv.Atoi(rule[8 : len(rule)-1]) - if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) > max { - errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize") - break VALIDATE_RULES - } - v := reflect.ValueOf(fieldValue) - if v.Kind() == reflect.Slice && v.Len() > max { - errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "Range("): - nums := strings.Split(rule[6:len(rule)-1], ",") - if len(nums) != 2 { - break VALIDATE_RULES - } - val := com.StrTo(fmt.Sprintf("%v", fieldValue)).MustInt() - if val < com.StrTo(nums[0]).MustInt() || val > com.StrTo(nums[1]).MustInt() { - errors.Add([]string{field.Name}, ERR_RANGE, "Range") - break VALIDATE_RULES - } - case rule == "Email": - if !emailPattern.MatchString(fmt.Sprintf("%v", fieldValue)) { - errors.Add([]string{field.Name}, ERR_EMAIL, "Email") - break VALIDATE_RULES - } - case rule == "Url": - str := fmt.Sprintf("%v", fieldValue) - if len(str) == 0 { - 
continue - } else if !urlPattern.MatchString(str) { - errors.Add([]string{field.Name}, ERR_URL, "Url") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "In("): - if !in(fieldValue, rule[3:len(rule)-1]) { - errors.Add([]string{field.Name}, ERR_IN, "In") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "NotIn("): - if in(fieldValue, rule[6:len(rule)-1]) { - errors.Add([]string{field.Name}, ERR_NOT_INT, "NotIn") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "Include("): - if !strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) { - errors.Add([]string{field.Name}, ERR_INCLUDE, "Include") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "Exclude("): - if strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) { - errors.Add([]string{field.Name}, ERR_EXCLUDE, "Exclude") - break VALIDATE_RULES - } - case strings.HasPrefix(rule, "Default("): - if reflect.DeepEqual(zero, fieldValue) { - if fieldVal.CanAddr() { - setWithProperType(field.Type.Kind(), rule[8:len(rule)-1], fieldVal, field.Tag.Get("form"), errors) - } else { - errors.Add([]string{field.Name}, ERR_EXCLUDE, "Default") - break VALIDATE_RULES - } - } - default: - // Apply custom validation rules. - for i := range ruleMapper { - if ruleMapper[i].IsMatch(rule) && !ruleMapper[i].IsValid(errors, field.Name, fieldValue) { - break VALIDATE_RULES - } - } } } } return errors } -// NameMapper represents a form/json tag name mapper. +// NameMapper represents a form tag name mapper. 
type NameMapper func(string) string var ( nameMapper = func(field string) string { - newstr := make([]rune, 0, 10) + newstr := make([]rune, 0, len(field)) for i, chr := range field { if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { if i > 0 { @@ -468,42 +515,40 @@ func mapForm(formStruct reflect.Value, form map[string][]string, } inputFieldName := parseFormName(typeField.Name, typeField.Tag.Get("form")) - if len(inputFieldName) > 0 { - if !structField.CanSet() { - continue - } + if len(inputFieldName) == 0 || !structField.CanSet() { + continue + } - inputValue, exists := form[inputFieldName] - if exists { - numElems := len(inputValue) - if structField.Kind() == reflect.Slice && numElems > 0 { - sliceOf := structField.Type().Elem().Kind() - slice := reflect.MakeSlice(structField.Type(), numElems, numElems) - for i := 0; i < numElems; i++ { - setWithProperType(sliceOf, inputValue[i], slice.Index(i), inputFieldName, errors) - } - formStruct.Field(i).Set(slice) - } else { - setWithProperType(typeField.Type.Kind(), inputValue[0], structField, inputFieldName, errors) - } - continue - } - - inputFile, exists := formfile[inputFieldName] - if !exists { - continue - } - fhType := reflect.TypeOf((*multipart.FileHeader)(nil)) - numElems := len(inputFile) - if structField.Kind() == reflect.Slice && numElems > 0 && structField.Type().Elem() == fhType { + inputValue, exists := form[inputFieldName] + if exists { + numElems := len(inputValue) + if structField.Kind() == reflect.Slice && numElems > 0 { + sliceOf := structField.Type().Elem().Kind() slice := reflect.MakeSlice(structField.Type(), numElems, numElems) for i := 0; i < numElems; i++ { - slice.Index(i).Set(reflect.ValueOf(inputFile[i])) + setWithProperType(sliceOf, inputValue[i], slice.Index(i), inputFieldName, errors) } - structField.Set(slice) - } else if structField.Type() == fhType { - structField.Set(reflect.ValueOf(inputFile[0])) + formStruct.Field(i).Set(slice) + } else { + setWithProperType(typeField.Type.Kind(), 
inputValue[0], structField, inputFieldName, errors) } + continue + } + + inputFile, exists := formfile[inputFieldName] + if !exists { + continue + } + fhType := reflect.TypeOf((*multipart.FileHeader)(nil)) + numElems := len(inputFile) + if structField.Kind() == reflect.Slice && numElems > 0 && structField.Type().Elem() == fhType { + slice := reflect.MakeSlice(structField.Type(), numElems, numElems) + for i := 0; i < numElems; i++ { + slice.Index(i).Set(reflect.ValueOf(inputFile[i])) + } + structField.Set(slice) + } else if structField.Type() == fhType { + structField.Set(reflect.ValueOf(inputFile[0])) } } } diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errors.go b/Godeps/_workspace/src/github.com/go-macaron/binding/errors.go similarity index 97% rename from Godeps/_workspace/src/github.com/macaron-contrib/binding/errors.go rename to Godeps/_workspace/src/github.com/go-macaron/binding/errors.go index 9a9c006b866..8cbe44a9d17 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errors.go +++ b/Godeps/_workspace/src/github.com/go-macaron/binding/errors.go @@ -1,5 +1,5 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. 
You may obtain @@ -27,6 +27,7 @@ const ( ERR_REQUIRED = "RequiredError" ERR_ALPHA_DASH = "AlphaDashError" ERR_ALPHA_DASH_DOT = "AlphaDashDotError" + ERR_SIZE = "SizeError" ERR_MIN_SIZE = "MinSizeError" ERR_MAX_SIZE = "MaxSizeError" ERR_RANGE = "RangeError" diff --git a/Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml b/Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml new file mode 100644 index 00000000000..2774fb35d50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/LICENSE b/Godeps/_workspace/src/github.com/go-macaron/gzip/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/LICENSE rename to Godeps/_workspace/src/github.com/go-macaron/gzip/LICENSE diff --git a/Godeps/_workspace/src/github.com/go-macaron/gzip/README.md b/Godeps/_workspace/src/github.com/go-macaron/gzip/README.md new file mode 100644 index 00000000000..0c438a72027 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/gzip/README.md @@ -0,0 +1,20 @@ +# gzip [![Build Status](https://travis-ci.org/go-macaron/gzip.svg?branch=master)](https://travis-ci.org/go-macaron/gzip) [![](http://gocover.io/_badge/github.com/go-macaron/gzip)](http://gocover.io/github.com/go-macaron/gzip) + +Middleware gzip provides compress to responses for [Macaron](https://github.com/go-macaron/macaron). + +### Installation + + go get github.com/go-macaron/gzip + +## Getting Help + +- [API Reference](https://gowalker.org/github.com/go-macaron/gzip) +- [Documentation](http://go-macaron.com/docs/middlewares/gzip) + +## Credits + +This package is a modified version of [martini-contrib/gzip](https://github.com/martini-contrib/gzip). + +## License + +This project is under the Apache License, Version 2.0. 
See the [LICENSE](LICENSE) file for the full license text. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go b/Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go new file mode 100644 index 00000000000..ca54ec67d32 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go @@ -0,0 +1,118 @@ +// Copyright 2013 Martini Authors +// Copyright 2015 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package gzip + +import ( + "bufio" + "fmt" + "net" + "net/http" + "strings" + + "github.com/klauspost/compress/gzip" + "gopkg.in/macaron.v1" +) + +const ( + _HEADER_ACCEPT_ENCODING = "Accept-Encoding" + _HEADER_CONTENT_ENCODING = "Content-Encoding" + _HEADER_CONTENT_LENGTH = "Content-Length" + _HEADER_CONTENT_TYPE = "Content-Type" + _HEADER_VARY = "Vary" +) + +// Options represents a struct for specifying configuration options for the GZip middleware. +type Options struct { + // Compression level. Can be DefaultCompression(-1), ConstantCompression(-2) + // or any integer value between BestSpeed(1) and BestCompression(9) inclusive. 
+ CompressionLevel int +} + +func isCompressionLevelValid(level int) bool { + return level == gzip.DefaultCompression || + level == gzip.ConstantCompression || + (level >= gzip.BestSpeed && level <= gzip.BestCompression) +} + +func prepareOptions(options []Options) Options { + var opt Options + if len(options) > 0 { + opt = options[0] + } + + if !isCompressionLevelValid(opt.CompressionLevel) { + // For web content, level 4 seems to be a sweet spot. + opt.CompressionLevel = 4 + } + return opt +} + +// Gziper returns a Handler that adds gzip compression to all requests. +// Make sure to include the Gzip middleware above other middleware +// that alter the response body (like the render middleware). +func Gziper(options ...Options) macaron.Handler { + opt := prepareOptions(options) + + return func(ctx *macaron.Context) { + if !strings.Contains(ctx.Req.Header.Get(_HEADER_ACCEPT_ENCODING), "gzip") { + return + } + + headers := ctx.Resp.Header() + headers.Set(_HEADER_CONTENT_ENCODING, "gzip") + headers.Set(_HEADER_VARY, _HEADER_ACCEPT_ENCODING) + + // We've made sure compression level is valid in prepareGzipOptions, + // no need to check same error again. 
+ gz, err := gzip.NewWriterLevel(ctx.Resp, opt.CompressionLevel) + if err != nil { + panic(err.Error()) + } + defer gz.Close() + + gzw := gzipResponseWriter{gz, ctx.Resp} + ctx.Resp = gzw + ctx.MapTo(gzw, (*http.ResponseWriter)(nil)) + if ctx.Render != nil { + ctx.Render.SetResponseWriter(gzw) + } + + ctx.Next() + + // delete content length after we know we have been written to + gzw.Header().Del("Content-Length") + } +} + +type gzipResponseWriter struct { + w *gzip.Writer + macaron.ResponseWriter +} + +func (grw gzipResponseWriter) Write(p []byte) (int, error) { + if len(grw.Header().Get(_HEADER_CONTENT_TYPE)) == 0 { + grw.Header().Set(_HEADER_CONTENT_TYPE, http.DetectContentType(p)) + } + return grw.w.Write(p) +} + +func (grw gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hijacker, ok := grw.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") + } + return hijacker.Hijack() +} diff --git a/Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml b/Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml new file mode 100644 index 00000000000..2774fb35d50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE b/Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. 
+ +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/go-macaron/inject/README.md b/Godeps/_workspace/src/github.com/go-macaron/inject/README.md new file mode 100644 index 00000000000..c65c76955dd --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/inject/README.md @@ -0,0 +1,11 @@ +# inject [![Build Status](https://travis-ci.org/go-macaron/inject.svg?branch=master)](https://travis-ci.org/go-macaron/inject) [![](http://gocover.io/_badge/github.com/go-macaron/inject)](http://gocover.io/github.com/go-macaron/inject) + +Package inject provides utilities for mapping and injecting dependencies in various ways. 
+ +**This a modified version of [codegangsta/inject](https://github.com/codegangsta/inject) for special purpose of Macaron** + +**Please use the original version if you need dependency injection feature** + +## License + +This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.go b/Godeps/_workspace/src/github.com/go-macaron/inject/inject.go similarity index 89% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.go rename to Godeps/_workspace/src/github.com/go-macaron/inject/inject.go index 87cbe22b5e8..b4a6ea45fe7 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.go +++ b/Godeps/_workspace/src/github.com/go-macaron/inject/inject.go @@ -1,3 +1,18 @@ +// Copyright 2013 Jeremy Saenz +// Copyright 2015 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + // Package inject provides utilities for mapping and injecting dependencies in various ways. 
package inject diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore b/Godeps/_workspace/src/github.com/go-macaron/session/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore rename to Godeps/_workspace/src/github.com/go-macaron/session/.gitignore diff --git a/Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml b/Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml new file mode 100644 index 00000000000..2774fb35d50 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/LICENSE b/Godeps/_workspace/src/github.com/go-macaron/session/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/LICENSE rename to Godeps/_workspace/src/github.com/go-macaron/session/LICENSE diff --git a/Godeps/_workspace/src/github.com/go-macaron/session/README.md b/Godeps/_workspace/src/github.com/go-macaron/session/README.md new file mode 100644 index 00000000000..280ace36dc7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-macaron/session/README.md @@ -0,0 +1,20 @@ +# session [![Build Status](https://travis-ci.org/go-macaron/session.svg?branch=master)](https://travis-ci.org/go-macaron/session) [![](http://gocover.io/_badge/github.com/go-macaron/session)](http://gocover.io/github.com/go-macaron/session) + +Middleware session provides session management for [Macaron](https://github.com/go-macaron/macaron). It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase, Ledis and Nodb. 
+ +### Installation + + go get github.com/go-macaron/session + +## Getting Help + +- [API Reference](https://gowalker.org/github.com/go-macaron/session) +- [Documentation](http://go-macaron.com/docs/middlewares/session) + +## Credits + +This package is a modified version of [beego/session](https://github.com/astaxie/beego/tree/master/session). + +## License + +This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/couchbase/couchbase.go b/Godeps/_workspace/src/github.com/go-macaron/session/couchbase/couchbase.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/couchbase/couchbase.go rename to Godeps/_workspace/src/github.com/go-macaron/session/couchbase/couchbase.go index 93953c69724..1a77a42b5f3 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/couchbase/couchbase.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/couchbase/couchbase.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -21,7 +21,7 @@ import ( "github.com/couchbaselabs/go-couchbase" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // CouchbaseSessionStore represents a couchbase session store implementation. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go b/Godeps/_workspace/src/github.com/go-macaron/session/file.go similarity index 94% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/file.go rename to Godeps/_workspace/src/github.com/go-macaron/session/file.go index cab807d00bd..438269ea845 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/file.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/file.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -78,6 +78,9 @@ func (s *FileStore) ID() string { // Release releases resource and save data to provider. func (s *FileStore) Release() error { + s.p.lock.Lock() + defer s.p.lock.Unlock() + data, err := EncodeGob(s.data) if err != nil { return err @@ -97,14 +100,17 @@ func (s *FileStore) Flush() error { // FileProvider represents a file session provider implementation. type FileProvider struct { + lock sync.RWMutex maxlifetime int64 rootPath string } // Init initializes file session provider with given root path. func (p *FileProvider) Init(maxlifetime int64, rootPath string) error { + p.lock.Lock() p.maxlifetime = maxlifetime p.rootPath = rootPath + p.lock.Unlock() return nil } @@ -118,6 +124,8 @@ func (p *FileProvider) Read(sid string) (_ RawStore, err error) { if err = os.MkdirAll(path.Dir(filename), os.ModePerm); err != nil { return nil, err } + p.lock.RLock() + defer p.lock.RUnlock() var f *os.File if com.IsFile(filename) { @@ -152,15 +160,22 @@ func (p *FileProvider) Read(sid string) (_ RawStore, err error) { // Exist returns true if session with given ID exists. 
func (p *FileProvider) Exist(sid string) bool { + p.lock.RLock() + defer p.lock.RUnlock() return com.IsFile(p.filepath(sid)) } // Destory deletes a session by session ID. func (p *FileProvider) Destory(sid string) error { + p.lock.Lock() + defer p.lock.Unlock() return os.Remove(p.filepath(sid)) } func (p *FileProvider) regenerate(oldsid, sid string) (err error) { + p.lock.Lock() + defer p.lock.Unlock() + filename := p.filepath(sid) if com.IsExist(filename) { return fmt.Errorf("new sid '%s' already exists", sid) @@ -219,6 +234,9 @@ func (p *FileProvider) Count() int { // GC calls GC to clean expired sessions. func (p *FileProvider) GC() { + p.lock.RLock() + defer p.lock.RUnlock() + if !com.IsExist(p.rootPath) { return } diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go b/Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go rename to Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.go index afde7134cbd..73e27f5f22a 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -25,7 +25,7 @@ import ( "github.com/siddontang/ledisdb/ledis" "gopkg.in/ini.v1" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // LedisStore represents a ledis session store implementation. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go b/Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go rename to Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.go index b4fcdde62bd..f4a9e81d257 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -22,7 +22,7 @@ import ( "github.com/bradfitz/gomemcache/memcache" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // MemcacheStore represents a memcache session store implementation. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go b/Godeps/_workspace/src/github.com/go-macaron/session/memory.go similarity index 93% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go rename to Godeps/_workspace/src/github.com/go-macaron/session/memory.go index e717635b951..4ad929352e9 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/memory.go @@ -1,212 +1,217 @@ -// Copyright 2013 Beego Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "container/list" - "fmt" - "sync" - "time" -) - -// MemStore represents a in-memory session store implementation. -type MemStore struct { - sid string - lock sync.RWMutex - data map[interface{}]interface{} - lastAccess time.Time -} - -// NewMemStore creates and returns a memory session store. 
-func NewMemStore(sid string) *MemStore { - return &MemStore{ - sid: sid, - data: make(map[interface{}]interface{}), - lastAccess: time.Now(), - } -} - -// Set sets value to given key in session. -func (s *MemStore) Set(key, val interface{}) error { - s.lock.Lock() - defer s.lock.Unlock() - - s.data[key] = val - return nil -} - -// Get gets value by given key in session. -func (s *MemStore) Get(key interface{}) interface{} { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.data[key] -} - -// Delete deletes a key from session. -func (s *MemStore) Delete(key interface{}) error { - s.lock.Lock() - defer s.lock.Unlock() - - delete(s.data, key) - return nil -} - -// ID returns current session ID. -func (s *MemStore) ID() string { - return s.sid -} - -// Release releases resource and save data to provider. -func (_ *MemStore) Release() error { - return nil -} - -// Flush deletes all session data. -func (s *MemStore) Flush() error { - s.lock.Lock() - defer s.lock.Unlock() - - s.data = make(map[interface{}]interface{}) - return nil -} - -// MemProvider represents a in-memory session provider implementation. -type MemProvider struct { - lock sync.RWMutex - maxLifetime int64 - data map[string]*list.Element - // A priority list whose lastAccess newer gets higer priority. - list *list.List -} - -// Init initializes memory session provider. -func (p *MemProvider) Init(maxLifetime int64, _ string) error { - p.maxLifetime = maxLifetime - return nil -} - -// update expands time of session store by given ID. -func (p *MemProvider) update(sid string) error { - p.lock.Lock() - defer p.lock.Unlock() - - if e, ok := p.data[sid]; ok { - e.Value.(*MemStore).lastAccess = time.Now() - p.list.MoveToFront(e) - return nil - } - return nil -} - -// Read returns raw session store by session ID. 
-func (p *MemProvider) Read(sid string) (_ RawStore, err error) { - p.lock.RLock() - e, ok := p.data[sid] - p.lock.RUnlock() - - if ok { - if err = p.update(sid); err != nil { - return nil, err - } - return e.Value.(*MemStore), nil - } - - // Create a new session. - p.lock.Lock() - defer p.lock.Unlock() - - s := NewMemStore(sid) - p.data[sid] = p.list.PushBack(s) - return s, nil -} - -// Exist returns true if session with given ID exists. -func (p *MemProvider) Exist(sid string) bool { - p.lock.RLock() - defer p.lock.RUnlock() - - _, ok := p.data[sid] - return ok -} - -// Destory deletes a session by session ID. -func (p *MemProvider) Destory(sid string) error { - p.lock.Lock() - defer p.lock.Unlock() - - e, ok := p.data[sid] - if !ok { - return nil - } - - p.list.Remove(e) - delete(p.data, sid) - return nil -} - -// Regenerate regenerates a session store from old session ID to new one. -func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) { - if p.Exist(sid) { - return nil, fmt.Errorf("new sid '%s' already exists", sid) - } - - s, err := p.Read(oldsid) - if err != nil { - return nil, err - } - - if err = p.Destory(oldsid); err != nil { - return nil, err - } - - s.(*MemStore).sid = sid - p.data[sid] = p.list.PushBack(s) - return s, nil -} - -// Count counts and returns number of sessions. -func (p *MemProvider) Count() int { - return p.list.Len() -} - -// GC calls GC to clean expired sessions. -func (p *MemProvider) GC() { - p.lock.RLock() - for { - // No session in the list. 
- e := p.list.Back() - if e == nil { - break - } - - if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() { - p.lock.RUnlock() - p.lock.Lock() - p.list.Remove(e) - delete(p.data, e.Value.(*MemStore).sid) - p.lock.Unlock() - p.lock.RLock() - } else { - break - } - } - p.lock.RUnlock() -} - -func init() { - Register("memory", &MemProvider{list: list.New(), data: make(map[string]*list.Element)}) -} +// Copyright 2013 Beego Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package session + +import ( + "container/list" + "fmt" + "sync" + "time" +) + +// MemStore represents a in-memory session store implementation. +type MemStore struct { + sid string + lock sync.RWMutex + data map[interface{}]interface{} + lastAccess time.Time +} + +// NewMemStore creates and returns a memory session store. +func NewMemStore(sid string) *MemStore { + return &MemStore{ + sid: sid, + data: make(map[interface{}]interface{}), + lastAccess: time.Now(), + } +} + +// Set sets value to given key in session. +func (s *MemStore) Set(key, val interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data[key] = val + return nil +} + +// Get gets value by given key in session. +func (s *MemStore) Get(key interface{}) interface{} { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.data[key] +} + +// Delete deletes a key from session. 
+func (s *MemStore) Delete(key interface{}) error { + s.lock.Lock() + defer s.lock.Unlock() + + delete(s.data, key) + return nil +} + +// ID returns current session ID. +func (s *MemStore) ID() string { + return s.sid +} + +// Release releases resource and save data to provider. +func (_ *MemStore) Release() error { + return nil +} + +// Flush deletes all session data. +func (s *MemStore) Flush() error { + s.lock.Lock() + defer s.lock.Unlock() + + s.data = make(map[interface{}]interface{}) + return nil +} + +// MemProvider represents a in-memory session provider implementation. +type MemProvider struct { + lock sync.RWMutex + maxLifetime int64 + data map[string]*list.Element + // A priority list whose lastAccess newer gets higer priority. + list *list.List +} + +// Init initializes memory session provider. +func (p *MemProvider) Init(maxLifetime int64, _ string) error { + p.lock.Lock() + p.maxLifetime = maxLifetime + p.lock.Unlock() + return nil +} + +// update expands time of session store by given ID. +func (p *MemProvider) update(sid string) error { + p.lock.Lock() + defer p.lock.Unlock() + + if e, ok := p.data[sid]; ok { + e.Value.(*MemStore).lastAccess = time.Now() + p.list.MoveToFront(e) + return nil + } + return nil +} + +// Read returns raw session store by session ID. +func (p *MemProvider) Read(sid string) (_ RawStore, err error) { + p.lock.RLock() + e, ok := p.data[sid] + p.lock.RUnlock() + + if ok { + if err = p.update(sid); err != nil { + return nil, err + } + return e.Value.(*MemStore), nil + } + + // Create a new session. + p.lock.Lock() + defer p.lock.Unlock() + + s := NewMemStore(sid) + p.data[sid] = p.list.PushBack(s) + return s, nil +} + +// Exist returns true if session with given ID exists. +func (p *MemProvider) Exist(sid string) bool { + p.lock.RLock() + defer p.lock.RUnlock() + + _, ok := p.data[sid] + return ok +} + +// Destory deletes a session by session ID. 
+func (p *MemProvider) Destory(sid string) error { + p.lock.Lock() + defer p.lock.Unlock() + + e, ok := p.data[sid] + if !ok { + return nil + } + + p.list.Remove(e) + delete(p.data, sid) + return nil +} + +// Regenerate regenerates a session store from old session ID to new one. +func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) { + if p.Exist(sid) { + return nil, fmt.Errorf("new sid '%s' already exists", sid) + } + + s, err := p.Read(oldsid) + if err != nil { + return nil, err + } + + if err = p.Destory(oldsid); err != nil { + return nil, err + } + + s.(*MemStore).sid = sid + + p.lock.Lock() + defer p.lock.Unlock() + p.data[sid] = p.list.PushBack(s) + return s, nil +} + +// Count counts and returns number of sessions. +func (p *MemProvider) Count() int { + return p.list.Len() +} + +// GC calls GC to clean expired sessions. +func (p *MemProvider) GC() { + p.lock.RLock() + for { + // No session in the list. + e := p.list.Back() + if e == nil { + break + } + + if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() { + p.lock.RUnlock() + p.lock.Lock() + p.list.Remove(e) + delete(p.data, e.Value.(*MemStore).sid) + p.lock.Unlock() + p.lock.RLock() + } else { + break + } + } + p.lock.RUnlock() +} + +func init() { + Register("memory", &MemProvider{list: list.New(), data: make(map[string]*list.Element)}) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go b/Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go rename to Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.go index 7997e03c0d8..0d96fb8ec18 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron 
Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -24,7 +24,7 @@ import ( _ "github.com/go-sql-driver/mysql" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // MysqlStore represents a mysql session store implementation. diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go b/Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go rename to Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.go index 7f017bf0457..c02f323f322 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.go @@ -1,4 +1,4 @@ -// Copyright 2015 Unknwon +// Copyright 2015 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -21,7 +21,7 @@ import ( "github.com/lunny/nodb" "github.com/lunny/nodb/config" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // NodbStore represents a nodb session store implementation. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go b/Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go rename to Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.go index 5cb4c82ea84..8749f575f8d 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -24,7 +24,7 @@ import ( _ "github.com/lib/pq" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // PostgresStore represents a postgres session store implementation. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go b/Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.go similarity index 81% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go rename to Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.go index 6d6a2c464c8..ca1cf88de61 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -25,22 +25,23 @@ import ( "gopkg.in/ini.v1" "gopkg.in/redis.v2" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" ) // RedisStore represents a redis session store implementation. type RedisStore struct { - c *redis.Client - sid string - duration time.Duration - lock sync.RWMutex - data map[interface{}]interface{} + c *redis.Client + prefix, sid string + duration time.Duration + lock sync.RWMutex + data map[interface{}]interface{} } // NewRedisStore creates and returns a redis session store. 
-func NewRedisStore(c *redis.Client, sid string, dur time.Duration, kv map[interface{}]interface{}) *RedisStore { +func NewRedisStore(c *redis.Client, prefix, sid string, dur time.Duration, kv map[interface{}]interface{}) *RedisStore { return &RedisStore{ c: c, + prefix: prefix, sid: sid, duration: dur, data: kv, @@ -85,7 +86,7 @@ func (s *RedisStore) Release() error { return err } - return s.c.SetEx(s.sid, s.duration, string(data)).Err() + return s.c.SetEx(s.prefix+s.sid, s.duration, string(data)).Err() } // Flush deletes all session data. @@ -101,10 +102,11 @@ func (s *RedisStore) Flush() error { type RedisProvider struct { c *redis.Client duration time.Duration + prefix string } // Init initializes redis session provider. -// configs: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180 +// configs: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180,prefix=session; func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) { p.duration, err = time.ParseDuration(fmt.Sprintf("%ds", maxlifetime)) if err != nil { @@ -136,6 +138,8 @@ func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) { if err != nil { return fmt.Errorf("error parsing idle timeout: %v", err) } + case "prefix": + p.prefix = v default: return fmt.Errorf("session/redis: unsupported option '%s'", k) } @@ -147,14 +151,15 @@ func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) { // Read returns raw session store by session ID. 
func (p *RedisProvider) Read(sid string) (session.RawStore, error) { + psid := p.prefix + sid if !p.Exist(sid) { - if err := p.c.Set(sid, "").Err(); err != nil { + if err := p.c.Set(psid, "").Err(); err != nil { return nil, err } } var kv map[interface{}]interface{} - kvs, err := p.c.Get(sid).Result() + kvs, err := p.c.Get(psid).Result() if err != nil { return nil, err } @@ -167,37 +172,40 @@ func (p *RedisProvider) Read(sid string) (session.RawStore, error) { } } - return NewRedisStore(p.c, sid, p.duration, kv), nil + return NewRedisStore(p.c, p.prefix, sid, p.duration, kv), nil } // Exist returns true if session with given ID exists. func (p *RedisProvider) Exist(sid string) bool { - has, err := p.c.Exists(sid).Result() + has, err := p.c.Exists(p.prefix + sid).Result() return err == nil && has } // Destory deletes a session by session ID. func (p *RedisProvider) Destory(sid string) error { - return p.c.Del(sid).Err() + return p.c.Del(p.prefix + sid).Err() } // Regenerate regenerates a session store from old session ID to new one. func (p *RedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) { + poldsid := p.prefix + oldsid + psid := p.prefix + sid + if p.Exist(sid) { return nil, fmt.Errorf("new sid '%s' already exists", sid) } else if !p.Exist(oldsid) { // Make a fake old session. 
- if err = p.c.SetEx(oldsid, p.duration, "").Err(); err != nil { + if err = p.c.SetEx(poldsid, p.duration, "").Err(); err != nil { return nil, err } } - if err = p.c.Rename(oldsid, sid).Err(); err != nil { + if err = p.c.Rename(poldsid, psid).Err(); err != nil { return nil, err } var kv map[interface{}]interface{} - kvs, err := p.c.Get(sid).Result() + kvs, err := p.c.Get(psid).Result() if err != nil { return nil, err } @@ -211,7 +219,7 @@ func (p *RedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err } } - return NewRedisStore(p.c, sid, p.duration, kv), nil + return NewRedisStore(p.c, p.prefix, sid, p.duration, kv), nil } // Count counts and returns number of sessions. diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey b/Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.goconvey similarity index 100% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey rename to Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.goconvey diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go b/Godeps/_workspace/src/github.com/go-macaron/session/session.go similarity index 98% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/session.go rename to Godeps/_workspace/src/github.com/go-macaron/session/session.go index 9cc1d528749..7e7b833c552 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/session.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/session.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -16,8 +16,6 @@ // Package session a middleware that provides the session management of Macaron. package session -// NOTE: last sync 000033e on Nov 4, 2014. 
- import ( "encoding/hex" "fmt" @@ -25,10 +23,10 @@ import ( "net/url" "time" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" ) -const _VERSION = "0.1.6" +const _VERSION = "0.3.0" func Version() string { return _VERSION diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go b/Godeps/_workspace/src/github.com/go-macaron/session/utils.go similarity index 80% rename from Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go rename to Godeps/_workspace/src/github.com/go-macaron/session/utils.go index 6c9ea495fe5..07a1283df94 100644 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go +++ b/Godeps/_workspace/src/github.com/go-macaron/session/utils.go @@ -1,5 +1,5 @@ // Copyright 2013 Beego Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -24,6 +24,17 @@ import ( "github.com/Unknwon/com" ) +func init() { + gob.Register([]interface{}{}) + gob.Register(map[int]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register(map[interface{}]interface{}{}) + gob.Register(map[string]string{}) + gob.Register(map[int]string{}) + gob.Register(map[int]int{}) + gob.Register(map[int]int64{}) +} + func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) { for _, v := range obj { gob.Register(v) diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/LICENSE b/Godeps/_workspace/src/github.com/jtolds/gls/LICENSE deleted file mode 100644 index 9b4a822d92c..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013, Space Monkey, Inc. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/README.md b/Godeps/_workspace/src/github.com/jtolds/gls/README.md deleted file mode 100644 index 1272b65a534..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/README.md +++ /dev/null @@ -1,64 +0,0 @@ -gls -=== - -Goroutine local storage - -### Huhwaht? Why? ### - -Every so often, a thread shows up on the -[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some -form of goroutine-local-storage, or some kind of goroutine id, or some kind of -context. There are a few valid use cases for goroutine-local-storage, one of -the most prominent being log line context. One poster was interested in being -able to log an HTTP request context id in every log line in the same goroutine -as the incoming HTTP request, without having to change every library and -function call he was interested in logging. - -This would be pretty useful. 
Provided that you could get some kind of -goroutine-local-storage, you could call -[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging -writer that checks goroutine-local-storage for some context information and -adds that context to your log lines. - -But alas, Andrew Gerrand's typically diplomatic answer to the question of -goroutine-local variables was: - -> We wouldn't even be having this discussion if thread local storage wasn't -> useful. But every feature comes at a cost, and in my opinion the cost of -> threadlocals far outweighs their benefits. They're just not a good fit for -> Go. - -So, yeah, that makes sense. That's a pretty good reason for why the language -won't support a specific and (relatively) unuseful feature that requires some -runtime changes, just for the sake of a little bit of log improvement. - -But does Go require runtime changes? - -### How it works ### - -Go has pretty fantastic introspective and reflective features, but one thing Go -doesn't give you is any kind of access to the stack pointer, or frame pointer, -or goroutine id, or anything contextual about your current stack. It gives you -access to your list of callers, but only along with program counters, which are -fixed at compile time. - -But it does give you the stack. - -So, we define 16 special functions and embed base-16 tags into the stack using -the call order of those 16 functions. Then, we can read our tags back out of -the stack looking at the callers list. - -We then use these tags as an index into a traditional map for implementing -this library. - -### What are people saying? ### - -"Wow, that's horrifying." - -"This is the most terrible thing I have seen in a very long time." - -"Where is it getting a context from? Is this serializing all the requests? What the heck is the client being bound to? What are these tags? Why does he need callers? Oh god no. No no no." 
- -### Docs ### - -Please see the docs at http://godoc.org/github.com/jtolds/gls diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/context.go b/Godeps/_workspace/src/github.com/jtolds/gls/context.go deleted file mode 100644 index 94d7fbb7f2c..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/context.go +++ /dev/null @@ -1,150 +0,0 @@ -// Package gls implements goroutine-local storage. -package gls - -import ( - "runtime" - "sync" -) - -const ( - maxCallers = 64 -) - -var ( - stackTagPool = &idPool{} - mgrRegistry = make(map[*ContextManager]bool) - mgrRegistryMtx sync.RWMutex -) - -// Values is simply a map of key types to value types. Used by SetValues to -// set multiple values at once. -type Values map[interface{}]interface{} - -func currentStack(skip int) []uintptr { - stack := make([]uintptr, maxCallers) - return stack[:runtime.Callers(2+skip, stack)] -} - -// ContextManager is the main entrypoint for interacting with -// Goroutine-local-storage. You can have multiple independent ContextManagers -// at any given time. ContextManagers are usually declared globally for a given -// class of context variables. You should use NewContextManager for -// construction. -type ContextManager struct { - mtx sync.RWMutex - values map[uint]Values -} - -// NewContextManager returns a brand new ContextManager. It also registers the -// new ContextManager in the ContextManager registry which is used by the Go -// method. ContextManagers are typically defined globally at package scope. -func NewContextManager() *ContextManager { - mgr := &ContextManager{values: make(map[uint]Values)} - mgrRegistryMtx.Lock() - defer mgrRegistryMtx.Unlock() - mgrRegistry[mgr] = true - return mgr -} - -// Unregister removes a ContextManager from the global registry, used by the -// Go method. Only intended for use when you're completely done with a -// ContextManager. Use of Unregister at all is rare. 
-func (m *ContextManager) Unregister() { - mgrRegistryMtx.Lock() - defer mgrRegistryMtx.Unlock() - delete(mgrRegistry, m) -} - -// SetValues takes a collection of values and a function to call for those -// values to be set in. Anything further down the stack will have the set -// values available through GetValue. SetValues will add new values or replace -// existing values of the same key and will not mutate or change values for -// previous stack frames. -// SetValues is slow (makes a copy of all current and new values for the new -// gls-context) in order to reduce the amount of lookups GetValue requires. -func (m *ContextManager) SetValues(new_values Values, context_call func()) { - if len(new_values) == 0 { - context_call() - return - } - - tags := readStackTags(currentStack(1)) - - m.mtx.Lock() - values := new_values - for _, tag := range tags { - if existing_values, ok := m.values[tag]; ok { - // oh, we found existing values, let's make a copy - values = make(Values, len(existing_values)+len(new_values)) - for key, val := range existing_values { - values[key] = val - } - for key, val := range new_values { - values[key] = val - } - break - } - } - new_tag := stackTagPool.Acquire() - m.values[new_tag] = values - m.mtx.Unlock() - defer func() { - m.mtx.Lock() - delete(m.values, new_tag) - m.mtx.Unlock() - stackTagPool.Release(new_tag) - }() - - addStackTag(new_tag, context_call) -} - -// GetValue will return a previously set value, provided that the value was set -// by SetValues somewhere higher up the stack. If the value is not found, ok -// will be false. 
-func (m *ContextManager) GetValue(key interface{}) (value interface{}, ok bool) { - - tags := readStackTags(currentStack(1)) - m.mtx.RLock() - defer m.mtx.RUnlock() - for _, tag := range tags { - if values, ok := m.values[tag]; ok { - value, ok := values[key] - return value, ok - } - } - return "", false -} - -func (m *ContextManager) getValues() Values { - tags := readStackTags(currentStack(2)) - m.mtx.RLock() - defer m.mtx.RUnlock() - for _, tag := range tags { - if values, ok := m.values[tag]; ok { - return values - } - } - return nil -} - -// Go preserves ContextManager values and Goroutine-local-storage across new -// goroutine invocations. The Go method makes a copy of all existing values on -// all registered context managers and makes sure they are still set after -// kicking off the provided function in a new goroutine. If you don't use this -// Go method instead of the standard 'go' keyword, you will lose values in -// ContextManagers, as goroutines have brand new stacks. -func Go(cb func()) { - mgrRegistryMtx.RLock() - defer mgrRegistryMtx.RUnlock() - - for mgr, _ := range mgrRegistry { - values := mgr.getValues() - if len(values) > 0 { - mgr_copy := mgr - cb_copy := cb - cb = func() { mgr_copy.SetValues(values, cb_copy) } - } - } - - go cb() -} diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/context_test.go b/Godeps/_workspace/src/github.com/jtolds/gls/context_test.go deleted file mode 100644 index ae5bde4aede..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/context_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package gls - -import ( - "fmt" - "sync" - "testing" -) - -func TestContexts(t *testing.T) { - mgr1 := NewContextManager() - mgr2 := NewContextManager() - - CheckVal := func(mgr *ContextManager, key, exp_val string) { - val, ok := mgr.GetValue(key) - if len(exp_val) == 0 { - if ok { - t.Fatalf("expected no value for key %s, got %s", key, val) - } - return - } - if !ok { - t.Fatalf("expected value %s for key %s, got no value", - 
exp_val, key) - } - if exp_val != val { - t.Fatalf("expected value %s for key %s, got %s", exp_val, key, - val) - } - - } - - Check := func(exp_m1v1, exp_m1v2, exp_m2v1, exp_m2v2 string) { - CheckVal(mgr1, "key1", exp_m1v1) - CheckVal(mgr1, "key2", exp_m1v2) - CheckVal(mgr2, "key1", exp_m2v1) - CheckVal(mgr2, "key2", exp_m2v2) - } - - Check("", "", "", "") - mgr2.SetValues(Values{"key1": "val1c"}, func() { - Check("", "", "val1c", "") - mgr1.SetValues(Values{"key1": "val1a"}, func() { - Check("val1a", "", "val1c", "") - mgr1.SetValues(Values{"key2": "val1b"}, func() { - Check("val1a", "val1b", "val1c", "") - var wg sync.WaitGroup - wg.Add(2) - go func() { - defer wg.Done() - Check("", "", "", "") - }() - Go(func() { - defer wg.Done() - Check("val1a", "val1b", "val1c", "") - }) - wg.Wait() - }) - }) - }) -} - -func ExampleContextManager_SetValues() { - var ( - mgr = NewContextManager() - request_id_key = GenSym() - ) - - MyLog := func() { - if request_id, ok := mgr.GetValue(request_id_key); ok { - fmt.Println("My request id is:", request_id) - } else { - fmt.Println("No request id found") - } - } - - mgr.SetValues(Values{request_id_key: "12345"}, func() { - MyLog() - }) - MyLog() - - // Output: My request id is: 12345 - // No request id found -} - -func ExampleGo() { - var ( - mgr = NewContextManager() - request_id_key = GenSym() - ) - - MyLog := func() { - if request_id, ok := mgr.GetValue(request_id_key); ok { - fmt.Println("My request id is:", request_id) - } else { - fmt.Println("No request id found") - } - } - - mgr.SetValues(Values{request_id_key: "12345"}, func() { - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - MyLog() - }() - wg.Wait() - wg.Add(1) - Go(func() { - defer wg.Done() - MyLog() - }) - wg.Wait() - }) - - // Output: No request id found - // My request id is: 12345 -} - -func BenchmarkGetValue(b *testing.B) { - mgr := NewContextManager() - mgr.SetValues(Values{"test_key": "test_val"}, func() { - b.ResetTimer() - for i := 0; i < 
b.N; i++ { - val, ok := mgr.GetValue("test_key") - if !ok || val != "test_val" { - b.FailNow() - } - } - }) -} - -func BenchmarkSetValues(b *testing.B) { - mgr := NewContextManager() - for i := 0; i < b.N/2; i++ { - mgr.SetValues(Values{"test_key": "test_val"}, func() { - mgr.SetValues(Values{"test_key2": "test_val2"}, func() {}) - }) - } -} diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/gen_sym.go b/Godeps/_workspace/src/github.com/jtolds/gls/gen_sym.go deleted file mode 100644 index 8d5fc24d4a4..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/gen_sym.go +++ /dev/null @@ -1,13 +0,0 @@ -package gls - -var ( - symPool = &idPool{} -) - -// ContextKey is a throwaway value you can use as a key to a ContextManager -type ContextKey struct{ id uint } - -// GenSym will return a brand new, never-before-used ContextKey -func GenSym() ContextKey { - return ContextKey{id: symPool.Acquire()} -} diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/id_pool.go b/Godeps/_workspace/src/github.com/jtolds/gls/id_pool.go deleted file mode 100644 index b7974ae0026..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/id_pool.go +++ /dev/null @@ -1,34 +0,0 @@ -package gls - -// though this could probably be better at keeping ids smaller, the goal of -// this class is to keep a registry of the smallest unique integer ids -// per-process possible - -import ( - "sync" -) - -type idPool struct { - mtx sync.Mutex - released []uint - max_id uint -} - -func (p *idPool) Acquire() (id uint) { - p.mtx.Lock() - defer p.mtx.Unlock() - if len(p.released) > 0 { - id = p.released[len(p.released)-1] - p.released = p.released[:len(p.released)-1] - return id - } - id = p.max_id - p.max_id++ - return id -} - -func (p *idPool) Release(id uint) { - p.mtx.Lock() - defer p.mtx.Unlock() - p.released = append(p.released, id) -} diff --git a/Godeps/_workspace/src/github.com/jtolds/gls/stack_tags.go b/Godeps/_workspace/src/github.com/jtolds/gls/stack_tags.go deleted file mode 
100644 index 562a2fab799..00000000000 --- a/Godeps/_workspace/src/github.com/jtolds/gls/stack_tags.go +++ /dev/null @@ -1,93 +0,0 @@ -package gls - -// so, basically, we're going to encode integer tags in base-16 on the stack - -import ( - "reflect" - "runtime" -) - -const ( - bitWidth = 4 -) - -func addStackTag(tag uint, context_call func()) { - if context_call == nil { - return - } - markS(tag, context_call) -} - -func markS(tag uint, cb func()) { _m(tag, cb) } -func mark0(tag uint, cb func()) { _m(tag, cb) } -func mark1(tag uint, cb func()) { _m(tag, cb) } -func mark2(tag uint, cb func()) { _m(tag, cb) } -func mark3(tag uint, cb func()) { _m(tag, cb) } -func mark4(tag uint, cb func()) { _m(tag, cb) } -func mark5(tag uint, cb func()) { _m(tag, cb) } -func mark6(tag uint, cb func()) { _m(tag, cb) } -func mark7(tag uint, cb func()) { _m(tag, cb) } -func mark8(tag uint, cb func()) { _m(tag, cb) } -func mark9(tag uint, cb func()) { _m(tag, cb) } -func markA(tag uint, cb func()) { _m(tag, cb) } -func markB(tag uint, cb func()) { _m(tag, cb) } -func markC(tag uint, cb func()) { _m(tag, cb) } -func markD(tag uint, cb func()) { _m(tag, cb) } -func markE(tag uint, cb func()) { _m(tag, cb) } -func markF(tag uint, cb func()) { _m(tag, cb) } - -var pc_lookup = make(map[uintptr]int8, 17) -var mark_lookup [16]func(uint, func()) - -func init() { - setEntries := func(f func(uint, func()), v int8) { - pc_lookup[reflect.ValueOf(f).Pointer()] = v - if v >= 0 { - mark_lookup[v] = f - } - } - setEntries(markS, -0x1) - setEntries(mark0, 0x0) - setEntries(mark1, 0x1) - setEntries(mark2, 0x2) - setEntries(mark3, 0x3) - setEntries(mark4, 0x4) - setEntries(mark5, 0x5) - setEntries(mark6, 0x6) - setEntries(mark7, 0x7) - setEntries(mark8, 0x8) - setEntries(mark9, 0x9) - setEntries(markA, 0xa) - setEntries(markB, 0xb) - setEntries(markC, 0xc) - setEntries(markD, 0xd) - setEntries(markE, 0xe) - setEntries(markF, 0xf) -} - -func _m(tag_remainder uint, cb func()) { - if tag_remainder == 0 { - 
cb() - } else { - mark_lookup[tag_remainder&0xf](tag_remainder>>bitWidth, cb) - } -} - -func readStackTags(stack []uintptr) (tags []uint) { - var current_tag uint - for _, pc := range stack { - pc = runtime.FuncForPC(pc).Entry() - val, ok := pc_lookup[pc] - if !ok { - continue - } - if val < 0 { - tags = append(tags, current_tag) - current_tag = 0 - continue - } - current_tag <<= bitWidth - current_tag += uint(val) - } - return -} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/LICENSE b/Godeps/_workspace/src/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go new file mode 100644 index 00000000000..a3200a8f49e --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// forwardCopy is like the built-in copy function except that it always goes +// forward from the start, even if the dst and src overlap. +// It is equivalent to: +// for i := 0; i < n; i++ { +// mem[dst+i] = mem[src+i] +// } +func forwardCopy(mem []byte, dst, src, n int) { + if dst <= src { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + for { + if dst >= src+n { + copy(mem[dst:dst+n], mem[src:src+n]) + return + } + // There is some forward overlap. The destination + // will be filled with a repeated pattern of mem[src:src+k]. + // We copy one instance of the pattern here, then repeat. + // Each time around this loop k will double. 
+ k := dst - src + copy(mem[dst:dst+k], mem[src:src+k]) + n -= k + dst += k + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go new file mode 100644 index 00000000000..45d52f629c2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go @@ -0,0 +1,39 @@ +//+build !noasm +//+build !appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +import ( + "github.com/klauspost/cpuid" +) + +// crc32sse returns a hash for the first 4 bytes of the slice +// len(a) must be >= 4. +//go:noescape +func crc32sse(a []byte) hash + +// crc32sseAll calculates hashes for each 4-byte set in a. +// dst must be east len(a) - 4 in size. +// The size is not checked by the assembly. +//go:noescape +func crc32sseAll(a []byte, dst []hash) + +// matchLenSSE4 returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. +// It uses the PCMPESTRI SSE 4.2 instruction. +//go:noescape +func matchLenSSE4(a, b []byte, max int) int + +// histogram accumulates a histogram of b in h. +// h must be at least 256 entries in length, +// and must be cleared before calling this function. +//go:noescape +func histogram(b []byte, h []int32) + +// Detect SSE 4.2 feature. +func init() { + useSSE42 = cpuid.CPU.SSE42() +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s new file mode 100644 index 00000000000..f94969b7603 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s @@ -0,0 +1,212 @@ +//+build !noasm !appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. 
+ +// func crc32sse(a []byte) hash +TEXT ·crc32sse(SB), 7, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []hash) +TEXT ·crc32sseAll(SB), 7, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 7, $0 + MOVQ a+0(FP), R8 // R8: &a + MOVQ b+24(FP), R9 // R9: &b + MOVQ max+48(FP), R10 // R10: max + XORQ R11, R11 // match length + + MOVQ R10, R12 + SHRQ $4, R10 // max/16 + ANDQ $15, R12 // max & 15 + CMPQ R10, $0 + JEQ matchlen_verysmall + +loopback_matchlen: + MOVOU (R8), X0 // a[x] + MOVOU (R9), X1 // b[x] + + // PCMPESTRI $0x18, X1, X0 + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE 
$0x61; BYTE $0xc1; BYTE $0x18 + + JC match_ended + + ADDQ $16, R8 + ADDQ $16, R9 + ADDQ $16, R11 + + SUBQ $1, R10 + JNZ loopback_matchlen + +matchlen_verysmall: + CMPQ R12, $0 + JEQ done_matchlen + +loopback_matchlen_single: + // Naiive, but small use + MOVB (R8), R13 + MOVB (R9), R14 + CMPB R13, R14 + JNE done_matchlen + ADDQ $1, R8 + ADDQ $1, R9 + ADDQ $1, R11 + SUBQ $1, R12 + JNZ loopback_matchlen_single + MOVQ R11, ret+56(FP) + RET + +match_ended: + ADDQ CX, R11 + +done_matchlen: + MOVQ R11, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 7, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 00000000000..1c6d23eed63 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,34 @@ +//+build !amd64 noasm appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) hash { + panic("no assembler") +} + +// crc32sseAll should never be called. 
+func crc32sseAll(a []byte, dst []hash) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// h must be at least 256 entries in length, +// and must be cleared before calling this function. +func histogram(b []byte, h []int32) { + for _, t := range b { + h[t]++ + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 00000000000..a103c8f4ef5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1293 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + fastCompression = 3 + BestCompression = 9 + DefaultCompression = -1 + ConstantCompression = -2 // Does only Huffman encoding + logWindowSize = 15 + windowSize = 1 << logWindowSize + windowMask = windowSize - 1 + logMaxOffsetSize = 15 // Standard DEFLATE + minMatchLength = 4 // The smallest match that the compressor looks for + maxMatchLength = 258 // The longest match for the compressor + minOffsetSize = 1 // The shortest offset that makes any sense + + // The maximum number of tokens we put into a single flat block, just too + // stop things from getting too large. 
+ maxFlateBlockTokens = 1 << 14 + maxStoreBlockSize = 65535 + hashBits = 17 // After 17 performance degrades + hashSize = 1 << hashBits + hashMask = (1 << hashBits) - 1 + hashShift = (hashBits + minMatchLength - 1) / minMatchLength + maxHashOffset = 1 << 24 + + skipNever = math.MaxInt32 +) + +var useSSE42 bool + +type compressionLevel struct { + good, lazy, nice, chain, fastSkipHashing, level int +} + +var levels = []compressionLevel{ + {}, // 0 + // For levels 1-3 we don't bother trying with lazy matches + {4, 0, 8, 4, 4, 1}, + {4, 0, 16, 8, 5, 2}, + {4, 0, 32, 32, 6, 3}, + // Levels 4-9 use increasingly more lazy matching + // and increasingly stringent conditions for "good enough". + {4, 4, 16, 16, skipNever, 4}, + {8, 16, 32, 32, skipNever, 5}, + {8, 16, 128, 128, skipNever, 6}, + {8, 32, 128, 256, skipNever, 7}, + {32, 128, 258, 1024, skipNever, 8}, + {32, 258, 258, 4096, skipNever, 9}, +} + +type hashid uint32 + +type compressor struct { + compressionLevel + + w *huffmanBitWriter + bulkHasher func([]byte, []hash) + + // compression algorithm + fill func(*compressor, []byte) int // copy data to window + step func(*compressor) // process window + sync bool // requesting flush + + // Input hash chains + // hashHead[hashValue] contains the largest inputIndex with the specified hash value + // If hashHead[hashValue] is within the current window, then + // hashPrev[hashHead[hashValue] & windowMask] contains the previous index + // with the same hash value. + chainHead int + hashHead []hashid + hashPrev []hashid + hashOffset int + + // input window: unprocessed data is window[index:windowEnd] + index int + window []byte + windowEnd int + blockStart int // window index where current tokens start + byteAvailable bool // if true, still need to process window[index-1]. 
+ + // queued output tokens + tokens tokens + + // deflate state + length int + offset int + hash hash + maxInsertIndex int + err error + ii uint16 // position of last match, intended to overflow to reset. + + hashMatch [maxMatchLength + minMatchLength]hash +} + +type hash int32 + +func (d *compressor) fillDeflate(b []byte) int { + if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) { + // shift the window by windowSize + copy(d.window, d.window[windowSize:2*windowSize]) + d.index -= windowSize + d.windowEnd -= windowSize + if d.blockStart >= windowSize { + d.blockStart -= windowSize + } else { + d.blockStart = math.MaxInt32 + } + d.hashOffset += windowSize + if d.hashOffset > maxHashOffset { + delta := d.hashOffset - 1 + d.hashOffset -= delta + d.chainHead -= delta + for i, v := range d.hashPrev { + if int(v) > delta { + d.hashPrev[i] = hashid(int(v) - delta) + } else { + d.hashPrev[i] = 0 + } + } + for i, v := range d.hashHead { + if int(v) > delta { + d.hashHead[i] = hashid(int(v) - delta) + } else { + d.hashHead[i] = 0 + } + } + } + } + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) writeBlock(tok tokens, index int, eof bool) error { + if index > 0 || eof { + var window []byte + if d.blockStart <= index { + window = d.window[d.blockStart:index] + } + d.blockStart = index + d.w.writeBlock(tok, eof, window) + return d.w.err + } + return nil +} + +// fillWindow will fill the current window with the supplied +// dictionary and calculate all hashes. +// This is much faster than doing a full encode. +// Should only be used after a start/reset. +func (d *compressor) fillWindow(b []byte) { + // Do not fill window if we are in store-only mode, + // use constant or Snappy compression. + if d.compressionLevel.level == 0 { + return + } + // If we are given too much, cut it. + if len(b) > windowSize { + b = b[len(b)-windowSize:] + } + // Add all to window. 
+ n := copy(d.window[d.windowEnd:], b) + + // Calculate 256 hashes at the time (more L1 cache hits) + loops := (n + 256 - minMatchLength) / 256 + for j := 0; j < loops; j++ { + startindex := j * 256 + end := startindex + 256 + minMatchLength - 1 + if end > n { + end = n + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + + if dstSize <= 0 { + continue + } + + dst := d.hashMatch[:dstSize] + d.bulkHasher(tocheck, dst) + var newH hash + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = hashid(di + d.hashOffset) + } + d.hash = newH + } + // Update window information. + d.windowEnd += n + d.index = n +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. 
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. 
+ break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +// oldHash is the hash function used when no native crc32 calculation +// or similar is present. +func oldHash(b []byte) hash { + return hash(b[0])<<(hashShift*3) + hash(b[1])<<(hashShift*2) + hash(b[2])< d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = hashid(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. 
+ // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + oldBulkHash(tocheck, dst) + var newH hash + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = hashid(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. 
+ d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. 
+ if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = hashid(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + oldBulkHash(tocheck, dst) + var newH hash + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = hashid(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. 
+ if d.ii > 31 { + n := int(d.ii >> 6) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateNoSkip +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = hashid(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH hash + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = hashid(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazySSE() { + // Sanity enables additional runtime tests. 
+ // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = hashid(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. 
+ d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + crc32sseAll(tocheck, dst) + var newH hash + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = hashid(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. 
+ if d.ii > 31 { + n := int(d.ii >> 6) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +func (d *compressor) fillStore(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +func (d *compressor) store() { + if d.windowEnd > 0 { + d.err = d.writeStoredBlock(d.window[:d.windowEnd]) + } + d.windowEnd = 0 +} + +// fillHuff will fill the buffer with data for huffman-only compression. +// The number of bytes copied is returned. +func (d *compressor) fillHuff(b []byte) int { + n := copy(d.window[d.windowEnd:], b) + d.windowEnd += n + return n +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. +// Any error that occurred will be in d.err +func (d *compressor) storeHuff() { + // We only compress if we have maxStoreBlockSize or we are at end-of-stream + if d.windowEnd < maxStoreBlockSize && !d.sync { + return + } + if d.windowEnd == 0 { + return + } + d.w.writeBlockHuff(false, d.window[:d.windowEnd]) + d.err = d.w.err + d.windowEnd = 0 +} + +// storeHuff will compress and store the currently added data, +// if enough has been accumulated or we at the end of the stream. 
+// Any error that occurred will be in d.err +func (d *compressor) storeSnappy() { + // We only compress if we have maxStoreBlockSize. + if d.windowEnd < maxStoreBlockSize && !d.sync { + return + } + if d.windowEnd == 0 { + return + } + snappyEncode(&d.tokens, d.window[:d.windowEnd]) + d.w.writeBlock(d.tokens, false, d.window[:d.windowEnd]) + d.err = d.w.err + d.tokens.n = 0 + d.windowEnd = 0 +} + +// write will add input byte to the stream. +// Unless an error occurs all bytes will be consumed. +func (d *compressor) write(b []byte) (n int, err error) { + if d.err != nil { + return 0, d.err + } + n = len(b) + for len(b) > 0 { + d.step(d) + b = b[d.fill(d, b):] + if d.err != nil { + return 0, d.err + } + } + return n, d.err +} + +func (d *compressor) syncFlush() error { + d.sync = true + if d.err != nil { + return d.err + } + d.step(d) + if d.err == nil { + d.w.writeStoredHeader(0, false) + d.w.flush() + d.err = d.w.err + } + d.sync = false + return d.err +} + +func (d *compressor) init(w io.Writer, level int) (err error) { + d.w = newHuffmanBitWriter(w) + + switch { + case level == NoCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillStore + d.step = (*compressor).store + case level == ConstantCompression: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillHuff + d.step = (*compressor).storeHuff + case level == 1: + d.window = make([]byte, maxStoreBlockSize) + d.fill = (*compressor).fillHuff + d.step = (*compressor).storeSnappy + d.tokens.tokens = make([]token, maxStoreBlockSize+1) + case level == DefaultCompression: + level = 6 + fallthrough + case 2 <= level && level <= 9: + d.compressionLevel = levels[level] + d.initDeflate() + d.fill = (*compressor).fillDeflate + if d.fastSkipHashing == skipNever { + if useSSE42 { + d.step = (*compressor).deflateLazySSE + } else { + d.step = (*compressor).deflateLazy + } + } else { + if useSSE42 { + d.step = (*compressor).deflateSSE + } else { + d.step = 
(*compressor).deflate + + } + } + default: + return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) + } + return nil +} + +// Used for zeroing the hash slice +var hzeroes [256]hashid + +// reset the state of the compressor. +func (d *compressor) reset(w io.Writer) { + d.w.reset(w) + d.sync = false + d.err = nil + switch d.compressionLevel.chain { + case 0: + // level was NoCompression or ConstantCompresssion. + d.windowEnd = 0 + default: + d.chainHead = -1 + for s := d.hashHead; len(s) > 0; { + n := copy(s, hzeroes[:]) + s = s[n:] + } + for s := d.hashPrev; len(s) > 0; s = s[len(hzeroes):] { + copy(s, hzeroes[:]) + } + d.hashOffset = 1 + + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. Level 0 +// (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. Level -1 (DefaultCompression) uses the default +// compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. 
+func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending compressed data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. 
+func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.w.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go new file mode 100644 index 00000000000..7df8b9a293f --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go @@ -0,0 +1,78 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT + +var fixedHuffmanDecoder = huffmanDecoder{ + 7, + [huffmanNumChunks]uint32{ + 0x1007, 0x0508, 0x0108, 0x1188, 0x1107, 0x0708, 0x0308, 0x0c09, + 0x1087, 0x0608, 0x0208, 0x0a09, 0x0008, 0x0808, 0x0408, 0x0e09, + 0x1047, 0x0588, 0x0188, 0x0909, 0x1147, 0x0788, 0x0388, 0x0d09, + 0x10c7, 0x0688, 0x0288, 0x0b09, 0x0088, 0x0888, 0x0488, 0x0f09, + 0x1027, 0x0548, 0x0148, 0x11c8, 0x1127, 0x0748, 0x0348, 0x0c89, + 0x10a7, 0x0648, 0x0248, 0x0a89, 0x0048, 0x0848, 0x0448, 0x0e89, + 0x1067, 0x05c8, 0x01c8, 0x0989, 0x1167, 0x07c8, 0x03c8, 0x0d89, + 0x10e7, 0x06c8, 0x02c8, 0x0b89, 0x00c8, 0x08c8, 0x04c8, 0x0f89, + 0x1017, 0x0528, 0x0128, 0x11a8, 0x1117, 0x0728, 0x0328, 0x0c49, + 0x1097, 0x0628, 0x0228, 0x0a49, 0x0028, 0x0828, 0x0428, 0x0e49, + 0x1057, 0x05a8, 0x01a8, 0x0949, 0x1157, 0x07a8, 0x03a8, 0x0d49, + 0x10d7, 0x06a8, 0x02a8, 0x0b49, 0x00a8, 0x08a8, 0x04a8, 0x0f49, + 0x1037, 0x0568, 0x0168, 0x11e8, 0x1137, 0x0768, 0x0368, 0x0cc9, + 0x10b7, 0x0668, 0x0268, 0x0ac9, 0x0068, 0x0868, 0x0468, 0x0ec9, + 0x1077, 0x05e8, 0x01e8, 0x09c9, 0x1177, 0x07e8, 0x03e8, 0x0dc9, + 0x10f7, 0x06e8, 0x02e8, 0x0bc9, 0x00e8, 0x08e8, 0x04e8, 0x0fc9, + 0x1007, 0x0518, 0x0118, 0x1198, 0x1107, 0x0718, 0x0318, 0x0c29, + 0x1087, 0x0618, 0x0218, 0x0a29, 0x0018, 0x0818, 0x0418, 0x0e29, + 0x1047, 0x0598, 0x0198, 0x0929, 0x1147, 0x0798, 0x0398, 0x0d29, + 0x10c7, 0x0698, 0x0298, 0x0b29, 0x0098, 0x0898, 0x0498, 0x0f29, + 0x1027, 0x0558, 0x0158, 0x11d8, 0x1127, 0x0758, 0x0358, 0x0ca9, + 0x10a7, 0x0658, 0x0258, 0x0aa9, 0x0058, 0x0858, 0x0458, 0x0ea9, + 0x1067, 0x05d8, 0x01d8, 0x09a9, 0x1167, 0x07d8, 0x03d8, 0x0da9, + 0x10e7, 0x06d8, 0x02d8, 0x0ba9, 0x00d8, 0x08d8, 0x04d8, 0x0fa9, + 0x1017, 0x0538, 0x0138, 0x11b8, 0x1117, 0x0738, 0x0338, 0x0c69, + 0x1097, 0x0638, 0x0238, 0x0a69, 0x0038, 0x0838, 0x0438, 0x0e69, + 0x1057, 0x05b8, 0x01b8, 0x0969, 0x1157, 0x07b8, 0x03b8, 0x0d69, + 0x10d7, 0x06b8, 0x02b8, 0x0b69, 0x00b8, 0x08b8, 
0x04b8, 0x0f69, + 0x1037, 0x0578, 0x0178, 0x11f8, 0x1137, 0x0778, 0x0378, 0x0ce9, + 0x10b7, 0x0678, 0x0278, 0x0ae9, 0x0078, 0x0878, 0x0478, 0x0ee9, + 0x1077, 0x05f8, 0x01f8, 0x09e9, 0x1177, 0x07f8, 0x03f8, 0x0de9, + 0x10f7, 0x06f8, 0x02f8, 0x0be9, 0x00f8, 0x08f8, 0x04f8, 0x0fe9, + 0x1007, 0x0508, 0x0108, 0x1188, 0x1107, 0x0708, 0x0308, 0x0c19, + 0x1087, 0x0608, 0x0208, 0x0a19, 0x0008, 0x0808, 0x0408, 0x0e19, + 0x1047, 0x0588, 0x0188, 0x0919, 0x1147, 0x0788, 0x0388, 0x0d19, + 0x10c7, 0x0688, 0x0288, 0x0b19, 0x0088, 0x0888, 0x0488, 0x0f19, + 0x1027, 0x0548, 0x0148, 0x11c8, 0x1127, 0x0748, 0x0348, 0x0c99, + 0x10a7, 0x0648, 0x0248, 0x0a99, 0x0048, 0x0848, 0x0448, 0x0e99, + 0x1067, 0x05c8, 0x01c8, 0x0999, 0x1167, 0x07c8, 0x03c8, 0x0d99, + 0x10e7, 0x06c8, 0x02c8, 0x0b99, 0x00c8, 0x08c8, 0x04c8, 0x0f99, + 0x1017, 0x0528, 0x0128, 0x11a8, 0x1117, 0x0728, 0x0328, 0x0c59, + 0x1097, 0x0628, 0x0228, 0x0a59, 0x0028, 0x0828, 0x0428, 0x0e59, + 0x1057, 0x05a8, 0x01a8, 0x0959, 0x1157, 0x07a8, 0x03a8, 0x0d59, + 0x10d7, 0x06a8, 0x02a8, 0x0b59, 0x00a8, 0x08a8, 0x04a8, 0x0f59, + 0x1037, 0x0568, 0x0168, 0x11e8, 0x1137, 0x0768, 0x0368, 0x0cd9, + 0x10b7, 0x0668, 0x0268, 0x0ad9, 0x0068, 0x0868, 0x0468, 0x0ed9, + 0x1077, 0x05e8, 0x01e8, 0x09d9, 0x1177, 0x07e8, 0x03e8, 0x0dd9, + 0x10f7, 0x06e8, 0x02e8, 0x0bd9, 0x00e8, 0x08e8, 0x04e8, 0x0fd9, + 0x1007, 0x0518, 0x0118, 0x1198, 0x1107, 0x0718, 0x0318, 0x0c39, + 0x1087, 0x0618, 0x0218, 0x0a39, 0x0018, 0x0818, 0x0418, 0x0e39, + 0x1047, 0x0598, 0x0198, 0x0939, 0x1147, 0x0798, 0x0398, 0x0d39, + 0x10c7, 0x0698, 0x0298, 0x0b39, 0x0098, 0x0898, 0x0498, 0x0f39, + 0x1027, 0x0558, 0x0158, 0x11d8, 0x1127, 0x0758, 0x0358, 0x0cb9, + 0x10a7, 0x0658, 0x0258, 0x0ab9, 0x0058, 0x0858, 0x0458, 0x0eb9, + 0x1067, 0x05d8, 0x01d8, 0x09b9, 0x1167, 0x07d8, 0x03d8, 0x0db9, + 0x10e7, 0x06d8, 0x02d8, 0x0bb9, 0x00d8, 0x08d8, 0x04d8, 0x0fb9, + 0x1017, 0x0538, 0x0138, 0x11b8, 0x1117, 0x0738, 0x0338, 0x0c79, + 0x1097, 0x0638, 0x0238, 0x0a79, 0x0038, 0x0838, 0x0438, 0x0e79, + 
0x1057, 0x05b8, 0x01b8, 0x0979, 0x1157, 0x07b8, 0x03b8, 0x0d79, + 0x10d7, 0x06b8, 0x02b8, 0x0b79, 0x00b8, 0x08b8, 0x04b8, 0x0f79, + 0x1037, 0x0578, 0x0178, 0x11f8, 0x1137, 0x0778, 0x0378, 0x0cf9, + 0x10b7, 0x0678, 0x0278, 0x0af9, 0x0078, 0x0878, 0x0478, 0x0ef9, + 0x1077, 0x05f8, 0x01f8, 0x09f9, 0x1177, 0x07f8, 0x03f8, 0x0df9, + 0x10f7, 0x06f8, 0x02f8, 0x0bf9, 0x00f8, 0x08f8, 0x04f8, 0x0ff9, + }, + nil, 0, +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 00000000000..154c89a488e --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). 
The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+ if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + h.links = make([][]uint32, huffmanNumChunks-link) + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 + reverse >>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. 
+ if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +func main() { + flag.Parse() + + var h huffmanDecoder + var bits [288]int + initReverseByte() + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + h.init(bits[:]) + if h.links != nil { + log.Fatal("Unexpected links table in fixed Huffman decoder") + } + + var buf bytes.Buffer + + fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go 
b/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 00000000000..56985ef000b --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,690 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" + "math" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // Output byte buffer size + // Must be multiple of 6 (48 bits) + 8 + bufferSize = 240 + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. 
+var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + w io.Writer + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. 
+ bits uint64 + nbits uint + bytes [bufferSize]byte + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + codegenFreq []int32 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + w: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + codegenFreq: make([]int32, codegenCodeCount), + literalEncoding: newHuffmanEncoder(maxNumLit), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.w = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} + for i := range w.codegen { + w.codegen[i] = 0 + } + for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} { + for i := range s { + s[i] = 0 + } + } + encs := []*huffmanEncoder{w.literalEncoding, w.codegenEncoding} + // Don't reset, if we are huffman only mode + if w.offsetEncoding != huffOffset { + encs = append(encs, w.offsetEncoding) + } + for _, enc := range encs { + for i := range enc.codes { + enc.codes[i] = 0 + } + } +} + +/* Inlined in writeBits +func (w *huffmanBitWriter) flushBits() { + if w.err != nil { + w.nbits = 0 + return + } + bits := w.bits + w.bits >>= 16 + w.nbits -= 16 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + if n += 2; n >= len(w.bytes) { + _, w.err = w.w.Write(w.bytes[0:]) + n = 0 + } + w.nbytes = n +} +*/ + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + _, w.err = w.w.Write(w.bytes[0:n]) + w.nbytes = 0 +} + +func 
(w *huffmanBitWriter) writeBits(b int32, nb uint) { + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferSize-8 { + _, w.err = w.w.Write(w.bytes[:bufferSize-8]) + n = 0 + } + w.nbytes = n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if w.nbits != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + if n != 0 { + _, w.err = w.w.Write(w.bytes[0:n]) + if w.err != nil { + return + } + } + w.nbytes = 0 + _, w.err = w.w.Write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. 
+ //copy(codegen[0:numLiterals], w.literalEncoding.codeBits) + cgnl := codegen[0:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(w.literalEncoding.codes[i].bits()) + } + + //copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits) + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(w.offsetEncoding.codes[i].bits()) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. 
+ codegen[outIndex] = badCode +} + +/* non-inlined: +func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) { + if w.err != nil { + return + } + c := code.codes[literal] + w.writeBits(int32(c.code()), int32(c.bits())) +} +*/ + +func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) { + if w.err != nil { + return + } + c := code.codes[literal] + w.bits |= uint64(c.code()) << w.nbits + w.nbits += c.bits() + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferSize-8 { + _, w.err = w.w.Write(w.bytes[:bufferSize-8]) + n = 0 + } + w.nbytes = n + } + +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + //value := w.codegenEncoding.codeBits[codegenOrder[i]] + value := w.codegenEncoding.codes[codegenOrder[i]].bits() + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + // The low byte contains the actual code to generate. 
+ w.writeCode(w.codegenEncoding, uint32(codeWord)) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +func (w *huffmanBitWriter) writeBlock(tok tokens, eof bool, input []byte) { + if w.err != nil { + return + } + copy(w.literalFreq, zeroLits[:]) + + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + tok.tokens[tok.n] = endBlockMarker + tokens := tok.tokens[0 : tok.n+1] + + for _, t := range tokens { + switch t.typ() { + case literalType: + w.literalFreq[t.literal()]++ + case matchType: + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + } + + // get the number of literals + numLiterals := len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets := len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. 
+ w.offsetFreq[0] = 1 + numOffsets = 1 + } + + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + + storedBytes := 0 + if input != nil { + storedBytes = len(input) + } + var extraBits int64 + var storedSize int64 = math.MaxInt64 + if storedBytes <= maxStoreBlockSize && input != nil { + storedSize = int64((storedBytes + 5) * 8) + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. + extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var size = int64(3) + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. 
+ w.generateCodegen(numLiterals, numOffsets) + w.codegenEncoding.generate(w.codegenFreq, 7) + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + dynamicHeader := int64(3+5+5+4+(3*numCodegens)) + + w.codegenEncoding.bitLength(w.codegenFreq) + + int64(extraBits) + + int64(w.codegenFreq[16]*2) + + int64(w.codegenFreq[17]*3) + + int64(w.codegenFreq[18]*7) + dynamicSize := dynamicHeader + + w.literalEncoding.bitLength(w.literalFreq) + + w.offsetEncoding.bitLength(w.offsetFreq) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storedSize < size { + w.writeStoredHeader(storedBytes, eof) + w.writeBytes(input[0:storedBytes]) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + for _, t := range tokens { + switch t.typ() { + case literalType: + w.writeCode(literalEncoding, t.literal()) + break + case matchType: + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(literalEncoding, lengthCode+lengthCodesStart) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(offsetEncoding, offsetCode) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + break + default: + panic("unknown token type: " + string(t)) + } + } +} + +var huffOffset *huffmanEncoder +var zeroLits [maxNumLit]int32 + +func init() { + var w = newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + w.offsetEncoding = 
newHuffmanEncoder(offsetCodeCount) + w.offsetEncoding.generate(w.offsetFreq, 15) + huffOffset = w.offsetEncoding +} + +// writeBlockHuff will write a block of bytes as either +// Huffman encoded literals, or uncompressed bytes depending +// on what yields the smallest result. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + // Clear histogram + copy(w.literalFreq, zeroLits[:]) + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker]++ + + // get the number of literals + numLiterals := len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + + numOffsets := 1 + + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding = huffOffset + + storedBytes := len(input) + + var extraBits int64 + var storedSize int64 = math.MaxInt64 + if storedBytes <= maxStoreBlockSize { + storedSize = int64((storedBytes + 5) * 8) + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. + extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart]) + } + } + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. 
+ w.generateCodegen(numLiterals, numOffsets) + w.codegenEncoding.generate(w.codegenFreq, 7) + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + dynamicHeader := int64(3+5+5+4+(3*numCodegens)) + + w.codegenEncoding.bitLength(w.codegenFreq) + + int64(extraBits) + + int64(w.codegenFreq[16]*2) + + int64(w.codegenFreq[17]*3) + + int64(w.codegenFreq[18]*7) + size := dynamicHeader + + w.literalEncoding.bitLength(w.literalFreq) + + 1 /*w.offsetEncoding.bitLength(w.offsetFreq)*/ + + // Stored bytes? + if storedSize < size { + w.writeStoredHeader(storedBytes, eof) + w.writeBytes(input[0:storedBytes]) + return + } + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + for _, t := range input { + // Bitwriting inlined, ~30% speedup + c := w.literalEncoding.codes[t] + w.bits |= uint64(c.code()) << w.nbits + w.nbits += c.bits() + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + w.bytes[n] = byte(bits) + w.bytes[n+1] = byte(bits >> 8) + w.bytes[n+2] = byte(bits >> 16) + w.bytes[n+3] = byte(bits >> 24) + w.bytes[n+4] = byte(bits >> 32) + w.bytes[n+5] = byte(bits >> 40) + n += 6 + if n >= bufferSize-8 { + _, w.err = w.w.Write(w.bytes[:bufferSize-8]) + w.nbytes = 0 + } else { + w.nbytes = n + } + } + } + // Write EOB + w.writeCode(w.literalEncoding, endBlockMarker) +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 00000000000..9dba0faf339 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,363 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "math" + "sort" +) + +type hcode uint32 + +type huffmanEncoder struct { + codes []hcode + freqcache []literalNode + bitCount [17]int32 + lns literalNodeSorter + lfs literalFreqSorter +} + +type literalNode struct { + literal uint16 + freq int32 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +func (h hcode) codeBits() (code uint16, bits uint8) { + return uint16(h), uint8(h >> 16) +} + +func (h *hcode) set(code uint16, bits uint8) { + *h = hcode(code) | hcode(uint32(bits)<<16) +} + +func (h *hcode) setBits(bits uint8) { + *h = hcode(*h&0xffff) | hcode(uint32(bits)<<16) +} + +func toCode(code uint16, bits uint8) hcode { + return hcode(code) | hcode(uint32(bits)<<16) +} + +func (h hcode) code() (code uint16) { + return uint16(h) +} + +func (h hcode) bits() (bits uint) { + return uint(h >> 16) +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + return &huffmanEncoder{codes: make([]hcode, size), freqcache: nil} +} + +// Generates a HuffmanCode corresponding to the fixed literal table +func generateFixedLiteralEncoding() *huffmanEncoder { + h := newHuffmanEncoder(maxNumLit) + codes := h.codes + var ch uint16 + for ch = 0; ch < maxNumLit; ch++ { + var bits uint16 + var size uint8 + switch { + case ch < 144: + // size 8, 000110000 .. 
10111111 + bits = ch + 48 + size = 8 + break + case ch < 256: + // size 9, 110010000 .. 111111111 + bits = ch + 400 - 144 + size = 9 + break + case ch < 280: + // size 7, 0000000 .. 0010111 + bits = ch - 256 + size = 7 + break + default: + // size 8, 11000000 .. 11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = toCode(reverseBits(bits, size), size) + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := uint16(0); ch < 30; ch++ { + codes[ch] = toCode(reverseBits(ch, 5), 5) + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int64 { + var total int64 + for i, f := range freq { + if f != 0 { + total += int64(f) * int64(h.codes[i].bits()) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. 
This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. + leafCounts[level][level] = n + l.nextCharFreq = list[n].freq + } else { + // The next item on this row is a pair from the previous row. 
+ // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + //make([]int32, maxBits+1) + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). 
+ chunk := list[len(list)-int(bits):] + + h.lns.Sort(chunk) + for _, node := range chunk { + h.codes[node.literal] = toCode(reverseBits(code, uint8(n)), uint8(n)) + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { + if h.freqcache == nil { + h.freqcache = make([]literalNode, 300) + } + list := h.freqcache[:len(freq)+1] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + list[count] = literalNode{} + //h.codeBits[i] = 0 + h.codes[i].setBits(0) + } + } + list[len(freq)] = literalNode{} + // If freq[] is shorter than codeBits[], fill rest of codeBits[] with zeros + // FIXME: Doesn't do what it says on the tin (klauspost) + //h.codeBits = h.codeBits[0:len(freq)] + + list = list[0:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
+ h.codes[node.literal].set(uint16(i), 1) + //h.codeBits[node.literal] = 1 + //h.code[node.literal] = uint16(i) + } + return + } + h.lfs.Sort(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +type literalNodeSorter []literalNode + +func (s *literalNodeSorter) Sort(a []literalNode) { + *s = literalNodeSorter(a) + sort.Sort(s) +} + +func (s literalNodeSorter) Len() int { return len(s) } + +func (s literalNodeSorter) Less(i, j int) bool { + return s[i].literal < s[j].literal +} + +func (s literalNodeSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type literalFreqSorter []literalNode + +func (s *literalFreqSorter) Sort(a []literalNode) { + *s = literalFreqSorter(a) + sort.Sort(s) +} + +func (s literalFreqSorter) Len() int { return len(s) } + +func (s literalFreqSorter) Less(i, j int) bool { + if s[i].freq == s[j].freq { + return s[i].literal < s[j].literal + } + return s[i].freq < s[j].freq +} + +func (s literalFreqSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 00000000000..91e27e7e280 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,846 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go -output fixedhuff.go + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. 
+package flate + +import ( + "bufio" + "io" + "strconv" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxHist = 32768 // max history required + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code +) + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) +} + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +type ReadError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Read +} + +func (e *ReadError) Error() string { + return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// A WriteError reports an error encountered while writing output. +type WriteError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Write +} + +func (e *WriteError) Error() string { + return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. 
+ Reset(r io.Reader, dict []byte) error +} + +// Note that much of the implementation of huffmanDecoder is also copied +// into gen.go (in package main) for the purpose of precomputing the +// fixed huffman tables so they can be included statically. + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. + +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. 
It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i] = code + code += count[i] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+ if code != 1< huffmanChunkBits { + numLinks := 1 << (uint(max) - huffmanChunkBits) + h.linkMask = uint32(numLinks - 1) + + // create link tables + link := nextcode[huffmanChunkBits+1] >> 1 + h.links = make([][]uint32, huffmanNumChunks-link) + for j := uint(link); j < huffmanNumChunks; j++ { + reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 + reverse >>= uint(16 - huffmanChunkBits) + off := j - uint(link) + if sanity && h.chunks[reverse] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 + reverse >>= uint(16 - n) + if n <= huffmanChunkBits { + for off := reverse; off < len(h.chunks); off += 1 << uint(n) { + // We should never need to overwrite + // an existing chunk. Also, 0 is + // never a valid chunk, because the + // lower 4 "count" bits should be + // between 1 and 15. + if sanity && h.chunks[off] != 0 { + panic("impossible: overwriting existing chunk") + } + h.chunks[off] = chunk + } + } else { + j := reverse & (huffmanNumChunks - 1) + if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { + // Longer codes should have been + // associated with a link table above. + panic("impossible: not an indirect chunk") + } + value := h.chunks[j] >> huffmanValueShift + linktab := h.links[value] + reverse >>= huffmanChunkBits + for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { + if sanity && linktab[off] != 0 { + panic("impossible: overwriting existing chunk") + } + linktab[off] = chunk + } + } + } + + if sanity { + // Above we've sanity checked that we never overwrote + // an existing entry. Here we additionally check that + // we filled the tables completely. + for i, chunk := range h.chunks { + if chunk == 0 { + // As an exception, in the degenerate + // single-code case, we allow odd + // chunks to be missing. 
+ if code == 1 && i%2 == 1 { + continue + } + panic("impossible: missing chunk") + } + } + for _, linktab := range h.links { + for _, chunk := range linktab { + if chunk == 0 { + panic("impossible: missing chunk") + } + } + } + } + + return true +} + +// The actual read interface needed by NewReader. +// If the passed in io.Reader does not also have ReadByte, +// the NewReader will introduce its own buffering. +type Reader interface { + io.Reader + io.ByteReader +} + +// Decompress state. +type decompressor struct { + // Input source. + r Reader + roffset int64 + woffset int64 + + // Input bits, in top of b. + b uint32 + nb uint + + // Huffman decoders for literal/length, distance. + h1, h2 huffmanDecoder + + // Length arrays used to define Huffman codes. + bits *[maxNumLit + maxNumDist]int + codebits *[numCodes]int + + // Output history, buffer. + hist *[maxHist]byte + hp int // current output position in buffer + hw int // have written hist[0:hw] already + hfull bool // buffer has filled at least once + + // Temporary buffer (avoids repeated allocation). + buf [4]byte + + // Next step in the decompression, + // and decompression state. + step func(*decompressor) + final bool + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int +} + +func (f *decompressor) nextBlock() { + if f.final { + if f.hw != f.hp { + f.flush((*decompressor).nextBlock) + return + } + f.err = io.EOF + return + } + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlock() + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlock() + default: + // 3 is reserved. 
+ f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + for { + if f.err != nil { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if len(f.toRead) > 0 { + var n int + n, f.err = w.Write(f.toRead) + if f.err != nil { + return total, f.err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + total += int64(n) + } + f.step(f) + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. 
+ for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. + var rep int + var nb uint + var b int + switch x { + default: + return InternalError("unexpected length code") + case 16: + rep = 3 + nb = 2 + if i == 0 { + return CorruptInputError(f.roffset) + } + b = f.bits[i-1] + case 17: + rep = 3 + nb = 3 + b = 0 + case 18: + rep = 11 + nb = 7 + b = 0 + } + for f.nb < nb { + if err := f.moreBits(); err != nil { + return err + } + } + rep += int(f.b & uint32(1<>= nb + f.nb -= nb + if i+rep > n { + return CorruptInputError(f.roffset) + } + for j := 0; j < rep; j++ { + f.bits[i] = b + i++ + } + } + + if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { + return CorruptInputError(f.roffset) + } + + // In order to preserve the property that we never read any extra bytes + // after the end of the DEFLATE stream, huffSym conservatively reads min + // bits at a time until it decodes the symbol. However, since every block + // must end with an EOB marker, we can use that as the minimum number of + // bits to read and guarantee we never read past the end of the stream. + if f.bits[endBlockMarker] > 0 { + f.h1.min = f.bits[endBlockMarker] // Length of EOB marker + } + + return nil +} + +// Decode a single Huffman block from f. +// hl and hd are the Huffman states for the lit/length values +// and the distance values, respectively. 
If hd == nil, using the +// fixed distance encoding associated with fixed Huffman blocks. +func (f *decompressor) huffmanBlock() { + for { + v, err := f.huffSym(f.hl) + if err != nil { + f.err = err + return + } + var n uint // number of bits extra + var length int + switch { + case v < 256: + f.hist[f.hp] = byte(v) + f.hp++ + if f.hp == len(f.hist) { + // After the flush, continue this loop. + f.flush((*decompressor).huffmanBlock) + return + } + continue + case v == 256: + // Done with huffman block; read next block. + f.step = (*decompressor).nextBlock + return + // otherwise, reference to older data + case v < 265: + length = v - (257 - 3) + n = 0 + case v < 269: + length = v*2 - (265*2 - 11) + n = 1 + case v < 273: + length = v*4 - (269*4 - 19) + n = 2 + case v < 277: + length = v*8 - (273*8 - 35) + n = 3 + case v < 281: + length = v*16 - (277*16 - 67) + n = 4 + case v < 285: + length = v*32 - (281*32 - 131) + n = 5 + case v < maxNumLit: + length = 258 + n = 0 + default: + f.err = CorruptInputError(f.roffset) + return + } + if n > 0 { + for f.nb < n { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + length += int(f.b & uint32(1<>= n + f.nb -= n + } + + var dist int + if f.hd == nil { + for f.nb < 5 { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + dist = int(reverseByte[(f.b&0x1F)<<3]) + f.b >>= 5 + f.nb -= 5 + } else { + if dist, err = f.huffSym(f.hd); err != nil { + f.err = err + return + } + } + + switch { + case dist < 4: + dist++ + case dist < maxNumDist: + nb := uint(dist-2) >> 1 + // have 1 bit in bottom of dist, need nb more. + extra := (dist & 1) << nb + for f.nb < nb { + if err = f.moreBits(); err != nil { + f.err = err + return + } + } + extra |= int(f.b & uint32(1<>= nb + f.nb -= nb + dist = 1<<(nb+1) + 1 + extra + default: + f.err = CorruptInputError(f.roffset) + return + } + + // Copy history[-dist:-dist+length] into output. 
+ if dist > len(f.hist) { + f.err = InternalError("bad history distance") + return + } + + // No check on length; encoding can be prescient. + if !f.hfull && dist > f.hp { + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + if f.copyHist() { + return + } + } +} + +// copyHist copies f.copyLen bytes from f.hist (f.copyDist bytes ago) to itself. +// It reports whether the f.hist buffer is full. +func (f *decompressor) copyHist() bool { + p := f.hp - f.copyDist + if p < 0 { + p += len(f.hist) + } + for f.copyLen > 0 { + n := f.copyLen + if x := len(f.hist) - f.hp; n > x { + n = x + } + if x := len(f.hist) - p; n > x { + n = x + } + forwardCopy(f.hist[:], f.hp, p, n) + p += n + f.hp += n + f.copyLen -= n + if f.hp == len(f.hist) { + // After flush continue copying out of history. + f.flush((*decompressor).copyHuff) + return true + } + if p == len(f.hist) { + p = 0 + } + } + return false +} + +func (f *decompressor) copyHuff() { + if f.copyHist() { + return + } + f.huffmanBlock() +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + f.nb = 0 + f.b = 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + f.err = &ReadError{f.roffset, err} + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + // 0-length block means sync + f.flush((*decompressor).nextBlock) + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. 
+func (f *decompressor) copyData() { + n := f.copyLen + for n > 0 { + m := len(f.hist) - f.hp + if m > n { + m = n + } + m, err := io.ReadFull(f.r, f.hist[f.hp:f.hp+m]) + f.roffset += int64(m) + if err != nil { + f.err = &ReadError{f.roffset, err} + return + } + n -= m + f.hp += m + if f.hp == len(f.hist) { + f.copyLen = n + f.flush((*decompressor).copyData) + return + } + } + f.step = (*decompressor).nextBlock +} + +func (f *decompressor) setDict(dict []byte) { + if len(dict) > len(f.hist) { + // Will only remember the tail. + dict = dict[len(dict)-len(f.hist):] + } + + f.hp = copy(f.hist[:], dict) + if f.hp == len(f.hist) { + f.hp = 0 + f.hfull = true + } + f.hw = f.hp +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + for { + for f.nb < n { + if err := f.moreBits(); err != nil { + return 0, err + } + } + chunk := h.chunks[f.b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= f.nb { + if n == 0 { + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b >>= n + f.nb -= n + return int(chunk >> huffmanValueShift), nil + } + } +} + +// Flush any buffered output to the underlying writer. 
+func (f *decompressor) flush(step func(*decompressor)) { + f.toRead = f.hist[f.hw:f.hp] + f.woffset += int64(f.hp - f.hw) + f.hw = f.hp + if f.hp == len(f.hist) { + f.hp = 0 + f.hw = 0 + f.hfull = true + } + f.step = step +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + hist: f.hist, + step: (*decompressor).nextBlock, + } + if dict != nil { + f.setDict(dict) + } + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + var f decompressor + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.r = makeReader(r) + f.hist = new([maxHist]byte) + f.step = (*decompressor).nextBlock + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. 
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + var f decompressor + f.r = makeReader(r) + f.hist = new([maxHist]byte) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.setDict(dict) + return &f +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 00000000000..c1a02720d1a --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +var reverseByte = [256]byte{ + 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, + 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, + 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, + 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, + 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, + 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, + 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, + 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, + 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, + 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, + 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, + 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, + 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, + 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, + 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, + 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, + 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, + 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, + 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, + 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, + 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, + 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, + 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, + 0x1d, 0x9d, 0x5d, 0xdd, 
0x3d, 0xbd, 0x7d, 0xfd, + 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, + 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, + 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, + 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, + 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, + 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, + 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, + 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, +} + +func reverseUint16(v uint16) uint16 { + return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 +} + +func reverseBits(number uint16, bitLength byte) uint16 { + return reverseUint16(number << uint8(16-bitLength)) +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go new file mode 100644 index 00000000000..78199380d18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go @@ -0,0 +1,97 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Modified for deflate by Klaus Post (c) 2015. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// We limit how far copy back-references can go, the same as the C++ code. +const maxOffset = 1 << 15 + +// emitLiteral writes a literal chunk and returns the number of bytes written. +func emitLiteral(dst *tokens, lit []byte) { + ol := dst.n + for i, v := range lit { + dst.tokens[i+ol] = token(v) + } + dst.n += len(lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. +func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +// snappyEncode uses Snappy-like compression, but stores as Huffman +// blocks. +func snappyEncode(dst *tokens, src []byte) { + // Return early if src is short. 
+ if len(src) <= 4 { + if len(src) != 0 { + emitLiteral(dst, src) + } + return + } + + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + const maxTableSize = 1 << 14 + shift, tableSize := uint(32-8), 1<<8 + for tableSize < maxTableSize && tableSize < len(src) { + shift-- + tableSize *= 2 + } + var table [maxTableSize]int + var misses int + // Iterate over the source bytes. + var ( + s int // The iterator position. + t int // The last position with the same hash as s. + lit int // The start position of any pending literal bytes. + ) + for s+3 < len(src) { + // Update the hash table. + b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] + h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 + p := &table[(h*0x1e35a7bd)>>shift] + // We need to to store values in [-1, inf) in table. To save + // some initialization time, (re)use the table's zero value + // and shift the values against this zero: add 1 on writes, + // subtract 1 on reads. + t, *p = *p-1, s+1 + // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. + if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { + misses++ + // Skip 1 byte for 16 consecutive missed. + s += 1 + (misses >> 4) + continue + } + // Otherwise, we have a match. First, emit any pending literal bytes. + if lit != s { + emitLiteral(dst, src[lit:s]) + } + // Extend the match to be as long as possible. + s0 := s + s1 := s + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + s, t = s+4, t+4 + for s < s1 && src[s] == src[t] { + s++ + t++ + } + misses = 0 + // Emit the copied bytes. + // inlined: emitCopy(dst, s-t, s-s0) + + dst.tokens[dst.n] = matchToken(uint32(s-s0-3), uint32(s-t-minOffsetSize)) + dst.n++ + lit = s + } + + // Emit any final pending literal bytes and return. 
+ if lit != len(src) { + emitLiteral(dst, src[lit:]) + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go b/Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go new file mode 100644 index 00000000000..94fa5eb93ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go @@ -0,0 +1,105 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +const ( + // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused + // 8 bits: xlength = length - MIN_MATCH_LENGTH + // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal + lengthShift = 22 + offsetMask = 1< pair into a match token. +func matchToken(xlength uint32, xoffset uint32) token { + return token(matchType + xlength<> lengthShift) } + +func lengthCode(len uint32) uint32 { return lengthCodes[len] } + +// Returns the offset code corresponding to a specific offset +func offsetCode(off uint32) uint32 { + if off < uint32(len(offsetCodes)) { + return offsetCodes[off] + } else if off>>7 < uint32(len(offsetCodes)) { + return offsetCodes[off>>7] + 14 + } else { + return offsetCodes[off>>14] + 28 + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go b/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go new file mode 100644 index 00000000000..f0d49197be1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gzip implements reading and writing of gzip format compressed files, +// as specified in RFC 1952. 
+package gzip + +import ( + "bufio" + "errors" + "hash" + "io" + "time" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + flagText = 1 << 0 + flagHdrCrc = 1 << 1 + flagExtra = 1 << 2 + flagName = 1 << 3 + flagComment = 1 << 4 +) + +func makeReader(r io.Reader) flate.Reader { + if rr, ok := r.(flate.Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +var ( + // ErrChecksum is returned when reading GZIP data that has an invalid checksum. + ErrChecksum = errors.New("gzip: invalid checksum") + // ErrHeader is returned when reading GZIP data that has an invalid header. + ErrHeader = errors.New("gzip: invalid header") +) + +// The gzip file stores a header giving metadata about the compressed file. +// That header is exposed as the fields of the Writer and Reader structs. +type Header struct { + Comment string // comment + Extra []byte // "extra data" + ModTime time.Time // modification time + Name string // file name + OS byte // operating system type +} + +// A Reader is an io.Reader that can be read to retrieve +// uncompressed data from a gzip-format compressed file. +// +// In general, a gzip file can be a concatenation of gzip files, +// each with its own header. Reads from the Reader +// return the concatenation of the uncompressed data of each. +// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. 
+type Reader struct { + Header + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + size uint32 + flg byte + buf [512]byte + err error + multistream bool +} + +// NewReader creates a new Reader reading the given reader. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + z.r = makeReader(r) + z.multistream = true + z.digest = crc32.NewIEEE() + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + z.r = makeReader(r) + if z.digest == nil { + z.digest = crc32.NewIEEE() + } else { + z.digest.Reset() + } + z.size = 0 + z.err = nil + z.multistream = true + return z.readHeader(true) +} + +// Multistream controls whether the reader supports multistream files. +// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. 
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func get4(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func (z *Reader) readString() (string, error) { + var err error + needconv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needconv = true + } + if z.buf[i] == 0 { + // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). + if needconv { + s := make([]rune, 0, i) + for _, v := range z.buf[0:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[0:i]), nil + } + } +} + +func (z *Reader) read2() (uint32, error) { + _, err := io.ReadFull(z.r, z.buf[0:2]) + if err != nil { + return 0, err + } + return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil +} + +func (z *Reader) readHeader(save bool) error { + _, err := io.ReadFull(z.r, z.buf[0:10]) + if err != nil { + return err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return ErrHeader + } + z.flg = z.buf[3] + if save { + z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) + // z.buf[8] is xfl, ignored + z.OS = z.buf[9] + } + z.digest.Reset() + z.digest.Write(z.buf[0:10]) + + if z.flg&flagExtra != 0 { + n, err := z.read2() + if err != nil { + return err + } + data := make([]byte, n) + if _, err = io.ReadFull(z.r, data); err != nil { + return err + } + if save { + z.Extra = data + } + } + + var s string + if z.flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Name = s + } + } + + if z.flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + 
z.Comment = s + } + } + + if z.flg&flagHdrCrc != 0 { + n, err := z.read2() + if err != nil { + return err + } + sum := z.digest.Sum32() & 0xFFFF + if n != sum { + return ErrHeader + } + } + + z.digest.Reset() + if z.decompressor == nil { + z.decompressor = flate.NewReader(z.r) + } else { + z.decompressor.(flate.Resetter).Reset(z.r, nil) + } + return nil +} + +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + + n, err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + z.size += uint32(n) + if n != 0 || err != io.EOF { + z.err = err + return + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return 0, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return 0, z.err + } + + // File is ok; is there another? + if !z.multistream { + return 0, io.EOF + } + + if err = z.readHeader(false); err != nil { + z.err = err + return + } + + // Yes. Reset and read from it. + z.digest.Reset() + z.size = 0 + return z.Read(p) +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (z *Reader) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + for { + if z.err != nil { + if z.err == io.EOF { + return total, nil + } + return total, z.err + } + + // We write both to output and digest. + mw := io.MultiWriter(w, z.digest) + n, err := z.decompressor.(io.WriterTo).WriteTo(mw) + total += n + z.size += uint32(n) + if err != nil { + z.err = err + return total, z.err + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return 0, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return 0, z.err + } + + // File is ok; is there another? 
+ if !z.multistream { + return total, nil + } + + err = z.readHeader(false) + // There was not more + if err == io.EOF { + return total, nil + } + if err != nil { + z.err = err + return total, err + } + + // Yes. Reset and read from it. + z.digest.Reset() + z.size = 0 + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go b/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go new file mode 100644 index 00000000000..a590c883cd1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go @@ -0,0 +1,274 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "errors" + "fmt" + "hash" + "io" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header + w io.Writer + level int + wroteHeader bool + compressor *flate.Writer + digest hash.Hash32 + size uint32 + closed bool + buf [10]byte + err error +} + +// NewWriter returns a new Writer. +// Writes to the returned writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. 
+// +// Callers that wish to set the fields in Writer.Header must do so before +// the first call to Write or Close. The Comment and Name header fields are +// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO +// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an +// error on Write. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevel(w, DefaultCompression) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. +// +// The compression level can be ConstantCompression, DefaultCompression, +// NoCompression, or any integer value between BestSpeed and BestCompression +// inclusive. The error returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + if level < ConstantCompression || level > BestCompression { + return nil, fmt.Errorf("gzip: invalid compression level: %d", level) + } + z := new(Writer) + z.init(w, level) + return z, nil +} + +func (z *Writer) init(w io.Writer, level int) { + digest := z.digest + if digest != nil { + digest.Reset() + } else { + digest = crc32.NewIEEE() + } + compressor := z.compressor + if compressor != nil { + compressor.Reset(w) + } + *z = Writer{ + Header: Header{ + OS: 255, // unknown + }, + w: w, + level: level, + digest: digest, + compressor: compressor, + } +} + +// Reset discards the Writer z's state and makes it equivalent to the +// result of its original state from NewWriter or NewWriterLevel, but +// writing to w instead. This permits reusing a Writer rather than +// allocating a new one. +func (z *Writer) Reset(w io.Writer) { + z.init(w, z.level) +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). 
+func put2(p []byte, v uint16) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) +} + +func put4(p []byte, v uint32) { + p[0] = uint8(v >> 0) + p[1] = uint8(v >> 8) + p[2] = uint8(v >> 16) + p[3] = uint8(v >> 24) +} + +// writeBytes writes a length-prefixed byte slice to z.w. +func (z *Writer) writeBytes(b []byte) error { + if len(b) > 0xffff { + return errors.New("gzip.Write: Extra data is too large") + } + put2(z.buf[0:2], uint16(len(b))) + _, err := z.w.Write(z.buf[0:2]) + if err != nil { + return err + } + _, err = z.w.Write(b) + return err +} + +// writeString writes a UTF-8 string s in GZIP's format to z.w. +// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). +func (z *Writer) writeString(s string) (err error) { + // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. + needconv := false + for _, v := range s { + if v == 0 || v > 0xff { + return errors.New("gzip.Write: non-Latin-1 header string") + } + if v > 0x7f { + needconv = true + } + } + if needconv { + b := make([]byte, 0, len(s)) + for _, v := range s { + b = append(b, byte(v)) + } + _, err = z.w.Write(b) + } else { + _, err = io.WriteString(z.w, s) + } + if err != nil { + return err + } + // GZIP strings are NUL-terminated. + z.buf[0] = 0 + _, err = z.w.Write(z.buf[0:1]) + return err +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed. +func (z *Writer) Write(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + var n int + // Write the GZIP header lazily. 
+ if !z.wroteHeader { + z.wroteHeader = true + z.buf[0] = gzipID1 + z.buf[1] = gzipID2 + z.buf[2] = gzipDeflate + z.buf[3] = 0 + if z.Extra != nil { + z.buf[3] |= 0x04 + } + if z.Name != "" { + z.buf[3] |= 0x08 + } + if z.Comment != "" { + z.buf[3] |= 0x10 + } + put4(z.buf[4:8], uint32(z.ModTime.Unix())) + if z.level == BestCompression { + z.buf[8] = 2 + } else if z.level == BestSpeed { + z.buf[8] = 4 + } else { + z.buf[8] = 0 + } + z.buf[9] = z.OS + n, z.err = z.w.Write(z.buf[0:10]) + if z.err != nil { + return n, z.err + } + if z.Extra != nil { + z.err = z.writeBytes(z.Extra) + if z.err != nil { + return n, z.err + } + } + if z.Name != "" { + z.err = z.writeString(z.Name) + if z.err != nil { + return n, z.err + } + } + if z.Comment != "" { + z.err = z.writeString(z.Comment) + if z.err != nil { + return n, z.err + } + } + if z.compressor == nil { + z.compressor, _ = flate.NewWriter(z.w, z.level) + } + } + z.size += uint32(len(p)) + z.digest.Write(p) + n, z.err = z.compressor.Write(p) + return n, z.err +} + +// Flush flushes any pending compressed data to the underlying writer. +// +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. Flush does +// not return until the data has been written. If the underlying +// writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (z *Writer) Flush() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. 
+func (z *Writer) Close() error { + if z.err != nil { + return z.err + } + if z.closed { + return nil + } + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if z.err != nil { + return z.err + } + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + put4(z.buf[0:4], z.digest.Sum32()) + put4(z.buf[4:8], z.size) + _, z.err = z.w.Write(z.buf[0:8]) + return z.err +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore b/Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml b/Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml new file mode 100644 index 00000000000..fb74de6b878 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE b/Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE new file mode 100644 index 00000000000..5cec7ee949b --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to 
whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/README.md b/Godeps/_workspace/src/github.com/klauspost/cpuid/README.md new file mode 100644 index 00000000000..b2b6bee879a --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/README.md @@ -0,0 +1,145 @@ +# cpuid +Package cpuid provides information about the CPU running the current program. + +CPU features are detected on startup, and kept for fast access through the life of the application. +Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use. + +You can access the CPU information by accessing the shared CPU variable of the cpuid library. 
+ +Package home: https://github.com/klauspost/cpuid + +[![GoDoc][1]][2] [![Build Status][3]][4] + +[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg +[2]: https://godoc.org/github.com/klauspost/cpuid +[3]: https://travis-ci.org/klauspost/cpuid.svg +[4]: https://travis-ci.org/klauspost/cpuid + +# features +## CPU Instructions +* **CMOV** (i686 CMOV) +* **NX** (NX (No-Execute) bit) +* **AMD3DNOW** (AMD 3DNOW) +* **AMD3DNOWEXT** (AMD 3DNowExt) +* **MMX** (standard MMX) +* **MMXEXT** (SSE integer functions or AMD MMX ext) +* **SSE** (SSE functions) +* **SSE2** (P4 SSE functions) +* **SSE3** (Prescott SSE3 functions) +* **SSSE3** (Conroe SSSE3 functions) +* **SSE4** (Penryn SSE4.1 functions) +* **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions) +* **SSE42** (Nehalem SSE4.2 functions) +* **AVX** (AVX functions) +* **AVX2** (AVX2 functions) +* **FMA3** (Intel FMA 3) +* **FMA4** (Bulldozer FMA4 functions) +* **XOP** (Bulldozer XOP functions) +* **F16C** (Half-precision floating-point conversion) +* **BMI1** (Bit Manipulation Instruction Set 1) +* **BMI2** (Bit Manipulation Instruction Set 2) +* **TBM** (AMD Trailing Bit Manipulation) +* **LZCNT** (LZCNT instruction) +* **POPCNT** (POPCNT instruction) +* **AESNI** (Advanced Encryption Standard New Instructions) +* **CLMUL** (Carry-less Multiplication) +* **HTT** (Hyperthreading (enabled)) +* **HLE** (Hardware Lock Elision) +* **RTM** (Restricted Transactional Memory) +* **RDRAND** (RDRAND instruction is available) +* **RDSEED** (RDSEED instruction is available) +* **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +* **SHA** (Intel SHA Extensions) +* **AVX512F** (AVX-512 Foundation) +* **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions) +* **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions) +* **AVX512PF** (AVX-512 Prefetch Instructions) +* **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions) +* **AVX512CD** (AVX-512 Conflict Detection Instructions) 
+* **AVX512BW** (AVX-512 Byte and Word Instructions) +* **AVX512VL** (AVX-512 Vector Length Extensions) +* **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions) +* **MPX** (Intel MPX (Memory Protection Extensions)) +* **ERMS** (Enhanced REP MOVSB/STOSB) +* **RDTSCP** (RDTSCP Instruction) +* **CX16** (CMPXCHG16B Instruction) +* **SGX** (Software Guard Extensions, with activation details) + +## Performance +* **RDTSCP()** Returns current cycle count. Can be used for benchmarking. +* **SSE2SLOW** (SSE2 is supported, but usually not faster) +* **SSE3SLOW** (SSE3 is supported, but usually not faster) +* **ATOM** (Atom processor, some SSSE3 instructions are slower) +* **Cache line** (Probable size of a cache line). +* **L1, L2, L3 Cache size** on newer Intel/AMD CPUs. + +## Cpu Vendor/VM +* **Intel** +* **AMD** +* **VIA** +* **Transmeta** +* **NSC** +* **KVM** (Kernel-based Virtual Machine) +* **MSVM** (Microsoft Hyper-V or Windows Virtual PC) +* **VMware** +* **XenHVM** + +# installing + +```go get github.com/klauspost/cpuid``` + +# example + +```Go +package main + +import ( + "fmt" + "github.com/klauspost/cpuid" +) + +func main() { + // Print basic CPU information: + fmt.Println("Name:", cpuid.CPU.BrandName) + fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores) + fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model) + fmt.Println("Features:", cpuid.CPU.Features) + fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine) + fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1D, "bytes") + fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes") + fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes") + + // Test if we have a specific feature: + if cpuid.CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} +``` + +Sample output: +``` +>go run main.go +Name: 
Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz +PhysicalCores: 2 +ThreadsPerCore: 2 +LogicalCores: 4 +Family 6 Model: 42 +Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL +Cacheline bytes: 64 +We have Streaming SIMD Extensions +``` + +# private package + +In the "private" folder you can find an autogenerated version of the library you can include in your own packages. + +For this purpose all exports are removed, and functions and constants are lowercased. + +This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages. + +# license + +This code is published under an MIT license. See LICENSE file for more information. diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go new file mode 100644 index 00000000000..9230ca56282 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go @@ -0,0 +1,1022 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// Package cpuid provides information about the CPU running the current program. +// +// CPU features are detected on startup, and kept for fast access through the life of the application. +// Currently x86 / x64 (AMD64) is supported. +// +// You can access the CPU information by accessing the shared CPU variable of the cpuid library. +// +// Package home: https://github.com/klauspost/cpuid +package cpuid + +import "strings" + +// Vendor is a representation of a CPU vendor. 
+type Vendor int + +const ( + Other Vendor = iota + Intel + AMD + VIA + Transmeta + NSC + KVM // Kernel-based Virtual Machine + MSVM // Microsoft Hyper-V or Windows Virtual PC + VMware + XenHVM +) + +const ( + CMOV = 1 << iota // i686 CMOV + NX // NX (No-Execute) bit + AMD3DNOW // AMD 3DNOW + AMD3DNOWEXT // AMD 3DNowExt + MMX // standard MMX + MMXEXT // SSE integer functions or AMD MMX ext + SSE // SSE functions + SSE2 // P4 SSE functions + SSE3 // Prescott SSE3 functions + SSSE3 // Conroe SSSE3 functions + SSE4 // Penryn SSE4.1 functions + SSE4A // AMD Barcelona microarchitecture SSE4a instructions + SSE42 // Nehalem SSE4.2 functions + AVX // AVX functions + AVX2 // AVX2 functions + FMA3 // Intel FMA 3 + FMA4 // Bulldozer FMA4 functions + XOP // Bulldozer XOP functions + F16C // Half-precision floating-point conversion + BMI1 // Bit Manipulation Instruction Set 1 + BMI2 // Bit Manipulation Instruction Set 2 + TBM // AMD Trailing Bit Manipulation + LZCNT // LZCNT instruction + POPCNT // POPCNT instruction + AESNI // Advanced Encryption Standard New Instructions + CLMUL // Carry-less Multiplication + HTT // Hyperthreading (enabled) + HLE // Hardware Lock Elision + RTM // Restricted Transactional Memory + RDRAND // RDRAND instruction is available + RDSEED // RDSEED instruction is available + ADX // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA // Intel SHA Extensions + AVX512F // AVX-512 Foundation + AVX512DQ // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions + AVX512PF // AVX-512 Prefetch Instructions + AVX512ER // AVX-512 Exponential and Reciprocal Instructions + AVX512CD // AVX-512 Conflict Detection Instructions + AVX512BW // AVX-512 Byte and Word Instructions + AVX512VL // AVX-512 Vector Length Extensions + AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions + MPX // Intel MPX (Memory Protection Extensions) + ERMS // Enhanced REP MOVSB/STOSB + RDTSCP // RDTSCP Instruction + 
CX16 // CMPXCHG16B Instruction + SGX // Software Guard Extensions + + // Performance indicators + SSE2SLOW // SSE2 is supported, but usually not faster + SSE3SLOW // SSE3 is supported, but usually not faster + ATOM // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[Flags]string{ + CMOV: "CMOV", // i686 CMOV + NX: "NX", // NX (No-Execute) bit + AMD3DNOW: "AMD3DNOW", // AMD 3DNOW + AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt + MMX: "MMX", // Standard MMX + MMXEXT: "MMXEXT", // SSE integer functions or AMD MMX ext + SSE: "SSE", // SSE functions + SSE2: "SSE2", // P4 SSE2 functions + SSE3: "SSE3", // Prescott SSE3 functions + SSSE3: "SSSE3", // Conroe SSSE3 functions + SSE4: "SSE4.1", // Penryn SSE4.1 functions + SSE4A: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + SSE42: "SSE4.2", // Nehalem SSE4.2 functions + AVX: "AVX", // AVX functions + AVX2: "AVX2", // AVX functions + FMA3: "FMA3", // Intel FMA 3 + FMA4: "FMA4", // Bulldozer FMA4 functions + XOP: "XOP", // Bulldozer XOP functions + F16C: "F16C", // Half-precision floating-point conversion + BMI1: "BMI1", // Bit Manipulation Instruction Set 1 + BMI2: "BMI2", // Bit Manipulation Instruction Set 2 + TBM: "TBM", // AMD Trailing Bit Manipulation + LZCNT: "LZCNT", // LZCNT instruction + POPCNT: "POPCNT", // POPCNT instruction + AESNI: "AESNI", // Advanced Encryption Standard New Instructions + CLMUL: "CLMUL", // Carry-less Multiplication + HTT: "HTT", // Hyperthreading (enabled) + HLE: "HLE", // Hardware Lock Elision + RTM: "RTM", // Restricted Transactional Memory + RDRAND: "RDRAND", // RDRAND instruction is available + RDSEED: "RDSEED", // RDSEED instruction is available + ADX: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + SHA: "SHA", // Intel SHA Extensions + AVX512F: "AVX512F", // AVX-512 Foundation + AVX512DQ: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + AVX512IFMA: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add 
Instructions + AVX512PF: "AVX512PF", // AVX-512 Prefetch Instructions + AVX512ER: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + AVX512CD: "AVX512CD", // AVX-512 Conflict Detection Instructions + AVX512BW: "AVX512BW", // AVX-512 Byte and Word Instructions + AVX512VL: "AVX512VL", // AVX-512 Vector Length Extensions + AVX512VBMI: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + MPX: "MPX", // Intel MPX (Memory Protection Extensions) + ERMS: "ERMS", // Enhanced REP MOVSB/STOSB + RDTSCP: "RDTSCP", // RDTSCP Instruction + CX16: "CX16", // CMPXCHG16B Instruction + SGX: "SGX", // Software Guard Extensions + + // Performance indicators + SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster + SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster + ATOM: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type CPUInfo struct { + BrandName string // Brand name reported by the CPU + VendorID Vendor // Comparable CPU vendor ID + Features Flags // Features of the CPU + PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. + LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. + Family int // CPU family number + Model int // CPU model number + CacheLine int // Cache line size in bytes. Will be 0 if undetectable. + Cache struct { + L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected + L2 int // L2 Cache (per core or shared). Will be -1 if undetected + L3 int // L3 Instruction Cache (per core or shared). 
Will be -1 if undetected + } + SGX SGXSupport + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var CPU CPUInfo + +func init() { + initCPU() + Detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func Detect() { + CPU.maxFunc = maxFunctionID() + CPU.maxExFunc = maxExtendedFunction() + CPU.BrandName = brandName() + CPU.CacheLine = cacheLine() + CPU.Family, CPU.Model = familyModel() + CPU.Features = support() + CPU.SGX = sgx(CPU.Features&SGX != 0) + CPU.ThreadsPerCore = threadsPerCore() + CPU.LogicalCores = logicalCores() + CPU.PhysicalCores = physicalCores() + CPU.VendorID = vendorID() + CPU.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c CPUInfo) Cmov() bool { + return c.Features&CMOV != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! instructions +func (c CPUInfo) Amd3dnow() bool { + return c.Features&AMD3DNOW != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! 
Extended instructions +func (c CPUInfo) Amd3dnowExt() bool { + return c.Features&AMD3DNOWEXT != 0 +} + +// MMX indicates support of MMX instructions +func (c CPUInfo) MMX() bool { + return c.Features&MMX != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c CPUInfo) MMXExt() bool { + return c.Features&MMXEXT != 0 +} + +// SSE indicates support of SSE instructions +func (c CPUInfo) SSE() bool { + return c.Features&SSE != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c CPUInfo) SSE2() bool { + return c.Features&SSE2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c CPUInfo) SSE3() bool { + return c.Features&SSE3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c CPUInfo) SSSE3() bool { + return c.Features&SSSE3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c CPUInfo) SSE4() bool { + return c.Features&SSE4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c CPUInfo) SSE42() bool { + return c.Features&SSE42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c CPUInfo) AVX() bool { + return c.Features&AVX != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c CPUInfo) AVX2() bool { + return c.Features&AVX2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c CPUInfo) FMA3() bool { + return c.Features&FMA3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c CPUInfo) FMA4() bool { + return c.Features&FMA4 != 0 +} + +// XOP indicates support of XOP instructions +func (c CPUInfo) XOP() bool { + return c.Features&XOP != 0 +} + +// F16C indicates support of F16C instructions +func (c CPUInfo) F16C() bool { + return c.Features&F16C != 0 +} + +// BMI1 indicates support of BMI1 instructions +func (c CPUInfo) BMI1() bool { + return c.Features&BMI1 != 0 +} + +// BMI2 indicates support of BMI2 instructions 
+func (c CPUInfo) BMI2() bool { + return c.Features&BMI2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c CPUInfo) TBM() bool { + return c.Features&TBM != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c CPUInfo) Lzcnt() bool { + return c.Features&LZCNT != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c CPUInfo) Popcnt() bool { + return c.Features&POPCNT != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c CPUInfo) HTT() bool { + return c.Features&HTT != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c CPUInfo) SSE2Slow() bool { + return c.Features&SSE2SLOW != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor +func (c CPUInfo) SSE3Slow() bool { + return c.Features&SSE3SLOW != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c CPUInfo) AesNi() bool { + return c.Features&AESNI != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c CPUInfo) Clmul() bool { + return c.Features&CLMUL != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c CPUInfo) NX() bool { + return c.Features&NX != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c CPUInfo) SSE4A() bool { + return c.Features&SSE4A != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c CPUInfo) HLE() bool { + return c.Features&HLE != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c CPUInfo) RTM() bool { + return c.Features&RTM != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c CPUInfo) Rdrand() bool { + return c.Features&RDRAND != 0 +} + +// Rdseed indicates support of RDSEED instruction is available +func (c CPUInfo) Rdseed() bool { + return c.Features&RDSEED != 0 +} + +// ADX indicates support of Intel ADX 
(Multi-Precision Add-Carry Instruction Extensions) +func (c CPUInfo) ADX() bool { + return c.Features&ADX != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c CPUInfo) SHA() bool { + return c.Features&SHA != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c CPUInfo) AVX512F() bool { + return c.Features&AVX512F != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c CPUInfo) AVX512DQ() bool { + return c.Features&AVX512DQ != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c CPUInfo) AVX512IFMA() bool { + return c.Features&AVX512IFMA != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c CPUInfo) AVX512PF() bool { + return c.Features&AVX512PF != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c CPUInfo) AVX512ER() bool { + return c.Features&AVX512ER != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c CPUInfo) AVX512CD() bool { + return c.Features&AVX512CD != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c CPUInfo) AVX512BW() bool { + return c.Features&AVX512BW != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c CPUInfo) AVX512VL() bool { + return c.Features&AVX512VL != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c CPUInfo) AVX512VBMI() bool { + return c.Features&AVX512VBMI != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c CPUInfo) MPX() bool { + return c.Features&MPX != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c CPUInfo) ERMS() bool { + return c.Features&ERMS != 0 +} + +func (c CPUInfo) RDTSCP() bool { + return c.Features&RDTSCP != 0 +} + +func (c CPUInfo) CX16() bool { + return c.Features&CX16 != 0 +} + +// Atom indicates an Atom processor 
+func (c CPUInfo) Atom() bool { + return c.Features&ATOM != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c CPUInfo) Intel() bool { + return c.VendorID == Intel +} + +// AMD returns true if vendor is recognized as AMD +func (c CPUInfo) AMD() bool { + return c.VendorID == AMD +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c CPUInfo) Transmeta() bool { + return c.VendorID == Transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c CPUInfo) NSC() bool { + return c.VendorID == NSC +} + +// VIA returns true if vendor is recognized as VIA +func (c CPUInfo) VIA() bool { + return c.VendorID == VIA +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c CPUInfo) RTCounter() uint64 { + if !c.RDTSCP() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c CPUInfo) Ia32TscAux() uint32 { + if !c.RDTSCP() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c CPUInfo) LogicalCPU() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. This is only a hint, and will very likely +// have many false negatives. 
+func (c CPUInfo) VM() bool { + switch c.VendorID { + case MSVM, KVM, VMware, XenHVM: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type Flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f Flags) String() string { + return strings.Join(f.Strings(), ",") +} + +// Strings returns and array of the detected features. +func (f Flags) Strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := Flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != Intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case Intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. 
+ // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case AMD: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case Intel: + return logicalCores() / threadsPerCore() + case AMD: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]Vendor{ + "AMDisbetter!": AMD, + "AuthenticAMD": AMD, + "CentaurHauls": VIA, + "GenuineIntel": Intel, + "TransmetaCPU": Transmeta, + "GenuineTMx86": Transmeta, + "Geode by NSC": NSC, + "VIA VIA VIA ": VIA, + "KVMKVMKVMKVM": KVM, + "Microsoft Hv": MSVM, + "VMwareVMware": VMware, + "XenVMMXenVMM": XenHVM, +} + +func vendorID() Vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return Other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *CPUInfo) cacheSize() { + c.Cache.L1D = -1 + c.Cache.L1I = -1 + c.Cache.L2 = -1 + c.Cache.L3 = -1 + vendor := vendorID() + switch vendor { + case Intel: + if maxFunctionID() < 4 { + return + 
} + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.Cache.L1D = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.Cache.L1I = size + } else { + if c.Cache.L1D < 0 { + c.Cache.L1I = size + } + if c.Cache.L1I < 0 { + c.Cache.L1I = size + } + } + case 2: + c.Cache.L2 = size + case 3: + c.Cache.L3 = size + } + } + case AMD: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) + c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +type SGXSupport struct { + Available bool + SGX1Supported bool + SGX2Supported bool + MaxEnclaveSizeNot64 int64 + MaxEnclaveSize64 int64 +} + +func sgx(available bool) (rval SGXSupport) { + rval.Available = available + + if !available { + return + } + + a, _, _, d := cpuidex(0x12, 0) + rval.SGX1Supported = a&0x01 != 0 + rval.SGX2Supported = a&0x02 != 0 + rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 + rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 + + return +} + +func support() Flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= CMOV + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 25)) != 0 { + rval |= MMXEXT + } + if (d & (1 << 25)) != 0 { + rval |= SSE + } + if (d & (1 << 26)) != 0 { + rval |= SSE2 + } + if (c & 1) != 0 { + rval |= SSE3 + } + if (c 
& 0x00000200) != 0 { + rval |= SSSE3 + } + if (c & 0x00080000) != 0 { + rval |= SSE4 + } + if (c & 0x00100000) != 0 { + rval |= SSE42 + } + if (c & (1 << 25)) != 0 { + rval |= AESNI + } + if (c & (1 << 1)) != 0 { + rval |= CLMUL + } + if c&(1<<23) != 0 { + rval |= POPCNT + } + if c&(1<<30) != 0 { + rval |= RDRAND + } + if c&(1<<29) != 0 { + rval |= F16C + } + if c&(1<<13) != 0 { + rval |= CX16 + } + if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + rval |= HTT + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= AVX + if (c & 0x00001000) != 0 { + rval |= FMA3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, _ := cpuidex(7, 0) + if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { + rval |= AVX2 + } + if (ebx & 0x00000008) != 0 { + rval |= BMI1 + if (ebx & 0x00000100) != 0 { + rval |= BMI2 + } + } + if ebx&(1<<2) != 0 { + rval |= SGX + } + if ebx&(1<<4) != 0 { + rval |= HLE + } + if ebx&(1<<9) != 0 { + rval |= ERMS + } + if ebx&(1<<11) != 0 { + rval |= RTM + } + if ebx&(1<<14) != 0 { + rval |= MPX + } + if ebx&(1<<18) != 0 { + rval |= RDSEED + } + if ebx&(1<<19) != 0 { + rval |= ADX + } + if ebx&(1<<29) != 0 { + rval |= SHA + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
+ if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= AVX512F + } + if ebx&(1<<17) != 0 { + rval |= AVX512DQ + } + if ebx&(1<<21) != 0 { + rval |= AVX512IFMA + } + if ebx&(1<<26) != 0 { + rval |= AVX512PF + } + if ebx&(1<<27) != 0 { + rval |= AVX512ER + } + if ebx&(1<<28) != 0 { + rval |= AVX512CD + } + if ebx&(1<<30) != 0 { + rval |= AVX512BW + } + if ebx&(1<<31) != 0 { + rval |= AVX512VL + } + // ecx + if ecx&(1<<1) != 0 { + rval |= AVX512VBMI + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= LZCNT + rval |= POPCNT + } + if (d & (1 << 31)) != 0 { + rval |= AMD3DNOW + } + if (d & (1 << 30)) != 0 { + rval |= AMD3DNOWEXT + } + if (d & (1 << 23)) != 0 { + rval |= MMX + } + if (d & (1 << 22)) != 0 { + rval |= MMXEXT + } + if (c & (1 << 6)) != 0 { + rval |= SSE4A + } + if d&(1<<20) != 0 { + rval |= NX + } + if d&(1<<27) != 0 { + rval |= RDTSCP + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != Intel && + rval&SSE2 != 0 && (c&0x00000040) == 0 { + rval |= SSE2SLOW + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. 
*/ + if (rval & AVX) != 0 { + if (c & 0x00000800) != 0 { + rval |= XOP + } + if (c & 0x00010000) != 0 { + rval |= FMA4 + } + } + + if vendorID() == Intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & SSE2) != 0 { + rval |= SSE2SLOW + } + if (rval & SSE3) != 0 { + rval |= SSE3SLOW + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. */ + if family == 6 && model == 28 { + rval |= ATOM + } + } + } + return Flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s new file mode 100644 index 00000000000..9947f7b6f6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s @@ -0,0 +1,40 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s new file mode 100644 index 00000000000..68a7c9d8808 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s @@ -0,0 +1,40 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go new file mode 100644 index 00000000000..d5475ebfaa8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build 386 amd64 + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go new file mode 100644 index 00000000000..6f9231b13bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. 
See LICENSE file. + +// +build !amd64,!386 + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go new file mode 100644 index 00000000000..c060b8165e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go @@ -0,0 +1,3 @@ +package cpuid + +//go:generate go run private-gen.go diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go new file mode 100644 index 00000000000..437333d2922 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go @@ -0,0 +1,476 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "io" + "io/ioutil" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var inFiles = []string{"cpuid.go", "cpuid_test.go"} +var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} +var fileSet = token.NewFileSet() +var reWrites = []rewrite{ + initRewrite("CPUInfo -> cpuInfo"), + initRewrite("Vendor -> vendor"), + initRewrite("Flags -> flags"), + initRewrite("Detect -> detect"), + initRewrite("CPU -> cpu"), +} +var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, + // cpuid_test.go + "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, +} + +var excludePrefixes = []string{"test", "benchmark"} + +func main() { + Package := "private" + parserMode := parser.ParseComments + exported := make(map[string]rewrite) + for _, 
file := range inFiles { + in, err := os.Open(file) + if err != nil { + log.Fatalf("opening input", err) + } + + src, err := ioutil.ReadAll(in) + if err != nil { + log.Fatalf("reading input", err) + } + + astfile, err := parser.ParseFile(fileSet, file, src, parserMode) + if err != nil { + log.Fatalf("parsing input", err) + } + + for _, rw := range reWrites { + astfile = rw(astfile) + } + + // Inspect the AST and print all identifiers and literals. + var startDecl token.Pos + var endDecl token.Pos + ast.Inspect(astfile, func(n ast.Node) bool { + var s string + switch x := n.(type) { + case *ast.Ident: + if x.IsExported() { + t := strings.ToLower(x.Name) + for _, pre := range excludePrefixes { + if strings.HasPrefix(t, pre) { + return true + } + } + if excludeNames[t] != true { + //if x.Pos() > startDecl && x.Pos() < endDecl { + exported[x.Name] = initRewrite(x.Name + " -> " + t) + } + } + + case *ast.GenDecl: + if x.Tok == token.CONST && x.Lparen > 0 { + startDecl = x.Lparen + endDecl = x.Rparen + // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) + } + } + if s != "" { + fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) + } + return true + }) + + for _, rw := range exported { + astfile = rw(astfile) + } + + var buf bytes.Buffer + + printer.Fprint(&buf, fileSet, astfile) + + // Remove package documentation and insert information + s := buf.String() + ind := strings.Index(buf.String(), "\npackage cpuid") + s = s[ind:] + s = "// Generated, DO NOT EDIT,\n" + + "// but copy it to your own project and rename the package.\n" + + "// See more at http://github.com/klauspost/cpuid\n" + + s + + outputName := Package + string(os.PathSeparator) + file + + err = ioutil.WriteFile(outputName, []byte(s), 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } + log.Println("Generated", outputName) + } + + for _, file := range copyFiles { + dst := "" + if strings.HasPrefix(file, "cpuid") { + dst = Package + 
string(os.PathSeparator) + file + } else { + dst = Package + string(os.PathSeparator) + "cpuid_" + file + } + err := copyFile(file, dst) + if err != nil { + log.Fatalf("copying file: %s", err) + } + log.Println("Copied", dst) + } +} + +// CopyFile copies a file from src to dst. If src and dst files exist, and are +// the same, then return success. Copy the file contents from src to dst. +func copyFile(src, dst string) (err error) { + sfi, err := os.Stat(src) + if err != nil { + return + } + if !sfi.Mode().IsRegular() { + // cannot copy non-regular files (e.g., directories, + // symlinks, devices, etc.) + return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) + } + dfi, err := os.Stat(dst) + if err != nil { + if !os.IsNotExist(err) { + return + } + } else { + if !(dfi.Mode().IsRegular()) { + return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) + } + if os.SameFile(sfi, dfi) { + return + } + } + err = copyFileContents(src, dst) + return +} + +// copyFileContents copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. 
+func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) + if err != nil { + return + } + defer in.Close() + out, err := os.Create(dst) + if err != nil { + return + } + defer func() { + cerr := out.Close() + if err == nil { + err = cerr + } + }() + if _, err = io.Copy(out, in); err != nil { + return + } + err = out.Sync() + return +} + +type rewrite func(*ast.File) *ast.File + +// Mostly copied from gofmt +func initRewrite(rewriteRule string) rewrite { + f := strings.Split(rewriteRule, "->") + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") + os.Exit(2) + } + pattern := parseExpr(f[0], "pattern") + replace := parseExpr(f[1], "replacement") + return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } +} + +// parseExpr parses s as an expression. +// It might make sense to expand this to allow statement patterns, +// but there are problems with preserving formatting and also +// with what a wildcard for a statement looks like. +func parseExpr(s, what string) ast.Expr { + x, err := parser.ParseExpr(s) + if err != nil { + fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) + os.Exit(2) + } + return x +} + +// Keep this function for debugging. +/* +func dump(msg string, val reflect.Value) { + fmt.Printf("%s:\n", msg) + ast.Print(fileSet, val.Interface()) + fmt.Println() +} +*/ + +// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. 
+func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { + cmap := ast.NewCommentMap(fileSet, p, p.Comments) + m := make(map[string]reflect.Value) + pat := reflect.ValueOf(pattern) + repl := reflect.ValueOf(replace) + + var rewriteVal func(val reflect.Value) reflect.Value + rewriteVal = func(val reflect.Value) reflect.Value { + // don't bother if val is invalid to start with + if !val.IsValid() { + return reflect.Value{} + } + for k := range m { + delete(m, k) + } + val = apply(rewriteVal, val) + if match(m, pat, val) { + val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) + } + return val + } + + r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) + r.Comments = cmap.Filter(r).Comments() // recreate comments list + return r +} + +// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. +func set(x, y reflect.Value) { + // don't bother if x cannot be set or y is invalid + if !x.CanSet() || !y.IsValid() { + return + } + defer func() { + if x := recover(); x != nil { + if s, ok := x.(string); ok && + (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { + // x cannot be set to y - ignore this rewrite + return + } + panic(x) + } + }() + x.Set(y) +} + +// Values/types for special cases. +var ( + objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) + scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) + + identType = reflect.TypeOf((*ast.Ident)(nil)) + objectPtrType = reflect.TypeOf((*ast.Object)(nil)) + positionType = reflect.TypeOf(token.NoPos) + callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) + scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) +) + +// apply replaces each AST field x in val with f(x), returning val. +// To avoid extra conversions, f operates on the reflect.Value form. 
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { + if !val.IsValid() { + return reflect.Value{} + } + + // *ast.Objects introduce cycles and are likely incorrect after + // rewrite; don't follow them but replace with nil instead + if val.Type() == objectPtrType { + return objectPtrNil + } + + // similarly for scopes: they are likely incorrect after a rewrite; + // replace them with nil + if val.Type() == scopePtrType { + return scopePtrNil + } + + switch v := reflect.Indirect(val); v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + e := v.Index(i) + set(e, f(e)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + e := v.Field(i) + set(e, f(e)) + } + case reflect.Interface: + e := v.Elem() + set(v, f(e)) + } + return val +} + +func isWildcard(s string) bool { + rune, size := utf8.DecodeRuneInString(s) + return size == len(s) && unicode.IsLower(rune) +} + +// match returns true if pattern matches val, +// recording wildcard submatches in m. +// If m == nil, match checks whether pattern == val. +func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { + // Wildcard matches any expression. If it appears multiple + // times in the pattern, it must match the same expression + // each time. + if m != nil && pattern.IsValid() && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) && val.IsValid() { + // wildcards only match valid (non-nil) expressions. + if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { + if old, ok := m[name]; ok { + return match(nil, old, val) + } + m[name] = val + return true + } + } + } + + // Otherwise, pattern and val must match recursively. + if !pattern.IsValid() || !val.IsValid() { + return !pattern.IsValid() && !val.IsValid() + } + if pattern.Type() != val.Type() { + return false + } + + // Special cases. 
+ switch pattern.Type() { + case identType: + // For identifiers, only the names need to match + // (and none of the other *ast.Object information). + // This is a common case, handle it all here instead + // of recursing down any further via reflection. + p := pattern.Interface().(*ast.Ident) + v := val.Interface().(*ast.Ident) + return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name + case objectPtrType, positionType: + // object pointers and token positions always match + return true + case callExprType: + // For calls, the Ellipsis fields (token.Position) must + // match since that is how f(x) and f(x...) are different. + // Check them here but fall through for the remaining fields. + p := pattern.Interface().(*ast.CallExpr) + v := val.Interface().(*ast.CallExpr) + if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { + return false + } + } + + p := reflect.Indirect(pattern) + v := reflect.Indirect(val) + if !p.IsValid() || !v.IsValid() { + return !p.IsValid() && !v.IsValid() + } + + switch p.Kind() { + case reflect.Slice: + if p.Len() != v.Len() { + return false + } + for i := 0; i < p.Len(); i++ { + if !match(m, p.Index(i), v.Index(i)) { + return false + } + } + return true + + case reflect.Struct: + for i := 0; i < p.NumField(); i++ { + if !match(m, p.Field(i), v.Field(i)) { + return false + } + } + return true + + case reflect.Interface: + return match(m, p.Elem(), v.Elem()) + } + + // Handle token integers, etc. + return p.Interface() == v.Interface() +} + +// subst returns a copy of pattern with values from m substituted in place +// of wildcards and pos used as the position of tokens from the pattern. +// if m == nil, subst returns a copy of pattern and doesn't change the line +// number information. +func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { + if !pattern.IsValid() { + return reflect.Value{} + } + + // Wildcard gets replaced with map value. 
+ if m != nil && pattern.Type() == identType { + name := pattern.Interface().(*ast.Ident).Name + if isWildcard(name) { + if old, ok := m[name]; ok { + return subst(nil, old, reflect.Value{}) + } + } + } + + if pos.IsValid() && pattern.Type() == positionType { + // use new position only if old position was valid in the first place + if old := pattern.Interface().(token.Pos); !old.IsValid() { + return pattern + } + return pos + } + + // Otherwise copy. + switch p := pattern; p.Kind() { + case reflect.Slice: + v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) + for i := 0; i < p.Len(); i++ { + v.Index(i).Set(subst(m, p.Index(i), pos)) + } + return v + + case reflect.Struct: + v := reflect.New(p.Type()).Elem() + for i := 0; i < p.NumField(); i++ { + v.Field(i).Set(subst(m, p.Field(i), pos)) + } + return v + + case reflect.Ptr: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos).Addr()) + } + return v + + case reflect.Interface: + v := reflect.New(p.Type()).Elem() + if elem := p.Elem(); elem.IsValid() { + v.Set(subst(m, elem, pos)) + } + return v + } + + return pattern +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md new file mode 100644 index 00000000000..57a68f88274 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md @@ -0,0 +1,6 @@ +# cpuid private + +This is a specially converted of the cpuid package, so it can be included in +a package without exporting anything. 
+ +Package home: https://github.com/klauspost/cpuid diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go new file mode 100644 index 00000000000..be99cb0b70d --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go @@ -0,0 +1,987 @@ +// Generated, DO NOT EDIT, +// but copy it to your own project and rename the package. +// See more at http://github.com/klauspost/cpuid + +package cpuid + +import ( + "strings" +) + +// Vendor is a representation of a CPU vendor. +type vendor int + +const ( + other vendor = iota + intel + amd + via + transmeta + nsc + kvm // Kernel-based Virtual Machine + msvm // Microsoft Hyper-V or Windows Virtual PC + vmware + xenhvm +) + +const ( + cmov = 1 << iota // i686 CMOV + nx // NX (No-Execute) bit + amd3dnow // AMD 3DNOW + amd3dnowext // AMD 3DNowExt + mmx // standard MMX + mmxext // SSE integer functions or AMD MMX ext + sse // SSE functions + sse2 // P4 SSE functions + sse3 // Prescott SSE3 functions + ssse3 // Conroe SSSE3 functions + sse4 // Penryn SSE4.1 functions + sse4a // AMD Barcelona microarchitecture SSE4a instructions + sse42 // Nehalem SSE4.2 functions + avx // AVX functions + avx2 // AVX2 functions + fma3 // Intel FMA 3 + fma4 // Bulldozer FMA4 functions + xop // Bulldozer XOP functions + f16c // Half-precision floating-point conversion + bmi1 // Bit Manipulation Instruction Set 1 + bmi2 // Bit Manipulation Instruction Set 2 + tbm // AMD Trailing Bit Manipulation + lzcnt // LZCNT instruction + popcnt // POPCNT instruction + aesni // Advanced Encryption Standard New Instructions + clmul // Carry-less Multiplication + htt // Hyperthreading (enabled) + hle // Hardware Lock Elision + rtm // Restricted Transactional Memory + rdrand // RDRAND instruction is available + rdseed // RDSEED instruction is available + adx // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha // Intel SHA Extensions + 
avx512f // AVX-512 Foundation + avx512dq // AVX-512 Doubleword and Quadword Instructions + avx512ifma // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf // AVX-512 Prefetch Instructions + avx512er // AVX-512 Exponential and Reciprocal Instructions + avx512cd // AVX-512 Conflict Detection Instructions + avx512bw // AVX-512 Byte and Word Instructions + avx512vl // AVX-512 Vector Length Extensions + avx512vbmi // AVX-512 Vector Bit Manipulation Instructions + mpx // Intel MPX (Memory Protection Extensions) + erms // Enhanced REP MOVSB/STOSB + rdtscp // RDTSCP Instruction + cx16 // CMPXCHG16B Instruction + + // Performance indicators + sse2slow // SSE2 is supported, but usually not faster + sse3slow // SSE3 is supported, but usually not faster + atom // Atom processor, some SSSE3 instructions are slower +) + +var flagNames = map[flags]string{ + cmov: "CMOV", // i686 CMOV + nx: "NX", // NX (No-Execute) bit + amd3dnow: "AMD3DNOW", // AMD 3DNOW + amd3dnowext: "AMD3DNOWEXT", // AMD 3DNowExt + mmx: "MMX", // Standard MMX + mmxext: "MMXEXT", // SSE integer functions or AMD MMX ext + sse: "SSE", // SSE functions + sse2: "SSE2", // P4 SSE2 functions + sse3: "SSE3", // Prescott SSE3 functions + ssse3: "SSSE3", // Conroe SSSE3 functions + sse4: "SSE4.1", // Penryn SSE4.1 functions + sse4a: "SSE4A", // AMD Barcelona microarchitecture SSE4a instructions + sse42: "SSE4.2", // Nehalem SSE4.2 functions + avx: "AVX", // AVX functions + avx2: "AVX2", // AVX functions + fma3: "FMA3", // Intel FMA 3 + fma4: "FMA4", // Bulldozer FMA4 functions + xop: "XOP", // Bulldozer XOP functions + f16c: "F16C", // Half-precision floating-point conversion + bmi1: "BMI1", // Bit Manipulation Instruction Set 1 + bmi2: "BMI2", // Bit Manipulation Instruction Set 2 + tbm: "TBM", // AMD Trailing Bit Manipulation + lzcnt: "LZCNT", // LZCNT instruction + popcnt: "POPCNT", // POPCNT instruction + aesni: "AESNI", // Advanced Encryption Standard New Instructions + clmul: "CLMUL", // Carry-less 
Multiplication + htt: "HTT", // Hyperthreading (enabled) + hle: "HLE", // Hardware Lock Elision + rtm: "RTM", // Restricted Transactional Memory + rdrand: "RDRAND", // RDRAND instruction is available + rdseed: "RDSEED", // RDSEED instruction is available + adx: "ADX", // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) + sha: "SHA", // Intel SHA Extensions + avx512f: "AVX512F", // AVX-512 Foundation + avx512dq: "AVX512DQ", // AVX-512 Doubleword and Quadword Instructions + avx512ifma: "AVX512IFMA", // AVX-512 Integer Fused Multiply-Add Instructions + avx512pf: "AVX512PF", // AVX-512 Prefetch Instructions + avx512er: "AVX512ER", // AVX-512 Exponential and Reciprocal Instructions + avx512cd: "AVX512CD", // AVX-512 Conflict Detection Instructions + avx512bw: "AVX512BW", // AVX-512 Byte and Word Instructions + avx512vl: "AVX512VL", // AVX-512 Vector Length Extensions + avx512vbmi: "AVX512VBMI", // AVX-512 Vector Bit Manipulation Instructions + mpx: "MPX", // Intel MPX (Memory Protection Extensions) + erms: "ERMS", // Enhanced REP MOVSB/STOSB + rdtscp: "RDTSCP", // RDTSCP Instruction + cx16: "CX16", // CMPXCHG16B Instruction + + // Performance indicators + sse2slow: "SSE2SLOW", // SSE2 supported, but usually not faster + sse3slow: "SSE3SLOW", // SSE3 supported, but usually not faster + atom: "ATOM", // Atom processor, some SSSE3 instructions are slower + +} + +// CPUInfo contains information about the detected system CPU. +type cpuInfo struct { + brandname string // Brand name reported by the CPU + vendorid vendor // Comparable CPU vendor ID + features flags // Features of the CPU + physicalcores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. + threadspercore int // Number of threads per physical core. Will be 1 if undetectable. + logicalcores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. 
+ family int // CPU family number + model int // CPU model number + cacheline int // Cache line size in bytes. Will be 0 if undetectable. + cache struct { + l1i int // L1 Instruction Cache (per core or shared). Will be -1 if undetected + l1d int // L1 Data Cache (per core or shared). Will be -1 if undetected + l2 int // L2 Cache (per core or shared). Will be -1 if undetected + l3 int // L3 Instruction Cache (per core or shared). Will be -1 if undetected + } + maxFunc uint32 + maxExFunc uint32 +} + +var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) +var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) +var xgetbv func(index uint32) (eax, edx uint32) +var rdtscpAsm func() (eax, ebx, ecx, edx uint32) + +// CPU contains information about the CPU as detected on startup, +// or when Detect last was called. +// +// Use this as the primary entry point to you data, +// this way queries are +var cpu cpuInfo + +func init() { + initCPU() + detect() +} + +// Detect will re-detect current CPU info. +// This will replace the content of the exported CPU variable. +// +// Unless you expect the CPU to change while you are running your program +// you should not need to call this function. +// If you call this, you must ensure that no other goroutine is accessing the +// exported CPU variable. +func detect() { + cpu.maxFunc = maxFunctionID() + cpu.maxExFunc = maxExtendedFunction() + cpu.brandname = brandName() + cpu.cacheline = cacheLine() + cpu.family, cpu.model = familyModel() + cpu.features = support() + cpu.threadspercore = threadsPerCore() + cpu.logicalcores = logicalCores() + cpu.physicalcores = physicalCores() + cpu.vendorid = vendorID() + cpu.cacheSize() +} + +// Generated here: http://play.golang.org/p/BxFH2Gdc0G + +// Cmov indicates support of CMOV instructions +func (c cpuInfo) cmov() bool { + return c.features&cmov != 0 +} + +// Amd3dnow indicates support of AMD 3DNOW! 
instructions +func (c cpuInfo) amd3dnow() bool { + return c.features&amd3dnow != 0 +} + +// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions +func (c cpuInfo) amd3dnowext() bool { + return c.features&amd3dnowext != 0 +} + +// MMX indicates support of MMX instructions +func (c cpuInfo) mmx() bool { + return c.features&mmx != 0 +} + +// MMXExt indicates support of MMXEXT instructions +// (SSE integer functions or AMD MMX ext) +func (c cpuInfo) mmxext() bool { + return c.features&mmxext != 0 +} + +// SSE indicates support of SSE instructions +func (c cpuInfo) sse() bool { + return c.features&sse != 0 +} + +// SSE2 indicates support of SSE 2 instructions +func (c cpuInfo) sse2() bool { + return c.features&sse2 != 0 +} + +// SSE3 indicates support of SSE 3 instructions +func (c cpuInfo) sse3() bool { + return c.features&sse3 != 0 +} + +// SSSE3 indicates support of SSSE 3 instructions +func (c cpuInfo) ssse3() bool { + return c.features&ssse3 != 0 +} + +// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions +func (c cpuInfo) sse4() bool { + return c.features&sse4 != 0 +} + +// SSE42 indicates support of SSE4.2 instructions +func (c cpuInfo) sse42() bool { + return c.features&sse42 != 0 +} + +// AVX indicates support of AVX instructions +// and operating system support of AVX instructions +func (c cpuInfo) avx() bool { + return c.features&avx != 0 +} + +// AVX2 indicates support of AVX2 instructions +func (c cpuInfo) avx2() bool { + return c.features&avx2 != 0 +} + +// FMA3 indicates support of FMA3 instructions +func (c cpuInfo) fma3() bool { + return c.features&fma3 != 0 +} + +// FMA4 indicates support of FMA4 instructions +func (c cpuInfo) fma4() bool { + return c.features&fma4 != 0 +} + +// XOP indicates support of XOP instructions +func (c cpuInfo) xop() bool { + return c.features&xop != 0 +} + +// F16C indicates support of F16C instructions +func (c cpuInfo) f16c() bool { + return c.features&f16c != 0 +} + +// BMI1 indicates support 
of BMI1 instructions +func (c cpuInfo) bmi1() bool { + return c.features&bmi1 != 0 +} + +// BMI2 indicates support of BMI2 instructions +func (c cpuInfo) bmi2() bool { + return c.features&bmi2 != 0 +} + +// TBM indicates support of TBM instructions +// (AMD Trailing Bit Manipulation) +func (c cpuInfo) tbm() bool { + return c.features&tbm != 0 +} + +// Lzcnt indicates support of LZCNT instruction +func (c cpuInfo) lzcnt() bool { + return c.features&lzcnt != 0 +} + +// Popcnt indicates support of POPCNT instruction +func (c cpuInfo) popcnt() bool { + return c.features&popcnt != 0 +} + +// HTT indicates the processor has Hyperthreading enabled +func (c cpuInfo) htt() bool { + return c.features&htt != 0 +} + +// SSE2Slow indicates that SSE2 may be slow on this processor +func (c cpuInfo) sse2slow() bool { + return c.features&sse2slow != 0 +} + +// SSE3Slow indicates that SSE3 may be slow on this processor +func (c cpuInfo) sse3slow() bool { + return c.features&sse3slow != 0 +} + +// AesNi indicates support of AES-NI instructions +// (Advanced Encryption Standard New Instructions) +func (c cpuInfo) aesni() bool { + return c.features&aesni != 0 +} + +// Clmul indicates support of CLMUL instructions +// (Carry-less Multiplication) +func (c cpuInfo) clmul() bool { + return c.features&clmul != 0 +} + +// NX indicates support of NX (No-Execute) bit +func (c cpuInfo) nx() bool { + return c.features&nx != 0 +} + +// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions +func (c cpuInfo) sse4a() bool { + return c.features&sse4a != 0 +} + +// HLE indicates support of Hardware Lock Elision +func (c cpuInfo) hle() bool { + return c.features&hle != 0 +} + +// RTM indicates support of Restricted Transactional Memory +func (c cpuInfo) rtm() bool { + return c.features&rtm != 0 +} + +// Rdrand indicates support of RDRAND instruction is available +func (c cpuInfo) rdrand() bool { + return c.features&rdrand != 0 +} + +// Rdseed indicates support of RDSEED 
instruction is available +func (c cpuInfo) rdseed() bool { + return c.features&rdseed != 0 +} + +// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions) +func (c cpuInfo) adx() bool { + return c.features&adx != 0 +} + +// SHA indicates support of Intel SHA Extensions +func (c cpuInfo) sha() bool { + return c.features&sha != 0 +} + +// AVX512F indicates support of AVX-512 Foundation +func (c cpuInfo) avx512f() bool { + return c.features&avx512f != 0 +} + +// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions +func (c cpuInfo) avx512dq() bool { + return c.features&avx512dq != 0 +} + +// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions +func (c cpuInfo) avx512ifma() bool { + return c.features&avx512ifma != 0 +} + +// AVX512PF indicates support of AVX-512 Prefetch Instructions +func (c cpuInfo) avx512pf() bool { + return c.features&avx512pf != 0 +} + +// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions +func (c cpuInfo) avx512er() bool { + return c.features&avx512er != 0 +} + +// AVX512CD indicates support of AVX-512 Conflict Detection Instructions +func (c cpuInfo) avx512cd() bool { + return c.features&avx512cd != 0 +} + +// AVX512BW indicates support of AVX-512 Byte and Word Instructions +func (c cpuInfo) avx512bw() bool { + return c.features&avx512bw != 0 +} + +// AVX512VL indicates support of AVX-512 Vector Length Extensions +func (c cpuInfo) avx512vl() bool { + return c.features&avx512vl != 0 +} + +// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions +func (c cpuInfo) avx512vbmi() bool { + return c.features&avx512vbmi != 0 +} + +// MPX indicates support of Intel MPX (Memory Protection Extensions) +func (c cpuInfo) mpx() bool { + return c.features&mpx != 0 +} + +// ERMS indicates support of Enhanced REP MOVSB/STOSB +func (c cpuInfo) erms() bool { + return c.features&erms != 0 +} + +func (c cpuInfo) rdtscp() bool { + return 
c.features&rdtscp != 0 +} + +func (c cpuInfo) cx16() bool { + return c.features&cx16 != 0 +} + +// Atom indicates an Atom processor +func (c cpuInfo) atom() bool { + return c.features&atom != 0 +} + +// Intel returns true if vendor is recognized as Intel +func (c cpuInfo) intel() bool { + return c.vendorid == intel +} + +// AMD returns true if vendor is recognized as AMD +func (c cpuInfo) amd() bool { + return c.vendorid == amd +} + +// Transmeta returns true if vendor is recognized as Transmeta +func (c cpuInfo) transmeta() bool { + return c.vendorid == transmeta +} + +// NSC returns true if vendor is recognized as National Semiconductor +func (c cpuInfo) nsc() bool { + return c.vendorid == nsc +} + +// VIA returns true if vendor is recognized as VIA +func (c cpuInfo) via() bool { + return c.vendorid == via +} + +// RTCounter returns the 64-bit time-stamp counter +// Uses the RDTSCP instruction. The value 0 is returned +// if the CPU does not support the instruction. +func (c cpuInfo) rtcounter() uint64 { + if !c.rdtscp() { + return 0 + } + a, _, _, d := rdtscpAsm() + return uint64(a) | (uint64(d) << 32) +} + +// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP. +// This variable is OS dependent, but on Linux contains information +// about the current cpu/core the code is running on. +// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned. +func (c cpuInfo) ia32tscaux() uint32 { + if !c.rdtscp() { + return 0 + } + _, _, ecx, _ := rdtscpAsm() + return ecx +} + +// LogicalCPU will return the Logical CPU the code is currently executing on. +// This is likely to change when the OS re-schedules the running thread +// to another CPU. +// If the current core cannot be detected, -1 will be returned. +func (c cpuInfo) logicalcpu() int { + if c.maxFunc < 1 { + return -1 + } + _, ebx, _, _ := cpuid(1) + return int(ebx >> 24) +} + +// VM Will return true if the cpu id indicates we are in +// a virtual machine. 
This is only a hint, and will very likely +// have many false negatives. +func (c cpuInfo) vm() bool { + switch c.vendorid { + case msvm, kvm, vmware, xenhvm: + return true + } + return false +} + +// Flags contains detected cpu features and caracteristics +type flags uint64 + +// String returns a string representation of the detected +// CPU features. +func (f flags) String() string { + return strings.Join(f.strings(), ",") +} + +// Strings returns and array of the detected features. +func (f flags) strings() []string { + s := support() + r := make([]string, 0, 20) + for i := uint(0); i < 64; i++ { + key := flags(1 << i) + val := flagNames[key] + if s&key != 0 { + r = append(r, val) + } + } + return r +} + +func maxExtendedFunction() uint32 { + eax, _, _, _ := cpuid(0x80000000) + return eax +} + +func maxFunctionID() uint32 { + a, _, _, _ := cpuid(0) + return a +} + +func brandName() string { + if maxExtendedFunction() >= 0x80000004 { + v := make([]uint32, 0, 48) + for i := uint32(0); i < 3; i++ { + a, b, c, d := cpuid(0x80000002 + i) + v = append(v, a, b, c, d) + } + return strings.Trim(string(valAsString(v...)), " ") + } + return "unknown" +} + +func threadsPerCore() int { + mfi := maxFunctionID() + if mfi < 0x4 || vendorID() != intel { + return 1 + } + + if mfi < 0xb { + _, b, _, d := cpuid(1) + if (d & (1 << 28)) != 0 { + // v will contain logical core count + v := (b >> 16) & 255 + if v > 1 { + a4, _, _, _ := cpuid(4) + // physical cores + v2 := (a4 >> 26) + 1 + if v2 > 0 { + return int(v) / int(v2) + } + } + } + return 1 + } + _, b, _, _ := cpuidex(0xb, 0) + if b&0xffff == 0 { + return 1 + } + return int(b & 0xffff) +} + +func logicalCores() int { + mfi := maxFunctionID() + switch vendorID() { + case intel: + // Use this on old Intel processors + if mfi < 0xb { + if mfi < 1 { + return 0 + } + // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) + // that can be assigned to logical processors in a physical package. 
+ // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. + _, ebx, _, _ := cpuid(1) + logical := (ebx >> 16) & 0xff + return int(logical) + } + _, b, _, _ := cpuidex(0xb, 1) + return int(b & 0xffff) + case amd: + _, b, _, _ := cpuid(1) + return int((b >> 16) & 0xff) + default: + return 0 + } +} + +func familyModel() (int, int) { + if maxFunctionID() < 0x1 { + return 0, 0 + } + eax, _, _, _ := cpuid(1) + family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) + model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) + return int(family), int(model) +} + +func physicalCores() int { + switch vendorID() { + case intel: + return logicalCores() / threadsPerCore() + case amd: + if maxExtendedFunction() >= 0x80000008 { + _, _, c, _ := cpuid(0x80000008) + return int(c&0xff) + 1 + } + } + return 0 +} + +// Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID +var vendorMapping = map[string]vendor{ + "AMDisbetter!": amd, + "AuthenticAMD": amd, + "CentaurHauls": via, + "GenuineIntel": intel, + "TransmetaCPU": transmeta, + "GenuineTMx86": transmeta, + "Geode by NSC": nsc, + "VIA VIA VIA ": via, + "KVMKVMKVMKVM": kvm, + "Microsoft Hv": msvm, + "VMwareVMware": vmware, + "XenVMMXenVMM": xenhvm, +} + +func vendorID() vendor { + _, b, c, d := cpuid(0) + v := valAsString(b, d, c) + vend, ok := vendorMapping[string(v)] + if !ok { + return other + } + return vend +} + +func cacheLine() int { + if maxFunctionID() < 0x1 { + return 0 + } + + _, ebx, _, _ := cpuid(1) + cache := (ebx & 0xff00) >> 5 // cflush size + if cache == 0 && maxExtendedFunction() >= 0x80000006 { + _, _, ecx, _ := cpuid(0x80000006) + cache = ecx & 0xff // cacheline size + } + // TODO: Read from Cache and TLB Information + return int(cache) +} + +func (c *cpuInfo) cacheSize() { + c.cache.l1d = -1 + c.cache.l1i = -1 + c.cache.l2 = -1 + c.cache.l3 = -1 + vendor := vendorID() + switch vendor { + case intel: + if maxFunctionID() < 4 { + return + 
} + for i := uint32(0); ; i++ { + eax, ebx, ecx, _ := cpuidex(4, i) + cacheType := eax & 15 + if cacheType == 0 { + break + } + cacheLevel := (eax >> 5) & 7 + coherency := int(ebx&0xfff) + 1 + partitions := int((ebx>>12)&0x3ff) + 1 + associativity := int((ebx>>22)&0x3ff) + 1 + sets := int(ecx) + 1 + size := associativity * partitions * coherency * sets + switch cacheLevel { + case 1: + if cacheType == 1 { + // 1 = Data Cache + c.cache.l1d = size + } else if cacheType == 2 { + // 2 = Instruction Cache + c.cache.l1i = size + } else { + if c.cache.l1d < 0 { + c.cache.l1i = size + } + if c.cache.l1i < 0 { + c.cache.l1i = size + } + } + case 2: + c.cache.l2 = size + case 3: + c.cache.l3 = size + } + } + case amd: + // Untested. + if maxExtendedFunction() < 0x80000005 { + return + } + _, _, ecx, edx := cpuid(0x80000005) + c.cache.l1d = int(((ecx >> 24) & 0xFF) * 1024) + c.cache.l1i = int(((edx >> 24) & 0xFF) * 1024) + + if maxExtendedFunction() < 0x80000006 { + return + } + _, _, ecx, _ = cpuid(0x80000006) + c.cache.l2 = int(((ecx >> 16) & 0xFFFF) * 1024) + } + + return +} + +func support() flags { + mfi := maxFunctionID() + vend := vendorID() + if mfi < 0x1 { + return 0 + } + rval := uint64(0) + _, _, c, d := cpuid(1) + if (d & (1 << 15)) != 0 { + rval |= cmov + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 25)) != 0 { + rval |= mmxext + } + if (d & (1 << 25)) != 0 { + rval |= sse + } + if (d & (1 << 26)) != 0 { + rval |= sse2 + } + if (c & 1) != 0 { + rval |= sse3 + } + if (c & 0x00000200) != 0 { + rval |= ssse3 + } + if (c & 0x00080000) != 0 { + rval |= sse4 + } + if (c & 0x00100000) != 0 { + rval |= sse42 + } + if (c & (1 << 25)) != 0 { + rval |= aesni + } + if (c & (1 << 1)) != 0 { + rval |= clmul + } + if c&(1<<23) != 0 { + rval |= popcnt + } + if c&(1<<30) != 0 { + rval |= rdrand + } + if c&(1<<29) != 0 { + rval |= f16c + } + if c&(1<<13) != 0 { + rval |= cx16 + } + if vend == intel && (d&(1<<28)) != 0 && mfi >= 4 { + if threadsPerCore() > 1 { + 
rval |= htt + } + } + + // Check XGETBV, OXSAVE and AVX bits + if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { + // Check for OS support + eax, _ := xgetbv(0) + if (eax & 0x6) == 0x6 { + rval |= avx + if (c & 0x00001000) != 0 { + rval |= fma3 + } + } + } + + // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. + if mfi >= 7 { + _, ebx, ecx, _ := cpuidex(7, 0) + if (rval&avx) != 0 && (ebx&0x00000020) != 0 { + rval |= avx2 + } + if (ebx & 0x00000008) != 0 { + rval |= bmi1 + if (ebx & 0x00000100) != 0 { + rval |= bmi2 + } + } + if ebx&(1<<4) != 0 { + rval |= hle + } + if ebx&(1<<9) != 0 { + rval |= erms + } + if ebx&(1<<11) != 0 { + rval |= rtm + } + if ebx&(1<<14) != 0 { + rval |= mpx + } + if ebx&(1<<18) != 0 { + rval |= rdseed + } + if ebx&(1<<19) != 0 { + rval |= adx + } + if ebx&(1<<29) != 0 { + rval |= sha + } + + // Only detect AVX-512 features if XGETBV is supported + if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { + // Check for OS support + eax, _ := xgetbv(0) + + // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and + // ZMM16-ZMM31 state are enabled by OS) + /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
+ if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { + if ebx&(1<<16) != 0 { + rval |= avx512f + } + if ebx&(1<<17) != 0 { + rval |= avx512dq + } + if ebx&(1<<21) != 0 { + rval |= avx512ifma + } + if ebx&(1<<26) != 0 { + rval |= avx512pf + } + if ebx&(1<<27) != 0 { + rval |= avx512er + } + if ebx&(1<<28) != 0 { + rval |= avx512cd + } + if ebx&(1<<30) != 0 { + rval |= avx512bw + } + if ebx&(1<<31) != 0 { + rval |= avx512vl + } + // ecx + if ecx&(1<<1) != 0 { + rval |= avx512vbmi + } + } + } + } + + if maxExtendedFunction() >= 0x80000001 { + _, _, c, d := cpuid(0x80000001) + if (c & (1 << 5)) != 0 { + rval |= lzcnt + rval |= popcnt + } + if (d & (1 << 31)) != 0 { + rval |= amd3dnow + } + if (d & (1 << 30)) != 0 { + rval |= amd3dnowext + } + if (d & (1 << 23)) != 0 { + rval |= mmx + } + if (d & (1 << 22)) != 0 { + rval |= mmxext + } + if (c & (1 << 6)) != 0 { + rval |= sse4a + } + if d&(1<<20) != 0 { + rval |= nx + } + if d&(1<<27) != 0 { + rval |= rdtscp + } + + /* Allow for selectively disabling SSE2 functions on AMD processors + with SSE2 support but not SSE4a. This includes Athlon64, some + Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster + than SSE2 often enough to utilize this special-case flag. + AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case + so that SSE2 is used unless explicitly disabled by checking + AV_CPU_FLAG_SSE2SLOW. */ + if vendorID() != intel && + rval&sse2 != 0 && (c&0x00000040) == 0 { + rval |= sse2slow + } + + /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be + * used unless the OS has AVX support. 
*/ + if (rval & avx) != 0 { + if (c & 0x00000800) != 0 { + rval |= xop + } + if (c & 0x00010000) != 0 { + rval |= fma4 + } + } + + if vendorID() == intel { + family, model := familyModel() + if family == 6 && (model == 9 || model == 13 || model == 14) { + /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and + * 6/14 (core1 "yonah") theoretically support sse2, but it's + * usually slower than mmx. */ + if (rval & sse2) != 0 { + rval |= sse2slow + } + if (rval & sse3) != 0 { + rval |= sse3slow + } + } + /* The Atom processor has SSSE3 support, which is useful in many cases, + * but sometimes the SSSE3 version is slower than the SSE2 equivalent + * on the Atom, but is generally faster on other processors supporting + * SSSE3. This flag allows for selectively disabling certain SSSE3 + * functions on the Atom. */ + if family == 6 && model == 28 { + rval |= atom + } + } + } + return flags(rval) +} + +func valAsString(values ...uint32) []byte { + r := make([]byte, 4*len(values)) + for i, v := range values { + dst := r[i*4:] + dst[0] = byte(v & 0xff) + dst[1] = byte((v >> 8) & 0xff) + dst[2] = byte((v >> 16) & 0xff) + dst[3] = byte((v >> 24) & 0xff) + switch { + case dst[0] == 0: + return r[:i*4] + case dst[1] == 0: + return r[:i*4+1] + case dst[2] == 0: + return r[:i*4+2] + case dst[3] == 0: + return r[:i*4+3] + } + } + return r +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s new file mode 100644 index 00000000000..9947f7b6f6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s @@ -0,0 +1,40 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORL CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+4(FP) + MOVL BX, ebx+8(FP) + MOVL CX, ecx+12(FP) + MOVL DX, edx+16(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func xgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+4(FP) + MOVL DX, edx+8(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s new file mode 100644 index 00000000000..68a7c9d8808 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s @@ -0,0 +1,40 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuid(SB), 7, $0 + XORQ CX, CX + MOVL op+0(FP), AX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +TEXT ·asmCpuidex(SB), 7, $0 + MOVL op+0(FP), AX + MOVL op2+4(FP), CX + CPUID + MOVL AX, eax+8(FP) + MOVL BX, ebx+12(FP) + MOVL CX, ecx+16(FP) + MOVL DX, edx+20(FP) + RET + +// func asmXgetbv(index uint32) (eax, edx uint32) +TEXT ·asmXgetbv(SB), 7, $0 + MOVL index+0(FP), CX + BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV + MOVL AX, eax+8(FP) + MOVL DX, edx+12(FP) + RET + +// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) +TEXT ·asmRdtscpAsm(SB), 7, $0 + BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP + MOVL AX, eax+0(FP) + MOVL BX, ebx+4(FP) + MOVL CX, ecx+8(FP) + MOVL DX, edx+12(FP) + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go new file mode 100644 index 00000000000..d5475ebfaa8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go @@ -0,0 +1,17 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
+ +// +build 386 amd64 + +package cpuid + +func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) +func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) +func asmXgetbv(index uint32) (eax, edx uint32) +func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) + +func initCPU() { + cpuid = asmCpuid + cpuidex = asmCpuidex + xgetbv = asmXgetbv + rdtscpAsm = asmRdtscpAsm +} diff --git a/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go new file mode 100644 index 00000000000..6f9231b13bf --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go @@ -0,0 +1,23 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +// +build !amd64,!386 + +package cpuid + +func initCPU() { + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } + + xgetbv = func(index uint32) (eax, edx uint32) { + return 0, 0 + } + + rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { + return 0, 0, 0, 0 + } +} diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore b/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml b/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml new file mode 100644 index 00000000000..c62e25f5a5b --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: + - go test -v . + - go test -v -race . diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE b/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE new file mode 100644 index 00000000000..4fd5963e39c --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2015 Klaus Post + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/README.md b/Godeps/_workspace/src/github.com/klauspost/crc32/README.md new file mode 100644 index 00000000000..440541c7ff3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/README.md @@ -0,0 +1,84 @@ +# crc32 +CRC32 hash with x64 optimizations + +This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup. + +[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32) + +# usage + +Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer. + +Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go. + +# changes + +* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable. + + +# performance + +For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction: +``` +benchmark old ns/op new ns/op delta +BenchmarkCrc32KB 99955 10258 -89.74% + +benchmark old MB/s new MB/s speedup +BenchmarkCrc32KB 327.83 3194.20 9.74x +``` + +For other tables and "CLMUL" capable machines the performance is the same as the standard library. + +Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled. + +``` +Std: Standard Go 1.5 library +Crc: Indicates IEEE type CRC. +40B: Size of each slice encoded. +NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine). +Castagnoli: Castagnoli CRC type. 
+ +BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s +BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8) +BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8) + +BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s +BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8) +BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm) + +BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8) +BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8) +BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm) + +BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8) +BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8) +BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm) + +BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s +BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm) +BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8) +BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s +BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm) +BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8) +BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s +BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm) +BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8) +BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm) + +BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s +BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm) +BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8) +BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm) +``` + +The IEEE assembler optimizations has been submitted and will be part of the Go 1.6 standard library. 
+ +However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7. + +# license + +Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions. diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go new file mode 100644 index 00000000000..b584e410350 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go @@ -0,0 +1,182 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, +// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for +// information. +// +// Polynomials are represented in LSB-first form also known as reversed representation. +// +// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials +// for information. +package crc32 + +import ( + "hash" + "sync" +) + +// The size of a CRC-32 checksum in bytes. +const Size = 4 + +// Predefined polynomials. +const ( + // IEEE is by far and away the most common CRC-32 polynomial. + // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... + IEEE = 0xedb88320 + + // Castagnoli's polynomial, used in iSCSI. + // Has better error detection characteristics than IEEE. + // http://dx.doi.org/10.1109/26.231911 + Castagnoli = 0x82f63b78 + + // Koopman's polynomial. + // Also has better error detection characteristics than IEEE. + // http://dx.doi.org/10.1109/DSN.2002.1028931 + Koopman = 0xeb31d82e +) + +// Table is a 256-word table representing the polynomial for efficient processing. +type Table [256]uint32 + +// castagnoliTable points to a lazily initialized Table for the Castagnoli +// polynomial. 
MakeTable will always return this value when asked to make a +// Castagnoli table so we can compare against it to find when the caller is +// using this polynomial. +var castagnoliTable *Table +var castagnoliTable8 *slicing8Table +var castagnoliOnce sync.Once + +func castagnoliInit() { + castagnoliTable = makeTable(Castagnoli) + castagnoliTable8 = makeTable8(Castagnoli) +} + +// IEEETable is the table for the IEEE polynomial. +var IEEETable = makeTable(IEEE) + +// slicing8Table is array of 8 Tables +type slicing8Table [8]Table + +// iEEETable8 is the slicing8Table for IEEE +var iEEETable8 *slicing8Table +var iEEETable8Once sync.Once + +// MakeTable returns the Table constructed from the specified polynomial. +func MakeTable(poly uint32) *Table { + switch poly { + case IEEE: + return IEEETable + case Castagnoli: + castagnoliOnce.Do(castagnoliInit) + return castagnoliTable + } + return makeTable(poly) +} + +// makeTable returns the Table constructed from the specified polynomial. +func makeTable(poly uint32) *Table { + t := new(Table) + for i := 0; i < 256; i++ { + crc := uint32(i) + for j := 0; j < 8; j++ { + if crc&1 == 1 { + crc = (crc >> 1) ^ poly + } else { + crc >>= 1 + } + } + t[i] = crc + } + return t +} + +// makeTable8 returns slicing8Table constructed from the specified polynomial. +func makeTable8(poly uint32) *slicing8Table { + t := new(slicing8Table) + t[0] = *makeTable(poly) + for i := 0; i < 256; i++ { + crc := t[0][i] + for j := 1; j < 8; j++ { + crc = t[0][crc&0xFF] ^ (crc >> 8) + t[j][i] = crc + } + } + return t +} + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint32 + tab *Table +} + +// New creates a new hash.Hash32 computing the CRC-32 checksum +// using the polynomial represented by the Table. +func New(tab *Table) hash.Hash32 { return &digest{0, tab} } + +// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum +// using the IEEE polynomial. 
+func NewIEEE() hash.Hash32 { return New(IEEETable) } + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return 1 } + +func (d *digest) Reset() { d.crc = 0 } + +func update(crc uint32, tab *Table, p []byte) uint32 { + crc = ^crc + for _, v := range p { + crc = tab[byte(crc)^v] ^ (crc >> 8) + } + return ^crc +} + +// updateSlicingBy8 updates CRC using Slicing-by-8 +func updateSlicingBy8(crc uint32, tab *slicing8Table, p []byte) uint32 { + crc = ^crc + for len(p) > 8 { + crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 + crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^ + tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^ + tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF] + p = p[8:] + } + crc = ^crc + if len(p) == 0 { + return crc + } + return update(crc, &tab[0], p) +} + +// Update returns the result of adding the bytes in p to the crc. +func Update(crc uint32, tab *Table, p []byte) uint32 { + if tab == castagnoliTable { + return updateCastagnoli(crc, p) + } else if tab == IEEETable { + return updateIEEE(crc, p) + } + return update(crc, tab, p) +} + +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = Update(d.crc, d.tab, p) + return len(p), nil +} + +func (d *digest) Sum32() uint32 { return d.crc } + +func (d *digest) Sum(in []byte) []byte { + s := d.Sum32() + return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// Checksum returns the CRC-32 checksum of data +// using the polynomial represented by the Table. +func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) } + +// ChecksumIEEE returns the CRC-32 checksum of data +// using the IEEE polynomial. 
+func ChecksumIEEE(data []byte) uint32 { return updateIEEE(0, data) } diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go new file mode 100644 index 00000000000..669fe17114e --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go @@ -0,0 +1,62 @@ +//+build !appengine,!gccgo + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// and IEEE CRC. + +// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and uses +// CPUID to test for SSE 4.1, 4.2 and CLMUL support. +func haveSSE41() bool +func haveSSE42() bool +func haveCLMUL() bool + +// castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32 +// instruction. +// go:noescape +func castagnoliSSE42(crc uint32, p []byte) uint32 + +// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ +// instruction as well as SSE 4.1. 
+// go:noescape +func ieeeCLMUL(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() +var useFastIEEE = haveCLMUL() && haveSSE41() + +func updateCastagnoli(crc uint32, p []byte) uint32 { + if sse42 { + return castagnoliSSE42(crc, p) + } + // only use slicing-by-8 when input is >= 16 Bytes + if len(p) >= 16 { + return updateSlicingBy8(crc, castagnoliTable8, p) + } + return update(crc, castagnoliTable, p) +} + +func updateIEEE(crc uint32, p []byte) uint32 { + if useFastIEEE && len(p) >= 64 { + left := len(p) & 15 + do := len(p) - left + crc := ^ieeeCLMUL(^crc, p[:do]) + if left > 0 { + crc = update(crc, IEEETable, p[do:]) + } + return crc + } + + // only use slicing-by-8 when input is >= 16 Bytes + if len(p) >= 16 { + iEEETable8Once.Do(func() { + iEEETable8 = makeTable8(IEEE) + }) + return updateSlicingBy8(crc, iEEETable8, p) + } + + return update(crc, IEEETable, p) +} diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s new file mode 100644 index 00000000000..1b1cd8b6740 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s @@ -0,0 +1,237 @@ +//+build gc + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. 
+ CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. + CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+32(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveCLMUL() bool +TEXT ·haveCLMUL(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $1, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// func haveSSE41() bool +TEXT ·haveSSE41(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $19, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + +// CRC32 polynomial data +// +// These constants are lifted from the +// Linux kernel, since they avoid the costly +// PSHUFB 16 byte reversal proposed in the +// original Intel paper. +DATA r2r1kp<>+0(SB)/8, $0x154442bd4 +DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 +DATA r4r3kp<>+0(SB)/8, $0x1751997d0 +DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e +DATA rupolykp<>+0(SB)/8, $0x1db710641 +DATA rupolykp<>+8(SB)/8, $0x1f7011641 +DATA r5kp<>+0(SB)/8, $0x163cd6124 + +GLOBL r2r1kp<>(SB), RODATA, $16 +GLOBL r4r3kp<>(SB), RODATA, $16 +GLOBL rupolykp<>(SB), RODATA, $16 +GLOBL r5kp<>(SB), RODATA, $8 + +// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf +// len(p) must be at least 64, and must be a multiple of 16. 
+ +// func ieeeCLMUL(crc uint32, p []byte) uint32 +TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 + MOVL crc+0(FP), X0 // Initial CRC value + MOVQ p+8(FP), SI // data pointer + MOVQ p_len+16(FP), CX // len(p) + + MOVOU (SI), X1 + MOVOU 16(SI), X2 + MOVOU 32(SI), X3 + MOVOU 48(SI), X4 + PXOR X0, X1 + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left + JB remain64 + + MOVOU r2r1kp<>+0(SB), X0 + +loopback64: + MOVOA X1, X5 + MOVOA X2, X6 + MOVOA X3, X7 + MOVOA X4, X8 + + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0, X0, X2 + PCLMULQDQ $0, X0, X3 + PCLMULQDQ $0, X0, X4 + + // Load next early + MOVOU (SI), X11 + MOVOU 16(SI), X12 + MOVOU 32(SI), X13 + MOVOU 48(SI), X14 + + PCLMULQDQ $0x11, X0, X5 + PCLMULQDQ $0x11, X0, X6 + PCLMULQDQ $0x11, X0, X7 + PCLMULQDQ $0x11, X0, X8 + + PXOR X5, X1 + PXOR X6, X2 + PXOR X7, X3 + PXOR X8, X4 + + PXOR X11, X1 + PXOR X12, X2 + PXOR X13, X3 + PXOR X14, X4 + + ADDQ $0x40, DI + ADDQ $64, SI // buf+=64 + SUBQ $64, CX // len-=64 + CMPQ CX, $64 // Less than 64 bytes left? + JGE loopback64 + + // Fold result into a single register (X1) +remain64: + MOVOU r4r3kp<>+0(SB), X0 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X2, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X3, X1 + + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X4, X1 + + // More than 16 bytes left? + CMPQ CX, $16 + JB finish + + // Encode 16 bytes +remain16: + MOVOU (SI), X10 + MOVOA X1, X5 + PCLMULQDQ $0, X0, X1 + PCLMULQDQ $0x11, X0, X5 + PXOR X5, X1 + PXOR X10, X1 + SUBQ $16, CX + ADDQ $16, SI + CMPQ CX, $16 + JGE remain16 + +finish: + // Fold final result into 32 bits and return it + PCMPEQB X3, X3 + PCLMULQDQ $1, X1, X0 + PSRLDQ $8, X1 + PXOR X0, X1 + + MOVOA X1, X2 + MOVQ r5kp<>+0(SB), X0 + + // Creates 32 bit mask. Note that we don't care about upper half. 
+ PSRLQ $32, X3 + + PSRLDQ $4, X2 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + MOVOU rupolykp<>+0(SB), X0 + + MOVOA X1, X2 + PAND X3, X1 + PCLMULQDQ $0x10, X0, X1 + PAND X3, X1 + PCLMULQDQ $0, X0, X1 + PXOR X2, X1 + + // PEXTRD $1, X1, AX (SSE 4.1) + BYTE $0x66; BYTE $0x0f; BYTE $0x3a + BYTE $0x16; BYTE $0xc8; BYTE $0x01 + MOVL AX, ret+32(FP) + + RET diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go new file mode 100644 index 00000000000..64a356feca3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go @@ -0,0 +1,39 @@ +//+build !appengine,!gccgo + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32 + +// This file contains the code to call the SSE 4.2 version of the Castagnoli +// CRC. + +// haveSSE42 is defined in crc_amd64p32.s and uses CPUID to test for 4.2 +// support. +func haveSSE42() bool + +// castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32 +// instruction. 
+func castagnoliSSE42(crc uint32, p []byte) uint32 + +var sse42 = haveSSE42() + +func updateCastagnoli(crc uint32, p []byte) uint32 { + if sse42 { + return castagnoliSSE42(crc, p) + } + return update(crc, castagnoliTable, p) +} + +func updateIEEE(crc uint32, p []byte) uint32 { + // only use slicing-by-8 when input is >= 4KB + if len(p) >= 4096 { + iEEETable8Once.Do(func() { + iEEETable8 = makeTable8(IEEE) + }) + return updateSlicingBy8(crc, iEEETable8, p) + } + + return update(crc, IEEETable, p) +} diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s new file mode 100644 index 00000000000..65944c5ac77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s @@ -0,0 +1,67 @@ +//+build gc + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#define NOSPLIT 4 +#define RODATA 8 + +// func castagnoliSSE42(crc uint32, p []byte) uint32 +TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 + MOVL crc+0(FP), AX // CRC value + MOVL p+4(FP), SI // data pointer + MOVL p_len+8(FP), CX // len(p) + + NOTL AX + + // If there's less than 8 bytes to process, we do it byte-by-byte. + CMPQ CX, $8 + JL cleanup + + // Process individual bytes until the input is 8-byte aligned. +startup: + MOVQ SI, BX + ANDQ $7, BX + JZ aligned + + CRC32B (SI), AX + DECQ CX + INCQ SI + JMP startup + +aligned: + // The input is now 8-byte aligned and we can process 8-byte chunks. + CMPQ CX, $8 + JL cleanup + + CRC32Q (SI), AX + ADDQ $8, SI + SUBQ $8, CX + JMP aligned + +cleanup: + // We may have some bytes left over that we process one at a time. 
+ CMPQ CX, $0 + JE done + + CRC32B (SI), AX + INCQ SI + DECQ CX + JMP cleanup + +done: + NOTL AX + MOVL AX, ret+16(FP) + RET + +// func haveSSE42() bool +TEXT ·haveSSE42(SB), NOSPLIT, $0 + XORQ AX, AX + INCL AX + CPUID + SHRQ $20, CX + ANDQ $1, CX + MOVB CX, ret+0(FP) + RET + diff --git a/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go new file mode 100644 index 00000000000..43f5d1dd911 --- /dev/null +++ b/Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go @@ -0,0 +1,28 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build 386 arm arm64 ppc64 ppc64le appengine gccgo + +package crc32 + +// The file contains the generic version of updateCastagnoli which does +// slicing-by-8, or uses the fallback for very small sizes. +func updateCastagnoli(crc uint32, p []byte) uint32 { + // only use slicing-by-8 when input is >= 16 Bytes + if len(p) >= 16 { + return updateSlicingBy8(crc, castagnoliTable8, p) + } + return update(crc, castagnoliTable, p) +} + +func updateIEEE(crc uint32, p []byte) uint32 { + // only use slicing-by-8 when input is >= 16 Bytes + if len(p) >= 16 { + iEEETable8Once.Do(func() { + iEEETable8 = makeTable8(IEEE) + }) + return updateSlicingBy8(crc, iEEETable8, p) + } + return update(crc, IEEETable, p) +} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md b/Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md deleted file mode 100644 index ad375653600..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md +++ /dev/null @@ -1,21 +0,0 @@ -binding [![Build Status](https://drone.io/github.com/macaron-contrib/binding/status.png)](https://drone.io/github.com/macaron-contrib/binding/latest) 
[![](http://gocover.io/_badge/github.com/macaron-contrib/binding)](http://gocover.io/github.com/macaron-contrib/binding) -======= - -Middlware binding provides request data binding and validation for [Macaron](https://github.com/Unknwon/macaron). - -### Installation - - go get github.com/macaron-contrib/binding - -## Getting Help - -- [API Reference](https://gowalker.org/github.com/macaron-contrib/binding) -- [Documentation](http://macaron.gogs.io/docs/middlewares/binding) - -## Credits - -This package is forked from [martini-contrib/binding](https://github.com/martini-contrib/binding) with modifications. - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go deleted file mode 100644 index fb224fafc05..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Bind(t *testing.T) { - Convey("Bind test", t, func() { - Convey("Bind form", func() { - for _, testCase := range formTestCases { - performFormTest(t, Bind, testCase) - } - }) - - Convey("Bind JSON", func() { - for _, testCase := range jsonTestCases { - performJsonTest(t, Bind, testCase) - } - }) - - Convey("Bind multipart form", func() { - for _, testCase := range multipartFormTestCases { - performMultipartFormTest(t, Bind, testCase) - } - }) - - Convey("Bind with file", func() { - for _, testCase := range fileTestCases { - performFileTest(t, Bind, testCase) - performFileTest(t, BindIgnErr, testCase) - } - }) - }) -} - -func Test_Version(t *testing.T) { - Convey("Get package version", t, func() { - So(Version(), ShouldEqual, _VERSION) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go deleted file mode 100644 index 83af92ff182..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "mime/multipart" - - "github.com/Unknwon/macaron" -) - -// These types are mostly contrived examples, but they're used -// across many test cases. 
The idea is to cover all the scenarios -// that this binding package might encounter in actual use. -type ( - // For basic test cases with a required field - Post struct { - Title string `form:"title" json:"title" binding:"Required"` - Content string `form:"content" json:"content"` - } - - // To be used as a nested struct (with a required field) - Person struct { - Name string `form:"name" json:"name" binding:"Required"` - Email string `form:"email" json:"email"` - } - - // For advanced test cases: multiple values, embedded - // and nested structs, an ignored field, and single - // and multiple file uploads - BlogPost struct { - Post - Id int `binding:"Required"` // JSON not specified here for test coverage - Ignored string `form:"-" json:"-"` - Ratings []int `form:"rating" json:"ratings"` - Author Person `json:"author"` - Coauthor *Person `json:"coauthor"` - HeaderImage *multipart.FileHeader - Pictures []*multipart.FileHeader `form:"picture"` - unexported string `form:"unexported"` - } - - EmbedPerson struct { - *Person - } - - SadForm struct { - AlphaDash string `form:"AlphaDash" binding:"AlphaDash"` - AlphaDashDot string `form:"AlphaDashDot" binding:"AlphaDashDot"` - MinSize string `form:"MinSize" binding:"MinSize(5)"` - MinSizeSlice []string `form:"MinSizeSlice" binding:"MinSize(5)"` - MaxSize string `form:"MaxSize" binding:"MaxSize(1)"` - MaxSizeSlice []string `form:"MaxSizeSlice" binding:"MaxSize(1)"` - Range int `form:"Range" binding:"Range(1,2)"` - RangeInvalid int `form:"RangeInvalid" binding:"Range(1)"` - Email string `binding:"Email"` - Url string `form:"Url" binding:"Url"` - UrlEmpty string `form:"UrlEmpty" binding:"Url"` - In string `form:"In" binding:"Default(0);In(1,2,3)"` - InInvalid string `form:"InInvalid" binding:"In(1,2,3)"` - NotIn string `form:"NotIn" binding:"NotIn(1,2,3)"` - Include string `form:"Include" binding:"Include(a)"` - Exclude string `form:"Exclude" binding:"Exclude(a)"` - } - - CustomErrorHandle struct { - Rule 
`binding:"CustomRule"` - } - - // The common function signature of the handlers going under test. - handlerFunc func(interface{}, ...interface{}) macaron.Handler - - // Used for testing mapping an interface to the context - // If used (withInterface = true in the testCases), a modeler - // should be mapped to the context as well as BlogPost, meaning - // you can receive a modeler in your application instead of a - // concrete BlogPost. - modeler interface { - Model() string - } -) - -func (p Post) Validate(ctx *macaron.Context, errs Errors) Errors { - if len(p.Title) < 10 { - errs = append(errs, Error{ - FieldNames: []string{"title"}, - Classification: "LengthError", - Message: "Life is too short", - }) - } - return errs -} - -func (p Post) Model() string { - return p.Title -} - -func (_ CustomErrorHandle) Error(_ *macaron.Context, _ Errors) {} - -const ( - testRoute = "/test" - formContentType = "application/x-www-form-urlencoded" -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go deleted file mode 100644 index 8b20657201c..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
- -package binding - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -var errorTestCases = []errorTestCase{ - { - description: "No errors", - errors: Errors{}, - expected: errorTestResult{ - statusCode: http.StatusOK, - }, - }, - { - description: "Deserialization error", - errors: Errors{ - { - Classification: ERR_DESERIALIZATION, - Message: "Some parser error here", - }, - }, - expected: errorTestResult{ - statusCode: http.StatusBadRequest, - contentType: _JSON_CONTENT_TYPE, - body: `[{"classification":"DeserializationError","message":"Some parser error here"}]`, - }, - }, - { - description: "Content-Type error", - errors: Errors{ - { - Classification: ERR_CONTENT_TYPE, - Message: "Empty Content-Type", - }, - }, - expected: errorTestResult{ - statusCode: http.StatusUnsupportedMediaType, - contentType: _JSON_CONTENT_TYPE, - body: `[{"classification":"ContentTypeError","message":"Empty Content-Type"}]`, - }, - }, - { - description: "Requirement error", - errors: Errors{ - { - FieldNames: []string{"some_field"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - }, - expected: errorTestResult{ - statusCode: STATUS_UNPROCESSABLE_ENTITY, - contentType: _JSON_CONTENT_TYPE, - body: `[{"fieldNames":["some_field"],"classification":"RequiredError","message":"Required"}]`, - }, - }, - { - description: "Bad header error", - errors: Errors{ - { - Classification: "HeaderError", - Message: "The X-Something header must be specified", - }, - }, - expected: errorTestResult{ - statusCode: STATUS_UNPROCESSABLE_ENTITY, - contentType: _JSON_CONTENT_TYPE, - body: `[{"classification":"HeaderError","message":"The X-Something header must be specified"}]`, - }, - }, - { - description: "Custom field error", - errors: Errors{ - { - FieldNames: []string{"month", "year"}, - Classification: "DateError", - Message: "The month and year must be in the future", - }, - }, - expected: errorTestResult{ - statusCode: 
STATUS_UNPROCESSABLE_ENTITY, - contentType: _JSON_CONTENT_TYPE, - body: `[{"fieldNames":["month","year"],"classification":"DateError","message":"The month and year must be in the future"}]`, - }, - }, - { - description: "Multiple errors", - errors: Errors{ - { - FieldNames: []string{"foo"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - { - FieldNames: []string{"foo"}, - Classification: "LengthError", - Message: "The length of the 'foo' field is too short", - }, - }, - expected: errorTestResult{ - statusCode: STATUS_UNPROCESSABLE_ENTITY, - contentType: _JSON_CONTENT_TYPE, - body: `[{"fieldNames":["foo"],"classification":"RequiredError","message":"Required"},{"fieldNames":["foo"],"classification":"LengthError","message":"The length of the 'foo' field is too short"}]`, - }, - }, -} - -func Test_ErrorHandler(t *testing.T) { - Convey("Error handler", t, func() { - for _, testCase := range errorTestCases { - performErrorTest(t, testCase) - } - }) -} - -func performErrorTest(t *testing.T, testCase errorTestCase) { - resp := httptest.NewRecorder() - - errorHandler(testCase.errors, resp) - - So(resp.Code, ShouldEqual, testCase.expected.statusCode) - So(resp.Header().Get("Content-Type"), ShouldEqual, testCase.expected.contentType) - - actualBody, err := ioutil.ReadAll(resp.Body) - So(err, ShouldBeNil) - So(string(actualBody), ShouldEqual, testCase.expected.body) -} - -type ( - errorTestCase struct { - description string - errors Errors - expected errorTestResult - } - - errorTestResult struct { - statusCode int - contentType string - body string - } -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go deleted file mode 100644 index 8537c0133ac..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// 
Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "fmt" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_ErrorsAdd(t *testing.T) { - Convey("Add new error", t, func() { - var actual Errors - expected := Errors{ - Error{ - FieldNames: []string{"Field1", "Field2"}, - Classification: "ErrorClass", - Message: "Some message", - }, - } - - actual.Add(expected[0].FieldNames, expected[0].Classification, expected[0].Message) - - So(len(actual), ShouldEqual, 1) - So(fmt.Sprintf("%#v", actual), ShouldEqual, fmt.Sprintf("%#v", expected)) - }) -} - -func Test_ErrorsLen(t *testing.T) { - Convey("Get number of errors", t, func() { - So(errorsTestSet.Len(), ShouldEqual, len(errorsTestSet)) - }) -} - -func Test_ErrorsHas(t *testing.T) { - Convey("Check error class", t, func() { - So(errorsTestSet.Has("ClassA"), ShouldBeTrue) - So(errorsTestSet.Has("ClassQ"), ShouldBeFalse) - }) -} - -func Test_ErrorGetters(t *testing.T) { - Convey("Get error detail", t, func() { - err := Error{ - FieldNames: []string{"field1", "field2"}, - Classification: "ErrorClass", - Message: "The message", - } - - fieldsActual := err.Fields() - - So(len(fieldsActual), ShouldEqual, 2) - So(fieldsActual[0], ShouldEqual, "field1") - So(fieldsActual[1], ShouldEqual, "field2") - - So(err.Kind(), ShouldEqual, "ErrorClass") - So(err.Error(), ShouldEqual, "The message") - }) -} - -/* -func TestErrorsWithClass(t *testing.T) { - expected := Errors{ - 
errorsTestSet[0], - errorsTestSet[3], - } - actualStr := fmt.Sprintf("%#v", errorsTestSet.WithClass("ClassA")) - expectedStr := fmt.Sprintf("%#v", expected) - if actualStr != expectedStr { - t.Errorf("Expected:\n%s\nbut got:\n%s", expectedStr, actualStr) - } -} -*/ - -var errorsTestSet = Errors{ - Error{ - FieldNames: []string{}, - Classification: "ClassA", - Message: "Foobar", - }, - Error{ - FieldNames: []string{}, - Classification: "ClassB", - Message: "Foo", - }, - Error{ - FieldNames: []string{"field1", "field2"}, - Classification: "ClassB", - Message: "Foobar", - }, - Error{ - FieldNames: []string{"field2"}, - Classification: "ClassA", - Message: "Foobar", - }, - Error{ - FieldNames: []string{"field2"}, - Classification: "ClassB", - Message: "Foobar", - }, -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/file_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/file_test.go deleted file mode 100644 index f6bab1792eb..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/file_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "bytes" - "mime/multipart" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" -) - -var fileTestCases = []fileTestCase{ - { - description: "Single file", - singleFile: &fileInfo{ - fileName: "message.txt", - data: "All your binding are belong to us", - }, - }, - { - description: "Multiple files", - multipleFiles: []*fileInfo{ - &fileInfo{ - fileName: "cool-gopher-fact.txt", - data: "Did you know? https://plus.google.com/+MatthewHolt/posts/GmVfd6TPJ51", - }, - &fileInfo{ - fileName: "gophercon2014.txt", - data: "@bradfitz has a Go time machine: https://twitter.com/mholt6/status/459463953395875840", - }, - }, - }, - { - description: "Single file and multiple files", - singleFile: &fileInfo{ - fileName: "social media.txt", - data: "Hey, you should follow @mholt6 (Twitter) or +MatthewHolt (Google+)", - }, - multipleFiles: []*fileInfo{ - &fileInfo{ - fileName: "thank you!", - data: "Also, thanks to all the contributors of this package!", - }, - &fileInfo{ - fileName: "btw...", - data: "This tool translates JSON into Go structs: http://mholt.github.io/json-to-go/", - }, - }, - }, -} - -func Test_FileUploads(t *testing.T) { - Convey("Test file upload", t, func() { - for _, testCase := range fileTestCases { - performFileTest(t, MultipartForm, testCase) - } - }) -} - -func performFileTest(t *testing.T, binder handlerFunc, testCase fileTestCase) { - httpRecorder := httptest.NewRecorder() - m := macaron.Classic() - - fileTestHandler := func(actual BlogPost, errs Errors) { - assertFileAsExpected(t, testCase, actual.HeaderImage, testCase.singleFile) - So(len(testCase.multipleFiles), ShouldEqual, len(actual.Pictures)) - - for i, expectedFile := range testCase.multipleFiles { - if i >= len(actual.Pictures) { - break - } - assertFileAsExpected(t, testCase, actual.Pictures[i], expectedFile) - } - } - - m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { - fileTestHandler(actual, errs) - }) - - m.ServeHTTP(httpRecorder, buildRequestWithFile(testCase)) - - switch httpRecorder.Code { - case 
http.StatusNotFound: - panic("Routing is messed up in test fixture (got 404): check methods and paths") - case http.StatusInternalServerError: - panic("Something bad happened on '" + testCase.description + "'") - } -} - -func assertFileAsExpected(t *testing.T, testCase fileTestCase, actual *multipart.FileHeader, expected *fileInfo) { - if expected == nil && actual == nil { - return - } - - if expected != nil && actual == nil { - So(actual, ShouldNotBeNil) - return - } else if expected == nil && actual != nil { - So(actual, ShouldBeNil) - return - } - - So(actual.Filename, ShouldEqual, expected.fileName) - So(unpackFileHeaderData(actual), ShouldEqual, expected.data) -} - -func buildRequestWithFile(testCase fileTestCase) *http.Request { - b := &bytes.Buffer{} - w := multipart.NewWriter(b) - - if testCase.singleFile != nil { - formFileSingle, err := w.CreateFormFile("header_image", testCase.singleFile.fileName) - if err != nil { - panic("Could not create FormFile (single file): " + err.Error()) - } - formFileSingle.Write([]byte(testCase.singleFile.data)) - } - - for _, file := range testCase.multipleFiles { - formFileMultiple, err := w.CreateFormFile("picture", file.fileName) - if err != nil { - panic("Could not create FormFile (multiple files): " + err.Error()) - } - formFileMultiple.Write([]byte(file.data)) - } - - err := w.Close() - if err != nil { - panic("Could not close multipart writer: " + err.Error()) - } - - req, err := http.NewRequest("POST", testRoute, b) - if err != nil { - panic("Could not create file upload request: " + err.Error()) - } - - req.Header.Set("Content-Type", w.FormDataContentType()) - - return req -} - -func unpackFileHeaderData(fh *multipart.FileHeader) string { - if fh == nil { - return "" - } - - f, err := fh.Open() - if err != nil { - panic("Could not open file header:" + err.Error()) - } - defer f.Close() - - var fb bytes.Buffer - _, err = fb.ReadFrom(f) - if err != nil { - panic("Could not read from file header:" + err.Error()) - } - 
- return fb.String() -} - -type ( - fileTestCase struct { - description string - input BlogPost - singleFile *fileInfo - multipleFiles []*fileInfo - } - - fileInfo struct { - fileName string - data string - } -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/form_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/form_test.go deleted file mode 100644 index 8e2480908ff..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/form_test.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "fmt" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" -) - -var formTestCases = []formTestCase{ - { - description: "Happy path", - shouldSucceed: true, - payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, - contentType: formContentType, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Happy path with interface", - shouldSucceed: true, - withInterface: true, - payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, - contentType: formContentType, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Empty payload", - shouldSucceed: false, - payload: ``, - contentType: formContentType, - expected: Post{}, - }, - { - description: "Empty content type", - shouldSucceed: false, - payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, - contentType: ``, - expected: Post{}, - }, - { - description: "Malformed form body", - shouldSucceed: false, - payload: `title=%2`, - contentType: formContentType, - expected: Post{}, - }, - { - description: "With nested and embedded structs", - shouldSucceed: true, - payload: `title=Glorious+Post+Title&id=1&name=Matt+Holt`, - contentType: formContentType, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Required embedded struct field not specified", - shouldSucceed: false, - payload: `id=1&name=Matt+Holt`, - contentType: formContentType, - expected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Required nested struct field not specified", - shouldSucceed: false, - payload: `title=Glorious+Post+Title&id=1`, - contentType: formContentType, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, - }, - { - description: "Multiple values into slice", - shouldSucceed: true, - payload: 
`title=Glorious+Post+Title&id=1&name=Matt+Holt&rating=4&rating=3&rating=5`, - contentType: formContentType, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}, Ratings: []int{4, 3, 5}}, - }, - { - description: "Unexported field", - shouldSucceed: true, - payload: `title=Glorious+Post+Title&id=1&name=Matt+Holt&unexported=foo`, - contentType: formContentType, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Query string POST", - shouldSucceed: true, - payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, - contentType: formContentType, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Query string with Content-Type (POST request)", - shouldSucceed: true, - queryString: "?title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet", - payload: ``, - contentType: formContentType, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Query string without Content-Type (GET request)", - shouldSucceed: true, - method: "GET", - queryString: "?title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet", - payload: ``, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Embed struct pointer", - shouldSucceed: true, - deepEqual: true, - method: "GET", - queryString: "?name=Glorious+Post+Title&email=Lorem+ipsum+dolor+sit+amet", - payload: ``, - expected: EmbedPerson{&Person{Name: "Glorious Post Title", Email: "Lorem ipsum dolor sit amet"}}, - }, - { - description: "Embed struct pointer remain nil if not binded", - shouldSucceed: true, - deepEqual: true, - method: "GET", - queryString: "?", - payload: ``, - expected: EmbedPerson{nil}, - }, - { - description: "Custom error handler", - shouldSucceed: true, - deepEqual: true, - method: "GET", - 
queryString: "?", - payload: ``, - expected: CustomErrorHandle{}, - }, -} - -func init() { - AddRule(&Rule{ - func(rule string) bool { - return rule == "CustomRule" - }, - func(_ Errors, _ string, _ interface{}) bool { - return false - }, - }) - SetNameMapper(nameMapper) -} - -func Test_Form(t *testing.T) { - Convey("Test form", t, func() { - for _, testCase := range formTestCases { - performFormTest(t, Form, testCase) - } - }) -} - -func performFormTest(t *testing.T, binder handlerFunc, testCase formTestCase) { - resp := httptest.NewRecorder() - m := macaron.Classic() - - formTestHandler := func(actual interface{}, errs Errors) { - if testCase.shouldSucceed && len(errs) > 0 { - So(len(errs), ShouldEqual, 0) - } else if !testCase.shouldSucceed && len(errs) == 0 { - So(len(errs), ShouldNotEqual, 0) - } - expString := fmt.Sprintf("%+v", testCase.expected) - actString := fmt.Sprintf("%+v", actual) - if actString != expString && !(testCase.deepEqual && reflect.DeepEqual(testCase.expected, actual)) { - So(actString, ShouldEqual, expString) - } - } - - switch testCase.expected.(type) { - case Post: - if testCase.withInterface { - m.Post(testRoute, binder(Post{}, (*modeler)(nil)), func(actual Post, iface modeler, errs Errors) { - So(actual.Title, ShouldEqual, iface.Model()) - formTestHandler(actual, errs) - }) - } else { - m.Post(testRoute, binder(Post{}), func(actual Post, errs Errors) { - formTestHandler(actual, errs) - }) - m.Get(testRoute, binder(Post{}), func(actual Post, errs Errors) { - formTestHandler(actual, errs) - }) - } - - case BlogPost: - if testCase.withInterface { - m.Post(testRoute, binder(BlogPost{}, (*modeler)(nil)), func(actual BlogPost, iface modeler, errs Errors) { - So(actual.Title, ShouldEqual, iface.Model()) - formTestHandler(actual, errs) - }) - } else { - m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { - formTestHandler(actual, errs) - }) - } - - case EmbedPerson: - m.Post(testRoute, binder(EmbedPerson{}), func(actual 
EmbedPerson, errs Errors) { - formTestHandler(actual, errs) - }) - m.Get(testRoute, binder(EmbedPerson{}), func(actual EmbedPerson, errs Errors) { - formTestHandler(actual, errs) - }) - case CustomErrorHandle: - m.Get(testRoute, binder(CustomErrorHandle{}), func(actual CustomErrorHandle, errs Errors) { - formTestHandler(actual, errs) - }) - } - - if len(testCase.method) == 0 { - testCase.method = "POST" - } - - req, err := http.NewRequest(testCase.method, testRoute+testCase.queryString, strings.NewReader(testCase.payload)) - if err != nil { - panic(err) - } - req.Header.Set("Content-Type", testCase.contentType) - - m.ServeHTTP(resp, req) - - switch resp.Code { - case http.StatusNotFound: - panic("Routing is messed up in test fixture (got 404): check methods and paths") - case http.StatusInternalServerError: - panic("Something bad happened on '" + testCase.description + "'") - } -} - -type ( - formTestCase struct { - description string - shouldSucceed bool - deepEqual bool - withInterface bool - queryString string - payload string - contentType string - expected interface{} - method string - } -) - -type defaultForm struct { - Default string `binding:"Default(hello world)"` -} - -func Test_Default(t *testing.T) { - Convey("Test default value", t, func() { - m := macaron.Classic() - m.Get("/", Bind(defaultForm{}), func(f defaultForm) { - So(f.Default, ShouldEqual, "hello world") - }) - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - - m.ServeHTTP(resp, req) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/json_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/json_test.go deleted file mode 100644 index 1321f0bd14c..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/json_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the 
"License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/Unknwon/macaron" - . "github.com/smartystreets/goconvey/convey" -) - -var jsonTestCases = []jsonTestCase{ - { - description: "Happy path", - shouldSucceedOnJson: true, - payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, - contentType: _JSON_CONTENT_TYPE, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Happy path with interface", - shouldSucceedOnJson: true, - withInterface: true, - payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, - contentType: _JSON_CONTENT_TYPE, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Nil payload", - shouldSucceedOnJson: false, - payload: `-nil-`, - contentType: _JSON_CONTENT_TYPE, - expected: Post{}, - }, - { - description: "Empty payload", - shouldSucceedOnJson: false, - payload: ``, - contentType: _JSON_CONTENT_TYPE, - expected: Post{}, - }, - { - description: "Empty content type", - shouldSucceedOnJson: true, - shouldFailOnBind: true, - payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, - contentType: ``, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Unsupported content type", - shouldSucceedOnJson: true, - 
shouldFailOnBind: true, - payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, - contentType: `BoGuS`, - expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, - }, - { - description: "Malformed JSON", - shouldSucceedOnJson: false, - payload: `{"title":"foo"`, - contentType: _JSON_CONTENT_TYPE, - expected: Post{}, - }, - { - description: "Deserialization with nested and embedded struct", - shouldSucceedOnJson: true, - payload: `{"title":"Glorious Post Title", "id":1, "author":{"name":"Matt Holt"}}`, - contentType: _JSON_CONTENT_TYPE, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Deserialization with nested and embedded struct with interface", - shouldSucceedOnJson: true, - withInterface: true, - payload: `{"title":"Glorious Post Title", "id":1, "author":{"name":"Matt Holt"}}`, - contentType: _JSON_CONTENT_TYPE, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Required nested struct field not specified", - shouldSucceedOnJson: false, - payload: `{"title":"Glorious Post Title", "id":1, "author":{}}`, - contentType: _JSON_CONTENT_TYPE, - expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, - }, - { - description: "Required embedded struct field not specified", - shouldSucceedOnJson: false, - payload: `{"id":1, "author":{"name":"Matt Holt"}}`, - contentType: _JSON_CONTENT_TYPE, - expected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Slice of Posts", - shouldSucceedOnJson: true, - payload: `[{"title": "First Post"}, {"title": "Second Post"}]`, - contentType: _JSON_CONTENT_TYPE, - expected: []Post{Post{Title: "First Post"}, Post{Title: "Second Post"}}, - }, -} - -func Test_Json(t *testing.T) { - Convey("Test JSON", t, func() { - for _, testCase := range jsonTestCases { - performJsonTest(t, Json, 
testCase) - } - }) -} - -func performJsonTest(t *testing.T, binder handlerFunc, testCase jsonTestCase) { - var payload io.Reader - httpRecorder := httptest.NewRecorder() - m := macaron.Classic() - - jsonTestHandler := func(actual interface{}, errs Errors) { - if testCase.shouldSucceedOnJson && len(errs) > 0 { - So(len(errs), ShouldEqual, 0) - } else if !testCase.shouldSucceedOnJson && len(errs) == 0 { - So(len(errs), ShouldNotEqual, 0) - } - So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", testCase.expected)) - } - - switch testCase.expected.(type) { - case []Post: - if testCase.withInterface { - m.Post(testRoute, binder([]Post{}, (*modeler)(nil)), func(actual []Post, iface modeler, errs Errors) { - - for _, a := range actual { - So(a.Title, ShouldEqual, iface.Model()) - jsonTestHandler(a, errs) - } - }) - } else { - m.Post(testRoute, binder([]Post{}), func(actual []Post, errs Errors) { - jsonTestHandler(actual, errs) - }) - } - - case Post: - if testCase.withInterface { - m.Post(testRoute, binder(Post{}, (*modeler)(nil)), func(actual Post, iface modeler, errs Errors) { - So(actual.Title, ShouldEqual, iface.Model()) - jsonTestHandler(actual, errs) - }) - } else { - m.Post(testRoute, binder(Post{}), func(actual Post, errs Errors) { - jsonTestHandler(actual, errs) - }) - } - - case BlogPost: - if testCase.withInterface { - m.Post(testRoute, binder(BlogPost{}, (*modeler)(nil)), func(actual BlogPost, iface modeler, errs Errors) { - So(actual.Title, ShouldEqual, iface.Model()) - jsonTestHandler(actual, errs) - }) - } else { - m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { - jsonTestHandler(actual, errs) - }) - } - } - - if testCase.payload == "-nil-" { - payload = nil - } else { - payload = strings.NewReader(testCase.payload) - } - - req, err := http.NewRequest("POST", testRoute, payload) - if err != nil { - panic(err) - } - req.Header.Set("Content-Type", testCase.contentType) - - m.ServeHTTP(httpRecorder, req) - - switch 
httpRecorder.Code { - case http.StatusNotFound: - panic("Routing is messed up in test fixture (got 404): check method and path") - case http.StatusInternalServerError: - panic("Something bad happened on '" + testCase.description + "'") - default: - if testCase.shouldSucceedOnJson && - httpRecorder.Code != http.StatusOK && - !testCase.shouldFailOnBind { - So(httpRecorder.Code, ShouldEqual, http.StatusOK) - } - } -} - -type ( - jsonTestCase struct { - description string - withInterface bool - shouldSucceedOnJson bool - shouldFailOnBind bool - payload string - contentType string - expected interface{} - } -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/misc_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/misc_test.go deleted file mode 100644 index 29f72f627d2..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/misc_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/Unknwon/macaron" - . "github.com/smartystreets/goconvey/convey" -) - -// When binding from Form data, testing the type of data to bind -// and converting a string into that type is tedious, so these tests -// cover all those cases. 
-func Test_SetWithProperType(t *testing.T) { - Convey("Set with proper type", t, func() { - testInputs := map[string]string{ - "successful": `integer=-1&integer8=-8&integer16=-16&integer32=-32&integer64=-64&uinteger=1&uinteger8=8&uinteger16=16&uinteger32=32&uinteger64=64&boolean_1=true&fl32_1=32.3232&fl64_1=-64.6464646464&str=string`, - "errorful": `integer=&integer8=asdf&integer16=--&integer32=&integer64=dsf&uinteger=&uinteger8=asdf&uinteger16=+&uinteger32= 32 &uinteger64=+%20+&boolean_1=&boolean_2=asdf&fl32_1=asdf&fl32_2=&fl64_1=&fl64_2=asdfstr`, - } - - expectedOutputs := map[string]Everything{ - "successful": Everything{ - Integer: -1, - Integer8: -8, - Integer16: -16, - Integer32: -32, - Integer64: -64, - Uinteger: 1, - Uinteger8: 8, - Uinteger16: 16, - Uinteger32: 32, - Uinteger64: 64, - Boolean_1: true, - Fl32_1: 32.3232, - Fl64_1: -64.6464646464, - Str: "string", - }, - "errorful": Everything{}, - } - - for key, testCase := range testInputs { - httpRecorder := httptest.NewRecorder() - m := macaron.Classic() - - m.Post(testRoute, Form(Everything{}), func(actual Everything, errs Errors) { - So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", expectedOutputs[key])) - }) - req, err := http.NewRequest("POST", testRoute, strings.NewReader(testCase)) - if err != nil { - panic(err) - } - req.Header.Set("Content-Type", formContentType) - m.ServeHTTP(httpRecorder, req) - } - }) -} - -// Each binder middleware should assert that the struct passed in is not -// a pointer (to avoid race conditions) -func Test_EnsureNotPointer(t *testing.T) { - Convey("Ensure field is not a pointer", t, func() { - shouldPanic := func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - ensureNotPointer(&Post{}) - } - - shouldNotPanic := func() { - defer func() { - So(recover(), ShouldBeNil) - }() - ensureNotPointer(Post{}) - } - - shouldPanic() - shouldNotPanic() - }) -} - -// Used in testing setWithProperType; kind of clunky... 
-type Everything struct { - Integer int `form:"integer"` - Integer8 int8 `form:"integer8"` - Integer16 int16 `form:"integer16"` - Integer32 int32 `form:"integer32"` - Integer64 int64 `form:"integer64"` - Uinteger uint `form:"uinteger"` - Uinteger8 uint8 `form:"uinteger8"` - Uinteger16 uint16 `form:"uinteger16"` - Uinteger32 uint32 `form:"uinteger32"` - Uinteger64 uint64 `form:"uinteger64"` - Boolean_1 bool `form:"boolean_1"` - Boolean_2 bool `form:"boolean_2"` - Fl32_1 float32 `form:"fl32_1"` - Fl32_2 float32 `form:"fl32_2"` - Fl64_1 float64 `form:"fl64_1"` - Fl64_2 float64 `form:"fl64_2"` - Str string `form:"str"` -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/multipart_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/multipart_test.go deleted file mode 100644 index fef10523593..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/multipart_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "bytes" - "fmt" - "mime/multipart" - "net/http" - "net/http/httptest" - "strconv" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" -) - -var multipartFormTestCases = []multipartFormTestCase{ - { - description: "Happy multipart form path", - shouldSucceed: true, - inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "FormValue called before req.MultipartReader(); see https://github.com/martini-contrib/csrf/issues/6", - shouldSucceed: true, - callFormValueBefore: true, - inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Empty payload", - shouldSucceed: false, - inputAndExpected: BlogPost{}, - }, - { - description: "Missing required field (Id)", - shouldSucceed: false, - inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Required embedded struct field not specified", - shouldSucceed: false, - inputAndExpected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, - }, - { - description: "Required nested struct field not specified", - shouldSucceed: false, - inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, - }, - { - description: "Multiple values", - shouldSucceed: true, - inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}, Ratings: []int{3, 5, 4}}, - }, - { - description: "Bad multipart encoding", - shouldSucceed: false, - malformEncoding: true, - }, -} - -func Test_MultipartForm(t *testing.T) { - Convey("Test multipart form", t, func() { - for _, testCase := range multipartFormTestCases { - performMultipartFormTest(t, MultipartForm, testCase) - } - }) -} - -func performMultipartFormTest(t *testing.T, binder handlerFunc, testCase multipartFormTestCase) { - httpRecorder := httptest.NewRecorder() - m := macaron.Classic() - - m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { - if testCase.shouldSucceed && 
len(errs) > 0 { - So(len(errs), ShouldEqual, 0) - } else if !testCase.shouldSucceed && len(errs) == 0 { - So(len(errs), ShouldNotEqual, 0) - } - So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", testCase.inputAndExpected)) - }) - - multipartPayload, mpWriter := makeMultipartPayload(testCase) - - req, err := http.NewRequest("POST", testRoute, multipartPayload) - if err != nil { - panic(err) - } - - req.Header.Add("Content-Type", mpWriter.FormDataContentType()) - - err = mpWriter.Close() - if err != nil { - panic(err) - } - - if testCase.callFormValueBefore { - req.FormValue("foo") - } - - m.ServeHTTP(httpRecorder, req) - - switch httpRecorder.Code { - case http.StatusNotFound: - panic("Routing is messed up in test fixture (got 404): check methods and paths") - case http.StatusInternalServerError: - panic("Something bad happened on '" + testCase.description + "'") - } -} - -// Writes the input from a test case into a buffer using the multipart writer. -func makeMultipartPayload(testCase multipartFormTestCase) (*bytes.Buffer, *multipart.Writer) { - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - if testCase.malformEncoding { - // TODO: Break the multipart form parser which is apparently impervious!! - // (Get it to return an error. Trying to get 100% test coverage.) 
- body.Write([]byte(`--` + writer.Boundary() + `\nContent-Disposition: form-data; name="foo"\n\n--` + writer.Boundary() + `--`)) - return body, writer - } else { - writer.WriteField("title", testCase.inputAndExpected.Title) - writer.WriteField("content", testCase.inputAndExpected.Content) - writer.WriteField("id", strconv.Itoa(testCase.inputAndExpected.Id)) - writer.WriteField("ignored", testCase.inputAndExpected.Ignored) - for _, value := range testCase.inputAndExpected.Ratings { - writer.WriteField("rating", strconv.Itoa(value)) - } - writer.WriteField("name", testCase.inputAndExpected.Author.Name) - writer.WriteField("email", testCase.inputAndExpected.Author.Email) - return body, writer - } -} - -type ( - multipartFormTestCase struct { - description string - shouldSucceed bool - inputAndExpected BlogPost - malformEncoding bool - callFormValueBefore bool - } -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/binding/validate_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/binding/validate_test.go deleted file mode 100644 index 66d31e159e4..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/binding/validate_test.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2014 martini-contrib/binding Authors -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package binding - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" -) - -var validationTestCases = []validationTestCase{ - { - description: "No errors", - data: BlogPost{ - Id: 1, - Post: Post{ - Title: "Behold The Title!", - Content: "And some content", - }, - Author: Person{ - Name: "Matt Holt", - }, - }, - expectedErrors: Errors{}, - }, - { - description: "ID required", - data: BlogPost{ - Post: Post{ - Title: "Behold The Title!", - Content: "And some content", - }, - Author: Person{ - Name: "Matt Holt", - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"id"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - }, - }, - { - description: "Embedded struct field required", - data: BlogPost{ - Id: 1, - Post: Post{ - Content: "Content given, but title is required", - }, - Author: Person{ - Name: "Matt Holt", - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"title"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - Error{ - FieldNames: []string{"title"}, - Classification: "LengthError", - Message: "Life is too short", - }, - }, - }, - { - description: "Nested struct field required", - data: BlogPost{ - Id: 1, - Post: Post{ - Title: "Behold The Title!", - Content: "And some content", - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"name"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - }, - }, - { - description: "Required field missing in nested struct pointer", - data: BlogPost{ - Id: 1, - Post: Post{ - Title: "Behold The Title!", - Content: "And some content", - }, - Author: Person{ - Name: "Matt Holt", - }, - Coauthor: &Person{}, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"name"}, - Classification: ERR_REQUIRED, - Message: "Required", - }, - }, - }, - { - description: "All required fields specified in nested struct pointer", - data: BlogPost{ - Id: 1, - Post: Post{ - Title: "Behold The Title!", - Content: "And some content", - }, - Author: Person{ - Name: "Matt Holt", - }, - 
Coauthor: &Person{ - Name: "Jeremy Saenz", - }, - }, - expectedErrors: Errors{}, - }, - { - description: "Custom validation should put an error", - data: BlogPost{ - Id: 1, - Post: Post{ - Title: "Too short", - Content: "And some content", - }, - Author: Person{ - Name: "Matt Holt", - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"title"}, - Classification: "LengthError", - Message: "Life is too short", - }, - }, - }, - { - description: "List Validation", - data: []BlogPost{ - BlogPost{ - Id: 1, - Post: Post{ - Title: "First Post", - Content: "And some content", - }, - Author: Person{ - Name: "Leeor Aharon", - }, - }, - BlogPost{ - Id: 2, - Post: Post{ - Title: "Second Post", - Content: "And some content", - }, - Author: Person{ - Name: "Leeor Aharon", - }, - }, - }, - expectedErrors: Errors{}, - }, - { - description: "List Validation w/ Errors", - data: []BlogPost{ - BlogPost{ - Id: 1, - Post: Post{ - Title: "First Post", - Content: "And some content", - }, - Author: Person{ - Name: "Leeor Aharon", - }, - }, - BlogPost{ - Id: 2, - Post: Post{ - Title: "Too Short", - Content: "And some content", - }, - Author: Person{ - Name: "Leeor Aharon", - }, - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"title"}, - Classification: "LengthError", - Message: "Life is too short", - }, - }, - }, - { - description: "List of invalid custom validations", - data: []SadForm{ - SadForm{ - AlphaDash: ",", - AlphaDashDot: ",", - MinSize: ",", - MinSizeSlice: []string{",", ","}, - MaxSize: ",,", - MaxSizeSlice: []string{",", ","}, - Range: 3, - Email: ",", - Url: ",", - UrlEmpty: "", - InInvalid: "4", - NotIn: "1", - Include: "def", - Exclude: "abc", - }, - }, - expectedErrors: Errors{ - Error{ - FieldNames: []string{"AlphaDash"}, - Classification: "AlphaDashError", - Message: "AlphaDash", - }, - Error{ - FieldNames: []string{"AlphaDashDot"}, - Classification: "AlphaDashDot", - Message: "AlphaDashDot", - }, - Error{ - FieldNames: 
[]string{"MinSize"}, - Classification: "MinSize", - Message: "MinSize", - }, - Error{ - FieldNames: []string{"MinSize"}, - Classification: "MinSize", - Message: "MinSize", - }, - Error{ - FieldNames: []string{"MaxSize"}, - Classification: "MaxSize", - Message: "MaxSize", - }, - Error{ - FieldNames: []string{"MaxSize"}, - Classification: "MaxSize", - Message: "MaxSize", - }, - Error{ - FieldNames: []string{"Range"}, - Classification: "Range", - Message: "Range", - }, - Error{ - FieldNames: []string{"Email"}, - Classification: "Email", - Message: "Email", - }, - Error{ - FieldNames: []string{"Url"}, - Classification: "Url", - Message: "Url", - }, - Error{ - FieldNames: []string{"Default"}, - Classification: "Default", - Message: "Default", - }, - Error{ - FieldNames: []string{"InInvalid"}, - Classification: "In", - Message: "In", - }, - Error{ - FieldNames: []string{"NotIn"}, - Classification: "NotIn", - Message: "NotIn", - }, - Error{ - FieldNames: []string{"Include"}, - Classification: "Include", - Message: "Include", - }, - Error{ - FieldNames: []string{"Exclude"}, - Classification: "Exclude", - Message: "Exclude", - }, - }, - }, - { - description: "List of valid custom validations", - data: []SadForm{ - SadForm{ - AlphaDash: "123-456", - AlphaDashDot: "123.456", - MinSize: "12345", - MinSizeSlice: []string{"1", "2", "3", "4", "5"}, - MaxSize: "1", - MaxSizeSlice: []string{"1"}, - Range: 2, - In: "1", - InInvalid: "1", - Email: "123@456.com", - Url: "http://123.456", - Include: "abc", - }, - }, - }, -} - -func Test_Validation(t *testing.T) { - Convey("Test validation", t, func() { - for _, testCase := range validationTestCases { - performValidationTest(t, testCase) - } - }) -} - -func performValidationTest(t *testing.T, testCase validationTestCase) { - httpRecorder := httptest.NewRecorder() - m := macaron.Classic() - - m.Post(testRoute, Validate(testCase.data), func(actual Errors) { - So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", 
testCase.expectedErrors)) - }) - - req, err := http.NewRequest("POST", testRoute, nil) - if err != nil { - panic(err) - } - - m.ServeHTTP(httpRecorder, req) - - switch httpRecorder.Code { - case http.StatusNotFound: - panic("Routing is messed up in test fixture (got 404): check methods and paths") - case http.StatusInternalServerError: - panic("Something bad happened on '" + testCase.description + "'") - } -} - -type ( - validationTestCase struct { - description string - data interface{} - expectedErrors Errors - } -) diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md b/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md deleted file mode 100644 index 01de811eacc..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/README.md +++ /dev/null @@ -1,21 +0,0 @@ -session [![Build Status](https://drone.io/github.com/macaron-contrib/session/status.png)](https://drone.io/github.com/macaron-contrib/session/latest) [![](http://gocover.io/_badge/github.com/macaron-contrib/session)](http://gocover.io/github.com/macaron-contrib/session) -======= - -Middleware session provides session management for [Macaron](https://github.com/Unknwon/macaron). It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase, Ledis and Nodb. - -### Installation - - go get github.com/macaron-contrib/session - -## Getting Help - -- [API Reference](https://gowalker.org/github.com/macaron-contrib/session) -- [Documentation](http://macaron.gogs.io/docs/middlewares/session) - -## Credits - -This package is forked from [beego/session](https://github.com/astaxie/beego/tree/master/session) with reconstruction(over 80%). - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/file_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/file_test.go deleted file mode 100644 index 9c83555ab4e..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/file_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "os" - "path" - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_FileProvider(t *testing.T) { - Convey("Test file session provider", t, func() { - dir := path.Join(os.TempDir(), "data/sessions") - os.RemoveAll(dir) - testProvider(Options{ - Provider: "file", - ProviderConfig: dir, - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go deleted file mode 100644 index dac42a364b7..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . "github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_LedisProvider(t *testing.T) { - Convey("Test ledis session provider", t, func() { - opt := session.Options{ - Provider: "ledis", - ProviderConfig: "data_dir=./tmp.db", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - 
m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - Convey("Regenrate empty session", func() { - m.Get("/empty", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/empty", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") - m.ServeHTTP(resp, req) - }) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go deleted file mode 100644 index beb272d52b1..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_MemcacheProvider(t *testing.T) { - Convey("Test memcache session provider", t, func() { - opt := session.Options{ - Provider: "memcache", - ProviderConfig: "127.0.0.1:9090", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - }) - - Convey("Regenrate empty session", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - }) - - resp := httptest.NewRecorder() - req, err := 
http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") - m.ServeHTTP(resp, req) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/memory_test.go deleted file mode 100644 index 41659bb6a1e..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/memory_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "testing" - - . "github.com/smartystreets/goconvey/convey" -) - -func Test_MemProvider(t *testing.T) { - Convey("Test memory session provider", t, func() { - testProvider(Options{}) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go deleted file mode 100644 index 15b3996a228..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Unknwon/macaron" - . "github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_MysqlProvider(t *testing.T) { - Convey("Test mysql session provider", t, func() { - opt := session.Options{ - Provider: "mysql", - ProviderConfig: "root:@tcp(localhost:3306)/macaron?charset=utf8", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - So(raw.Release(), ShouldBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - 
So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - }) - - Convey("Regenrate empty session", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf48; Path=/;") - m.ServeHTTP(resp, req) - }) - - Convey("GC session", func() { - m := macaron.New() - opt2 := opt - opt2.Gclifetime = 1 - m.Use(session.Sessioner(opt2)) - - m.Get("/", func(sess session.Store) { - sess.Set("uname", "unknwon") - So(sess.ID(), ShouldNotBeEmpty) - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Flush(), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - time.Sleep(2 * time.Second) - sess.GC() - So(sess.Count(), ShouldEqual, 0) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go deleted file mode 100644 index c86ba98ded5..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb_test.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . "github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_LedisProvider(t *testing.T) { - Convey("Test nodb session provider", t, func() { - opt := session.Options{ - Provider: "nodb", - ProviderConfig: "./tmp.db", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - 
cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - Convey("Regenrate empty session", func() { - m.Get("/empty", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - }) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/empty", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") - m.ServeHTTP(resp, req) - }) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go deleted file mode 100644 index ea212c729f6..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_PostgresProvider(t *testing.T) { - Convey("Test postgres session provider", t, func() { - opt := session.Options{ - Provider: "postgres", - ProviderConfig: "user=jiahuachen dbname=macaron port=5432 sslmode=disable", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - So(raw.Release(), ShouldBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - }) - - Convey("Regenrate empty session", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, 
ShouldNotBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf48; Path=/;") - m.ServeHTTP(resp, req) - }) - - Convey("GC session", func() { - m := macaron.New() - opt2 := opt - opt2.Gclifetime = 1 - m.Use(session.Sessioner(opt2)) - - m.Get("/", func(sess session.Store) { - sess.Set("uname", "unknwon") - So(sess.ID(), ShouldNotBeEmpty) - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Flush(), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - time.Sleep(2 * time.Second) - sess.GC() - So(sess.Count(), ShouldEqual, 0) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go deleted file mode 100644 index 9fd8e6518f8..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis_test.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" - - "github.com/macaron-contrib/session" -) - -func Test_RedisProvider(t *testing.T) { - Convey("Test redis session provider", t, func() { - opt := session.Options{ - Provider: "redis", - ProviderConfig: "addr=:6379", - } - - Convey("Basic operation", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess session.Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - }) - - Convey("Regenrate empty session", func() { - m := macaron.New() - m.Use(session.Sessioner(opt)) - m.Get("/", func(ctx *macaron.Context, sess session.Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", 
"/", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") - m.ServeHTTP(resp, req) - }) - }) -} diff --git a/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go b/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go deleted file mode 100644 index 82efc277c61..00000000000 --- a/Godeps/_workspace/src/github.com/macaron-contrib/session/session_test.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package session - -import ( - "net/http" - "net/http/httptest" - "testing" - "time" - - "github.com/Unknwon/macaron" - . 
"github.com/smartystreets/goconvey/convey" -) - -func Test_Version(t *testing.T) { - Convey("Check package version", t, func() { - So(Version(), ShouldEqual, _VERSION) - }) -} - -func Test_Sessioner(t *testing.T) { - Convey("Use session middleware", t, func() { - m := macaron.New() - m.Use(Sessioner()) - m.Get("/", func() {}) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) - - Convey("Register invalid provider", t, func() { - Convey("Provider not exists", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - - m := macaron.New() - m.Use(Sessioner(Options{ - Provider: "fake", - })) - }) - - Convey("Provider value is nil", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - - Register("fake", nil) - }) - - Convey("Register twice", func() { - defer func() { - So(recover(), ShouldNotBeNil) - }() - - Register("memory", &MemProvider{}) - }) - }) -} - -func testProvider(opt Options) { - Convey("Basic operation", func() { - m := macaron.New() - m.Use(Sessioner(opt)) - - m.Get("/", func(ctx *macaron.Context, sess Store) { - sess.Set("uname", "unknwon") - }) - m.Get("/reg", func(ctx *macaron.Context, sess Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := raw.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - }) - m.Get("/get", func(ctx *macaron.Context, sess Store) { - sid := sess.ID() - So(sid, ShouldNotBeEmpty) - - raw, err := sess.Read(sid) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Delete("uname"), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - So(sess.Destory(ctx), ShouldBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - cookie := resp.Header().Get("Set-Cookie") - 
- resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/reg", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - - cookie = resp.Header().Get("Set-Cookie") - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", cookie) - m.ServeHTTP(resp, req) - }) - - Convey("Regenrate empty session", func() { - m := macaron.New() - m.Use(Sessioner(opt)) - m.Get("/", func(ctx *macaron.Context, sess Store) { - raw, err := sess.RegenerateId(ctx) - So(err, ShouldBeNil) - So(raw, ShouldNotBeNil) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", "MacaronSession=ad2c7e3cbecfcf486; Path=/;") - m.ServeHTTP(resp, req) - }) - - Convey("GC session", func() { - m := macaron.New() - opt2 := opt - opt2.Gclifetime = 1 - m.Use(Sessioner(opt2)) - - m.Get("/", func(sess Store) { - sess.Set("uname", "unknwon") - So(sess.ID(), ShouldNotBeEmpty) - uname := sess.Get("uname") - So(uname, ShouldNotBeNil) - So(uname, ShouldEqual, "unknwon") - - So(sess.Flush(), ShouldBeNil) - So(sess.Get("uname"), ShouldBeNil) - - time.Sleep(2 * time.Second) - sess.GC() - So(sess.Count(), ShouldEqual, 0) - }) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - }) -} - -func Test_Flash(t *testing.T) { - Convey("Test flash", t, func() { - m := macaron.New() - m.Use(Sessioner()) - m.Get("/set", func(f *Flash) string { - f.Success("success") - f.Error("error") - f.Warning("warning") - f.Info("info") - return "" - }) - m.Get("/get", func() {}) - - resp := httptest.NewRecorder() - req, err := http.NewRequest("GET", "/set", nil) - So(err, ShouldBeNil) - m.ServeHTTP(resp, req) - - resp = httptest.NewRecorder() - req, err = http.NewRequest("GET", "/get", nil) - So(err, ShouldBeNil) - req.Header.Set("Cookie", 
"macaron_flash=error%3Derror%26info%3Dinfo%26success%3Dsuccess%26warning%3Dwarning; Path=/") - m.ServeHTTP(resp, req) - }) -} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/.gitignore b/Godeps/_workspace/src/gopkg.in/macaron.v1/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/.gitignore rename to Godeps/_workspace/src/gopkg.in/macaron.v1/.gitignore diff --git a/Godeps/_workspace/src/gopkg.in/macaron.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/macaron.v1/.travis.yml new file mode 100644 index 00000000000..6eeb32a4e38 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/Godeps/_workspace/src/gopkg.in/macaron.v1/LICENSE b/Godeps/_workspace/src/gopkg.in/macaron.v1/LICENSE new file mode 100644 index 00000000000..37ec93a14fd --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/gopkg.in/macaron.v1/README.md b/Godeps/_workspace/src/gopkg.in/macaron.v1/README.md new file mode 100644 index 00000000000..704ecf34077 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/README.md @@ -0,0 +1,94 @@ +Macaron [![Build Status](https://travis-ci.org/go-macaron/macaron.svg?branch=v1)](https://travis-ci.org/go-macaron/macaron) [![](http://gocover.io/_badge/github.com/go-macaron/macaron)](http://gocover.io/github.com/go-macaron/macaron) +======================= + +![Macaron Logo](https://raw.githubusercontent.com/go-macaron/macaron/v1/macaronlogo.png) + +Package macaron is a high productive and modular web framework in Go. + +##### Current version: 0.8.0 + +## Getting Started + +The minimum requirement of Go is **1.3**. + +To install Macaron: + + go get gopkg.in/macaron.v1 + +The very basic usage of Macaron: + +```go +package main + +import "gopkg.in/macaron.v1" + +func main() { + m := macaron.Classic() + m.Get("/", func() string { + return "Hello world!" + }) + m.Run() +} +``` + +## Features + +- Powerful routing with suburl. +- Flexible routes combinations. +- Unlimited nested group routers. +- Directly integrate with existing services. +- Dynamically change template files at runtime. +- Allow to use in-memory template and static files. +- Easy to plugin/unplugin features with modular design. 
+- Handy dependency injection powered by [inject](https://github.com/codegangsta/inject). +- Better router layer and less reflection make faster speed. + +## Middlewares + +Middlewares allow you easily plugin/unplugin features for your Macaron applications. + +There are already many [middlewares](https://github.com/go-macaron) to simplify your work: + +- render - Go template engine +- static - Serves static files +- [gzip](https://github.com/go-macaron/gzip) - Gzip compression to all responses +- [binding](https://github.com/go-macaron/binding) - Request data binding and validation +- [i18n](https://github.com/go-macaron/i18n) - Internationalization and Localization +- [cache](https://github.com/go-macaron/cache) - Cache manager +- [session](https://github.com/go-macaron/session) - Session manager +- [csrf](https://github.com/go-macaron/csrf) - Generates and validates csrf tokens +- [captcha](https://github.com/go-macaron/captcha) - Captcha service +- [pongo2](https://github.com/go-macaron/pongo2) - Pongo2 template engine support +- [sockets](https://github.com/go-macaron/sockets) - WebSockets channels binding +- [bindata](https://github.com/go-macaron/bindata) - Embed binary data as static and template files +- [toolbox](https://github.com/go-macaron/toolbox) - Health check, pprof, profile and statistic services +- [oauth2](https://github.com/go-macaron/oauth2) - OAuth 2.0 backend +- [switcher](https://github.com/go-macaron/switcher) - Multiple-site support +- [method](https://github.com/go-macaron/method) - HTTP method override +- [permissions2](https://github.com/xyproto/permissions2) - Cookies, users and permissions +- [renders](https://github.com/go-macaron/renders) - Beego-like render engine(Macaron has built-in template engine, this is another option) + +## Use Cases + +- [Gogs](http://gogs.io): A painless self-hosted Git Service +- [Peach](http://peachdocs.org): A modern web documentation server +- [Go Walker](https://gowalker.org): Go online API 
documentation +- [Switch](http://gopm.io): Gopm registry +- [YouGam](http://yougam.com): Online Forum +- [Critical Stack Intel](https://intel.criticalstack.com/): A 100% free intel marketplace from Critical Stack, Inc. + +## Getting Help + +- [API Reference](https://gowalker.org/gopkg.in/macaron.v1) +- [Documentation](http://go-macaron.com) +- [FAQs](http://go-macaron.com/docs/faqs) +- [![Join the chat at https://gitter.im/Unknwon/macaron](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-macaron/macaron?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +## Credits + +- Basic design of [Martini](https://github.com/go-martini/martini). +- Logo is modified by [@insionng](https://github.com/insionng) based on [Tribal Dragon](http://xtremeyamazaki.deviantart.com/art/Tribal-Dragon-27005087). + +## License + +This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text. diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/context.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/context.go similarity index 92% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/context.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/context.go index abf7ac90257..bb6caa41414 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/context.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/context.go @@ -1,4 +1,4 @@ -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -33,7 +33,7 @@ import ( "github.com/Unknwon/com" - "github.com/Unknwon/macaron/inject" + "github.com/go-macaron/inject" ) // Locale reprents a localization interface. @@ -176,11 +176,27 @@ func (ctx *Context) Redirect(location string, status ...int) { http.Redirect(ctx.Resp, ctx.Req.Request, location, code) } -// Query querys form parameter. 
-func (ctx *Context) Query(name string) string { - if ctx.Req.Form == nil { +// Maximum amount of memory to use when parsing a multipart form. +// Set this to whatever value you prefer; default is 10 MB. +var MaxMemory = int64(1024 * 1024 * 10) + +func (ctx *Context) parseForm() { + if ctx.Req.Form != nil { + return + } + + contentType := ctx.Req.Header.Get(_CONTENT_TYPE) + if (ctx.Req.Method == "POST" || ctx.Req.Method == "PUT") && + len(contentType) > 0 && strings.Contains(contentType, "multipart/form-data") { + ctx.Req.ParseMultipartForm(MaxMemory) + } else { ctx.Req.ParseForm() } +} + +// Query querys form parameter. +func (ctx *Context) Query(name string) string { + ctx.parseForm() return ctx.Req.Form.Get(name) } @@ -191,9 +207,7 @@ func (ctx *Context) QueryTrim(name string) string { // QueryStrings returns a list of results by given query name. func (ctx *Context) QueryStrings(name string) []string { - if ctx.Req.Form == nil { - ctx.Req.ParseForm() - } + ctx.parseForm() vals, ok := ctx.Req.Form[name] if !ok { @@ -229,7 +243,7 @@ func (ctx *Context) Params(name string) string { if len(name) == 0 { return "" } - if name[0] != '*' && name[0] != ':' { + if len(name) > 1 && name[0] != ':' { name = ":" + name } return ctx.params[name] @@ -273,6 +287,24 @@ func (ctx *Context) GetFile(name string) (multipart.File, *multipart.FileHeader, return ctx.Req.FormFile(name) } +// SaveToFile reads a file from request by field name and saves to given path. +func (ctx *Context) SaveToFile(name, savePath string) error { + fr, _, err := ctx.GetFile(name) + if err != nil { + return err + } + defer fr.Close() + + fw, err := os.OpenFile(savePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer fw.Close() + + _, err = io.Copy(fw, fr) + return err +} + // SetCookie sets given cookie value to response header. // FIXME: IE support? 
http://golanghome.com/post/620#reply2 func (ctx *Context) SetCookie(name string, value string, others ...interface{}) { diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/admin/index.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/admin/index.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/admin/index.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/admin/index.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/another_layout.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/another_layout.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/another_layout.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/another_layout.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/content.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/content.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/content.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/content.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/current_layout.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/current_layout.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/current_layout.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/current_layout.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/delims.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/delims.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/delims.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/delims.tmpl diff --git 
a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/hello.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/hello.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/hello.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/hello.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/hypertext.html b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/hypertext.html similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/hypertext.html rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/hypertext.html diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/layout.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/layout.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic/layout.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic/layout.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic2/hello.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic2/hello.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic2/hello.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic2/hello.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic2/hello2.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic2/hello2.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/basic2/hello2.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/basic2/hello2.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/custom_funcs/index.tmpl b/Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/custom_funcs/index.tmpl similarity index 100% rename from 
Godeps/_workspace/src/github.com/Unknwon/macaron/fixtures/custom_funcs/index.tmpl rename to Godeps/_workspace/src/gopkg.in/macaron.v1/fixtures/custom_funcs/index.tmpl diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/logger.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/logger.go similarity index 97% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/logger.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/logger.go index 6e0b25c9b1f..109b7b3330d 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/logger.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/logger.go @@ -1,5 +1,5 @@ // Copyright 2013 Martini Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/macaron.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/macaron.go similarity index 92% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/macaron.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/macaron.go index adbe9e3692d..94fd3f0ce20 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/macaron.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/macaron.go @@ -1,4 +1,6 @@ -// Copyright 2014 Unknwon +// +build go1.3 + +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -12,7 +14,7 @@ // License for the specific language governing permissions and limitations // under the License. -// Package macaron is a high productive and modular design web framework in Go. +// Package macaron is a high productive and modular web framework in Go. 
package macaron import ( @@ -26,10 +28,10 @@ import ( "github.com/Unknwon/com" "gopkg.in/ini.v1" - "github.com/Unknwon/macaron/inject" + "github.com/go-macaron/inject" ) -const _VERSION = "0.5.4.0318" +const _VERSION = "0.8.0.1013" func Version() string { return _VERSION @@ -64,7 +66,8 @@ type Macaron struct { handlers []Handler action Handler - urlPrefix string // For suburl support. + hasURLPrefix bool + urlPrefix string // For suburl support. *Router logger *log.Logger @@ -83,11 +86,10 @@ func NewWithLogger(out io.Writer) *Macaron { m.Router.m = m m.Map(m.logger) m.Map(defaultReturnHandler()) - m.notFound = func(resp http.ResponseWriter, req *http.Request) { - c := m.createContext(resp, req) - c.handlers = append(c.handlers, http.NotFound) - c.run() - } + m.NotFound(http.NotFound) + m.InternalServerError(func(rw http.ResponseWriter, err error) { + http.Error(rw, err.Error(), 500) + }) return m } @@ -162,7 +164,9 @@ func (m *Macaron) createContext(rw http.ResponseWriter, req *http.Request) *Cont // Useful if you want to control your own HTTP server. // Be aware that none of middleware will run without registering any router. func (m *Macaron) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - req.URL.Path = strings.TrimPrefix(req.URL.Path, m.urlPrefix) + if m.hasURLPrefix { + req.URL.Path = strings.TrimPrefix(req.URL.Path, m.urlPrefix) + } for _, h := range m.befores { if h(rw, req) { return @@ -203,7 +207,7 @@ func (m *Macaron) Run(args ...interface{}) { } addr := host + ":" + com.ToStr(port) - logger := m.Injector.GetVal(reflect.TypeOf(m.logger)).Interface().(*log.Logger) + logger := m.GetVal(reflect.TypeOf(m.logger)).Interface().(*log.Logger) logger.Printf("listening on %s (%s)\n", addr, Env) logger.Fatalln(http.ListenAndServe(addr, m)) } @@ -211,6 +215,7 @@ func (m *Macaron) Run(args ...interface{}) { // SetURLPrefix sets URL prefix of router layer, so that it support suburl. 
func (m *Macaron) SetURLPrefix(prefix string) { m.urlPrefix = prefix + m.hasURLPrefix = len(m.urlPrefix) > 0 } // ____ ____ .__ ___. .__ diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/macaronlogo.png b/Godeps/_workspace/src/gopkg.in/macaron.v1/macaronlogo.png similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/macaronlogo.png rename to Godeps/_workspace/src/gopkg.in/macaron.v1/macaronlogo.png diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/recovery.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/recovery.go similarity index 98% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/recovery.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/recovery.go index 6ff1659e8ad..ea3bdac045b 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/recovery.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/recovery.go @@ -1,5 +1,5 @@ // Copyright 2013 Martini Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. 
You may obtain @@ -23,7 +23,7 @@ import ( "net/http" "runtime" - "github.com/Unknwon/macaron/inject" + "github.com/go-macaron/inject" ) const ( diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/render.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/render.go similarity index 87% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/render.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/render.go index b0558c95742..3706e4fe414 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/render.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/render.go @@ -1,6 +1,5 @@ // Copyright 2013 Martini Authors -// Copyright 2013 oxtoacart -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -22,7 +21,6 @@ import ( "encoding/xml" "fmt" "html/template" - "io" "io/ioutil" "net/http" "os" @@ -35,52 +33,23 @@ import ( "github.com/Unknwon/com" ) -// BufferPool implements a pool of bytes.Buffers in the form of a bounded channel. -type BufferPool struct { - c chan *bytes.Buffer -} - -// NewBufferPool creates a new BufferPool bounded to the given size. -func NewBufferPool(size int) (bp *BufferPool) { - return &BufferPool{ - c: make(chan *bytes.Buffer, size), - } -} - -// Get gets a Buffer from the BufferPool, or creates a new one if none are available -// in the pool. -func (bp *BufferPool) Get() (b *bytes.Buffer) { - select { - case b = <-bp.c: - // reuse existing buffer - default: - // create new buffer - b = bytes.NewBuffer([]byte{}) - } - return -} - -// Put returns the given Buffer to the BufferPool. 
-func (bp *BufferPool) Put(b *bytes.Buffer) { - b.Reset() - bp.c <- b -} - const ( - ContentType = "Content-Type" - ContentLength = "Content-Length" - ContentBinary = "application/octet-stream" - ContentJSON = "application/json" - ContentHTML = "text/html" - CONTENT_PLAIN = "text/plain" - ContentXHTML = "application/xhtml+xml" - ContentXML = "text/xml" - defaultCharset = "UTF-8" + _CONTENT_TYPE = "Content-Type" + _CONTENT_LENGTH = "Content-Length" + _CONTENT_BINARY = "application/octet-stream" + _CONTENT_JSON = "application/json" + _CONTENT_HTML = "text/html" + _CONTENT_PLAIN = "text/plain" + _CONTENT_XHTML = "application/xhtml+xml" + _CONTENT_XML = "text/xml" + _DEFAULT_CHARSET = "UTF-8" ) var ( // Provides a temporary buffer to execute templates into and catch errors. - bufpool = NewBufferPool(64) + bufpool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, + } // Included helper functions for use when rendering html helperFuncs = template.FuncMap{ @@ -149,12 +118,12 @@ type ( Render interface { http.ResponseWriter - RW() http.ResponseWriter + SetResponseWriter(http.ResponseWriter) JSON(int, interface{}) JSONString(interface{}) (string, error) - RawData(int, []byte) - RenderData(int, []byte) + RawData(int, []byte) // Serve content as binary + PlainText(int, []byte) // Serve content as plain text HTML(int, string, interface{}, ...HTMLOptions) HTMLSet(int, string, string, interface{}, ...HTMLOptions) HTMLSetString(string, string, interface{}, ...HTMLOptions) (string, error) @@ -244,7 +213,7 @@ func PrepareCharset(charset string) string { return "; charset=" + charset } - return "; charset=" + defaultCharset + return "; charset=" + _DEFAULT_CHARSET } func GetExt(s string) string { @@ -321,7 +290,7 @@ func (ts *templateSet) GetDir(name string) string { return ts.dirs[name] } -func prepareOptions(options []RenderOptions) RenderOptions { +func prepareRenderOptions(options []RenderOptions) RenderOptions { var opt RenderOptions if len(options) > 0 { opt 
= options[0] @@ -335,7 +304,7 @@ func prepareOptions(options []RenderOptions) RenderOptions { opt.Extensions = []string{".tmpl", ".html"} } if len(opt.HTMLContentType) == 0 { - opt.HTMLContentType = ContentHTML + opt.HTMLContentType = _CONTENT_HTML } return opt @@ -401,11 +370,11 @@ func renderHandler(opt RenderOptions, tplSets []string) Handler { // If MACARON_ENV is set to "" or "development" then templates will be recompiled on every request. For more performance, set the // MACARON_ENV environment variable to "production". func Renderer(options ...RenderOptions) Handler { - return renderHandler(prepareOptions(options), []string{}) + return renderHandler(prepareRenderOptions(options), []string{}) } func Renderers(options RenderOptions, tplSets ...string) Handler { - return renderHandler(prepareOptions([]RenderOptions{options}), tplSets) + return renderHandler(prepareRenderOptions([]RenderOptions{options}), tplSets) } type TplRender struct { @@ -417,8 +386,8 @@ type TplRender struct { startTime time.Time } -func (r *TplRender) RW() http.ResponseWriter { - return r.ResponseWriter +func (r *TplRender) SetResponseWriter(rw http.ResponseWriter) { + r.ResponseWriter = rw } func (r *TplRender) JSON(status int, v interface{}) { @@ -437,7 +406,7 @@ func (r *TplRender) JSON(status int, v interface{}) { } // json rendered fine, write out the result - r.Header().Set(ContentType, ContentJSON+r.CompiledCharset) + r.Header().Set(_CONTENT_TYPE, _CONTENT_JSON+r.CompiledCharset) r.WriteHeader(status) if len(r.Opt.PrefixJSON) > 0 { r.Write(r.Opt.PrefixJSON) @@ -473,7 +442,7 @@ func (r *TplRender) XML(status int, v interface{}) { } // XML rendered fine, write out the result - r.Header().Set(ContentType, ContentXML+r.CompiledCharset) + r.Header().Set(_CONTENT_TYPE, _CONTENT_XML+r.CompiledCharset) r.WriteHeader(status) if len(r.Opt.PrefixXML) > 0 { r.Write(r.Opt.PrefixXML) @@ -482,23 +451,23 @@ func (r *TplRender) XML(status int, v interface{}) { } func (r *TplRender) data(status 
int, contentType string, v []byte) { - if r.Header().Get(ContentType) == "" { - r.Header().Set(ContentType, contentType) + if r.Header().Get(_CONTENT_TYPE) == "" { + r.Header().Set(_CONTENT_TYPE, contentType) } r.WriteHeader(status) r.Write(v) } func (r *TplRender) RawData(status int, v []byte) { - r.data(status, ContentBinary, v) + r.data(status, _CONTENT_BINARY, v) } -func (r *TplRender) RenderData(status int, v []byte) { - r.data(status, CONTENT_PLAIN, v) +func (r *TplRender) PlainText(status int, v []byte) { + r.data(status, _CONTENT_PLAIN, v) } func (r *TplRender) execute(t *template.Template, name string, data interface{}) (*bytes.Buffer, error) { - buf := bufpool.Get() + buf := bufpool.Get().(*bytes.Buffer) return buf, t.ExecuteTemplate(buf, name, data) } @@ -551,10 +520,10 @@ func (r *TplRender) renderHTML(status int, setName, tplName string, data interfa return } - r.Header().Set(ContentType, r.Opt.HTMLContentType+r.CompiledCharset) + r.Header().Set(_CONTENT_TYPE, r.Opt.HTMLContentType+r.CompiledCharset) r.WriteHeader(status) - io.Copy(r, out) + out.WriteTo(r) bufpool.Put(out) } diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/response_writer.go similarity index 100% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/response_writer.go diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/return_handler.go similarity index 74% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/return_handler.go index ea1e0447646..db6eec3e9fa 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/return_handler.go @@ -1,5 +1,5 @@ // Copyright 2013 Martini Authors -// Copyright 2014 Unknwon +// 
Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -19,7 +19,7 @@ import ( "net/http" "reflect" - "github.com/Unknwon/macaron/inject" + "github.com/go-macaron/inject" ) // ReturnHandler is a service that Martini provides that is called @@ -32,6 +32,11 @@ func canDeref(val reflect.Value) bool { return val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr } +func isError(val reflect.Value) bool { + _, ok := val.Interface().(error) + return ok +} + func isByteSlice(val reflect.Value) bool { return val.Kind() == reflect.Slice && val.Type().Elem().Kind() == reflect.Uint8 } @@ -39,21 +44,33 @@ func isByteSlice(val reflect.Value) bool { func defaultReturnHandler() ReturnHandler { return func(ctx *Context, vals []reflect.Value) { rv := ctx.GetVal(inject.InterfaceOf((*http.ResponseWriter)(nil))) - res := rv.Interface().(http.ResponseWriter) + resp := rv.Interface().(http.ResponseWriter) var respVal reflect.Value if len(vals) > 1 && vals[0].Kind() == reflect.Int { - res.WriteHeader(int(vals[0].Int())) + resp.WriteHeader(int(vals[0].Int())) respVal = vals[1] } else if len(vals) > 0 { respVal = vals[0] + + if isError(respVal) { + err := respVal.Interface().(error) + if err != nil { + ctx.internalServerError(ctx, err) + } + return + } else if canDeref(respVal) { + if respVal.IsNil() { + return // Ignore nil error + } + } } if canDeref(respVal) { respVal = respVal.Elem() } if isByteSlice(respVal) { - res.Write(respVal.Bytes()) + resp.Write(respVal.Bytes()) } else { - res.Write([]byte(respVal.String())) + resp.Write([]byte(respVal.String())) } } } diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/router.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/router.go similarity index 58% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/router.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/router.go index 
d2b394540b8..f9b421a3302 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/router.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/router.go @@ -1,4 +1,4 @@ -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain @@ -18,8 +18,6 @@ import ( "net/http" "strings" "sync" - - "github.com/Unknwon/com" ) var ( @@ -38,22 +36,22 @@ var ( // routeMap represents a thread-safe map for route tree. type routeMap struct { lock sync.RWMutex - routes map[string]map[string]bool + routes map[string]map[string]*Leaf } // NewRouteMap initializes and returns a new routeMap. func NewRouteMap() *routeMap { rm := &routeMap{ - routes: make(map[string]map[string]bool), + routes: make(map[string]map[string]*Leaf), } for m := range _HTTP_METHODS { - rm.routes[m] = make(map[string]bool) + rm.routes[m] = make(map[string]*Leaf) } return rm } -// isExist returns true if a route has been registered. -func (rm *routeMap) isExist(method, pattern string) bool { +// getLeaf returns Leaf object if a route has been registered. +func (rm *routeMap) getLeaf(method, pattern string) *Leaf { rm.lock.RLock() defer rm.lock.RUnlock() @@ -61,11 +59,11 @@ func (rm *routeMap) isExist(method, pattern string) bool { } // add adds new route to route tree map. -func (rm *routeMap) add(method, pattern string) { +func (rm *routeMap) add(method, pattern string, leaf *Leaf) { rm.lock.Lock() defer rm.lock.Unlock() - rm.routes[method][pattern] = true + rm.routes[method][pattern] = leaf } type group struct { @@ -75,34 +73,61 @@ type group struct { // Router represents a Macaron router layer. 
type Router struct { - m *Macaron - routers map[string]*Tree + m *Macaron + autoHead bool + routers map[string]*Tree *routeMap + namedRoutes map[string]*Leaf - groups []group - notFound http.HandlerFunc + groups []group + notFound http.HandlerFunc + internalServerError func(*Context, error) } func NewRouter() *Router { return &Router{ - routers: make(map[string]*Tree), - routeMap: NewRouteMap(), + routers: make(map[string]*Tree), + routeMap: NewRouteMap(), + namedRoutes: make(map[string]*Leaf), } } +// SetAutoHead sets the value who determines whether add HEAD method automatically +// when GET method is added. Combo router will not be affected by this value. +func (r *Router) SetAutoHead(v bool) { + r.autoHead = v +} + type Params map[string]string // Handle is a function that can be registered to a route to handle HTTP requests. // Like http.HandlerFunc, but has a third parameter for the values of wildcards (variables). type Handle func(http.ResponseWriter, *http.Request, Params) +// Route represents a wrapper of leaf route and upper level router. +type Route struct { + router *Router + leaf *Leaf +} + +// Name sets name of route. +func (r *Route) Name(name string) { + if len(name) == 0 { + panic("route name cannot be empty") + } else if r.router.namedRoutes[name] != nil { + panic("route with given name already exists") + } + r.router.namedRoutes[name] = r.leaf +} + // handle adds new route to the router tree. -func (r *Router) handle(method, pattern string, handle Handle) { +func (r *Router) handle(method, pattern string, handle Handle) *Route { method = strings.ToUpper(method) + var leaf *Leaf // Prevent duplicate routes. - if r.isExist(method, pattern) { - return + if leaf = r.getLeaf(method, pattern); leaf != nil { + return &Route{r, leaf} } // Validate HTTP methods. @@ -123,18 +148,19 @@ func (r *Router) handle(method, pattern string, handle Handle) { // Add to router tree. 
for m := range methods { if t, ok := r.routers[m]; ok { - t.AddRouter(pattern, handle) + leaf = t.Add(pattern, handle) } else { t := NewTree() - t.AddRouter(pattern, handle) + leaf = t.Add(pattern, handle) r.routers[m] = t } - r.add(m, pattern) + r.add(m, pattern, leaf) } + return &Route{r, leaf} } // Handle registers a new request handle with the given pattern, method and handlers. -func (r *Router) Handle(method string, pattern string, handlers []Handler) { +func (r *Router) Handle(method string, pattern string, handlers []Handler) *Route { if len(r.groups) > 0 { groupPattern := "" h := make([]Handler, 0) @@ -149,7 +175,7 @@ func (r *Router) Handle(method string, pattern string, handlers []Handler) { } validateHandlers(handlers) - r.handle(method, pattern, func(resp http.ResponseWriter, req *http.Request, params Params) { + return r.handle(method, pattern, func(resp http.ResponseWriter, req *http.Request, params Params) { c := r.m.createContext(resp, req) c.params = params c.handlers = make([]Handler, 0, len(r.m.handlers)+len(handlers)) @@ -166,64 +192,70 @@ func (r *Router) Group(pattern string, fn func(), h ...Handler) { } // Get is a shortcut for r.Handle("GET", pattern, handlers) -func (r *Router) Get(pattern string, h ...Handler) { - r.Handle("GET", pattern, h) +func (r *Router) Get(pattern string, h ...Handler) (leaf *Route) { + leaf = r.Handle("GET", pattern, h) + if r.autoHead { + r.Head(pattern, h...) 
+ } + return leaf } // Patch is a shortcut for r.Handle("PATCH", pattern, handlers) -func (r *Router) Patch(pattern string, h ...Handler) { - r.Handle("PATCH", pattern, h) +func (r *Router) Patch(pattern string, h ...Handler) *Route { + return r.Handle("PATCH", pattern, h) } // Post is a shortcut for r.Handle("POST", pattern, handlers) -func (r *Router) Post(pattern string, h ...Handler) { - r.Handle("POST", pattern, h) +func (r *Router) Post(pattern string, h ...Handler) *Route { + return r.Handle("POST", pattern, h) } // Put is a shortcut for r.Handle("PUT", pattern, handlers) -func (r *Router) Put(pattern string, h ...Handler) { - r.Handle("PUT", pattern, h) +func (r *Router) Put(pattern string, h ...Handler) *Route { + return r.Handle("PUT", pattern, h) } // Delete is a shortcut for r.Handle("DELETE", pattern, handlers) -func (r *Router) Delete(pattern string, h ...Handler) { - r.Handle("DELETE", pattern, h) +func (r *Router) Delete(pattern string, h ...Handler) *Route { + return r.Handle("DELETE", pattern, h) } // Options is a shortcut for r.Handle("OPTIONS", pattern, handlers) -func (r *Router) Options(pattern string, h ...Handler) { - r.Handle("OPTIONS", pattern, h) +func (r *Router) Options(pattern string, h ...Handler) *Route { + return r.Handle("OPTIONS", pattern, h) } // Head is a shortcut for r.Handle("HEAD", pattern, handlers) -func (r *Router) Head(pattern string, h ...Handler) { - r.Handle("HEAD", pattern, h) +func (r *Router) Head(pattern string, h ...Handler) *Route { + return r.Handle("HEAD", pattern, h) } // Any is a shortcut for r.Handle("*", pattern, handlers) -func (r *Router) Any(pattern string, h ...Handler) { - r.Handle("*", pattern, h) +func (r *Router) Any(pattern string, h ...Handler) *Route { + return r.Handle("*", pattern, h) } // Route is a shortcut for same handlers but different HTTP methods. 
// // Example: // m.Route("/", "GET,POST", h) -func (r *Router) Route(pattern, methods string, h ...Handler) { +func (r *Router) Route(pattern, methods string, h ...Handler) (route *Route) { for _, m := range strings.Split(methods, ",") { - r.Handle(strings.TrimSpace(m), pattern, h) + route = r.Handle(strings.TrimSpace(m), pattern, h) } + return route } // Combo returns a combo router. func (r *Router) Combo(pattern string, h ...Handler) *ComboRouter { - return &ComboRouter{r, pattern, h, map[string]bool{}} + return &ComboRouter{r, pattern, h, map[string]bool{}, nil} } // Configurable http.HandlerFunc which is called when no matching route is // found. If it is not set, http.NotFound is used. // Be sure to set 404 response code in your handler. func (r *Router) NotFound(handlers ...Handler) { + validateHandlers(handlers) r.notFound = func(rw http.ResponseWriter, req *http.Request) { c := r.m.createContext(rw, req) c.handlers = append(r.m.handlers, handlers...) @@ -231,16 +263,25 @@ func (r *Router) NotFound(handlers ...Handler) { } } +// Configurable handler which is called when route handler returns +// error. If it is not set, default handler is used. +// Be sure to set 500 response code in your handler. +func (r *Router) InternalServerError(handlers ...Handler) { + validateHandlers(handlers) + r.internalServerError = func(c *Context, err error) { + c.index = 0 + c.handlers = handlers + c.Map(err) + c.run() + } +} + func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { if t, ok := r.routers[req.Method]; ok { - h, p := t.Match(req.URL.Path) - if h != nil { - if splat, ok := p[":splat"]; ok { - p["*"] = p[":splat"] // Better name. - splatlist := strings.Split(splat, "/") - for k, v := range splatlist { - p[com.ToStr(k)] = v - } + h, p, ok := t.Match(req.URL.Path) + if ok { + if splat, ok := p["*0"]; ok { + p["*"] = splat // Easy name. 
} h(rw, req, p) return @@ -250,12 +291,23 @@ func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { r.notFound(rw, req) } +// URLFor builds path part of URL by given pair values. +func (r *Router) URLFor(name string, pairs ...string) string { + leaf, ok := r.namedRoutes[name] + if !ok { + panic("route with given name does not exists: " + name) + } + return leaf.URLPath(pairs...) +} + // ComboRouter represents a combo router. type ComboRouter struct { router *Router pattern string handlers []Handler methods map[string]bool // Registered methods. + + lastRoute *Route } func (cr *ComboRouter) checkMethod(name string) { @@ -265,9 +317,9 @@ func (cr *ComboRouter) checkMethod(name string) { cr.methods[name] = true } -func (cr *ComboRouter) route(fn func(string, ...Handler), method string, h ...Handler) *ComboRouter { +func (cr *ComboRouter) route(fn func(string, ...Handler) *Route, method string, h ...Handler) *ComboRouter { cr.checkMethod(method) - fn(cr.pattern, append(cr.handlers, h...)...) + cr.lastRoute = fn(cr.pattern, append(cr.handlers, h...)...) return cr } @@ -298,3 +350,11 @@ func (cr *ComboRouter) Options(h ...Handler) *ComboRouter { func (cr *ComboRouter) Head(h ...Handler) *ComboRouter { return cr.route(cr.router.Head, "HEAD", h...) } + +// Name sets name of ComboRouter route. 
+func (cr *ComboRouter) Name(name string) { + if cr.lastRoute == nil { + panic("no corresponding route to be named") + } + cr.lastRoute.Name(name) +} diff --git a/Godeps/_workspace/src/github.com/Unknwon/macaron/static.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/static.go similarity index 99% rename from Godeps/_workspace/src/github.com/Unknwon/macaron/static.go rename to Godeps/_workspace/src/gopkg.in/macaron.v1/static.go index 69d523d6203..4ff8342fc5e 100644 --- a/Godeps/_workspace/src/github.com/Unknwon/macaron/static.go +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/static.go @@ -1,5 +1,5 @@ // Copyright 2013 Martini Authors -// Copyright 2014 Unknwon +// Copyright 2014 The Macaron Authors // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain diff --git a/Godeps/_workspace/src/gopkg.in/macaron.v1/tree.go b/Godeps/_workspace/src/gopkg.in/macaron.v1/tree.go new file mode 100644 index 00000000000..8bba72fb340 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/macaron.v1/tree.go @@ -0,0 +1,379 @@ +// Copyright 2015 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package macaron + +import ( + "regexp" + "strings" + + "github.com/Unknwon/com" +) + +type patternType int8 + +const ( + _PATTERN_STATIC patternType = iota // /home + _PATTERN_REGEXP // /:id([0-9]+) + _PATTERN_PATH_EXT // /*.* + _PATTERN_HOLDER // /:user + _PATTERN_MATCH_ALL // /* +) + +// Leaf represents a leaf route information. +type Leaf struct { + parent *Tree + + typ patternType + pattern string + rawPattern string // Contains wildcard instead of regexp + wildcards []string + reg *regexp.Regexp + optional bool + + handle Handle +} + +var wildcardPattern = regexp.MustCompile(`:[a-zA-Z0-9]+`) + +func isSpecialRegexp(pattern, regStr string, pos []int) bool { + return len(pattern) >= pos[1]+len(regStr) && pattern[pos[1]:pos[1]+len(regStr)] == regStr +} + +// getNextWildcard tries to find next wildcard and update pattern with corresponding regexp. +func getNextWildcard(pattern string) (wildcard, _ string) { + pos := wildcardPattern.FindStringIndex(pattern) + if pos == nil { + return "", pattern + } + wildcard = pattern[pos[0]:pos[1]] + + // Reach last character or no regexp is given. + if len(pattern) == pos[1] { + return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1) + } else if pattern[pos[1]] != '(' { + switch { + case isSpecialRegexp(pattern, ":int", pos): + pattern = strings.Replace(pattern, ":int", "([0-9]+)", 1) + case isSpecialRegexp(pattern, ":string", pos): + pattern = strings.Replace(pattern, ":string", "([\\w]+)", 1) + default: + return wildcard, strings.Replace(pattern, wildcard, `(.+)`, 1) + } + } + + // Cut out placeholder directly. + return wildcard, pattern[:pos[0]] + pattern[pos[1]:] +} + +func getWildcards(pattern string) (string, []string) { + wildcards := make([]string, 0, 2) + + // Keep getting next wildcard until nothing is left. 
+ var wildcard string + for { + wildcard, pattern = getNextWildcard(pattern) + if len(wildcard) > 0 { + wildcards = append(wildcards, wildcard) + } else { + break + } + } + + return pattern, wildcards +} + +// getRawPattern removes all regexp but keeps wildcards for building URL path. +func getRawPattern(rawPattern string) string { + rawPattern = strings.Replace(rawPattern, ":int", "", -1) + rawPattern = strings.Replace(rawPattern, ":string", "", -1) + + for { + startIdx := strings.Index(rawPattern, "(") + if startIdx == -1 { + break + } + + closeIdx := strings.Index(rawPattern, ")") + if closeIdx > -1 { + rawPattern = rawPattern[:startIdx] + rawPattern[closeIdx+1:] + } + } + return rawPattern +} + +func checkPattern(pattern string) (typ patternType, rawPattern string, wildcards []string, reg *regexp.Regexp) { + pattern = strings.TrimLeft(pattern, "?") + rawPattern = getRawPattern(pattern) + + if pattern == "*" { + typ = _PATTERN_MATCH_ALL + } else if pattern == "*.*" { + typ = _PATTERN_PATH_EXT + } else if strings.Contains(pattern, ":") { + typ = _PATTERN_REGEXP + pattern, wildcards = getWildcards(pattern) + if pattern == "(.+)" { + typ = _PATTERN_HOLDER + } else { + reg = regexp.MustCompile(pattern) + } + } + return typ, rawPattern, wildcards, reg +} + +func NewLeaf(parent *Tree, pattern string, handle Handle) *Leaf { + typ, rawPattern, wildcards, reg := checkPattern(pattern) + optional := false + if len(pattern) > 0 && pattern[0] == '?' { + optional = true + } + return &Leaf{parent, typ, pattern, rawPattern, wildcards, reg, optional, handle} +} + +// URLPath build path part of URL by given pair values. 
+func (l *Leaf) URLPath(pairs ...string) string { + if len(pairs)%2 != 0 { + panic("number of pairs does not match") + } + + urlPath := l.rawPattern + parent := l.parent + for parent != nil { + urlPath = parent.rawPattern + "/" + urlPath + parent = parent.parent + } + for i := 0; i < len(pairs); i += 2 { + if len(pairs[i]) == 0 { + panic("pair value cannot be empty: " + com.ToStr(i)) + } else if pairs[i][0] != ':' && pairs[i] != "*" && pairs[i] != "*.*" { + pairs[i] = ":" + pairs[i] + } + urlPath = strings.Replace(urlPath, pairs[i], pairs[i+1], 1) + } + return urlPath +} + +// Tree represents a router tree in Macaron. +type Tree struct { + parent *Tree + + typ patternType + pattern string + rawPattern string + wildcards []string + reg *regexp.Regexp + + subtrees []*Tree + leaves []*Leaf +} + +func NewSubtree(parent *Tree, pattern string) *Tree { + typ, rawPattern, wildcards, reg := checkPattern(pattern) + return &Tree{parent, typ, pattern, rawPattern, wildcards, reg, make([]*Tree, 0, 5), make([]*Leaf, 0, 5)} +} + +func NewTree() *Tree { + return NewSubtree(nil, "") +} + +func (t *Tree) addLeaf(pattern string, handle Handle) *Leaf { + for i := 0; i < len(t.leaves); i++ { + if t.leaves[i].pattern == pattern { + return t.leaves[i] + } + } + + leaf := NewLeaf(t, pattern, handle) + + // Add exact same leaf to grandparent/parent level without optional. + if leaf.optional { + parent := leaf.parent + if parent.parent != nil { + parent.parent.addLeaf(parent.pattern, handle) + } else { + parent.addLeaf("", handle) // Root tree can add as empty pattern. + } + } + + i := 0 + for ; i < len(t.leaves); i++ { + if leaf.typ < t.leaves[i].typ { + break + } + } + + if i == len(t.leaves) { + t.leaves = append(t.leaves, leaf) + } else { + t.leaves = append(t.leaves[:i], append([]*Leaf{leaf}, t.leaves[i:]...)...) 
+ } + return leaf +} + +func (t *Tree) addSubtree(segment, pattern string, handle Handle) *Leaf { + for i := 0; i < len(t.subtrees); i++ { + if t.subtrees[i].pattern == segment { + return t.subtrees[i].addNextSegment(pattern, handle) + } + } + + subtree := NewSubtree(t, segment) + i := 0 + for ; i < len(t.subtrees); i++ { + if subtree.typ < t.subtrees[i].typ { + break + } + } + + if i == len(t.subtrees) { + t.subtrees = append(t.subtrees, subtree) + } else { + t.subtrees = append(t.subtrees[:i], append([]*Tree{subtree}, t.subtrees[i:]...)...) + } + return subtree.addNextSegment(pattern, handle) +} + +func (t *Tree) addNextSegment(pattern string, handle Handle) *Leaf { + pattern = strings.TrimPrefix(pattern, "/") + + i := strings.Index(pattern, "/") + if i == -1 { + return t.addLeaf(pattern, handle) + } + return t.addSubtree(pattern[:i], pattern[i+1:], handle) +} + +func (t *Tree) Add(pattern string, handle Handle) *Leaf { + pattern = strings.TrimSuffix(pattern, "/") + return t.addNextSegment(pattern, handle) +} + +func (t *Tree) matchLeaf(globLevel int, url string, params Params) (Handle, bool) { + for i := 0; i < len(t.leaves); i++ { + switch t.leaves[i].typ { + case _PATTERN_STATIC: + if t.leaves[i].pattern == url { + return t.leaves[i].handle, true + } + case _PATTERN_REGEXP: + results := t.leaves[i].reg.FindStringSubmatch(url) + // Number of results and wildcards should be exact same. 
+ if len(results)-1 != len(t.leaves[i].wildcards) { + break + } + + for j := 0; j < len(t.leaves[i].wildcards); j++ { + params[t.leaves[i].wildcards[j]] = results[j+1] + } + return t.leaves[i].handle, true + case _PATTERN_PATH_EXT: + j := strings.LastIndex(url, ".") + if j > -1 { + params[":path"] = url[:j] + params[":ext"] = url[j+1:] + } else { + params[":path"] = url + } + return t.leaves[i].handle, true + case _PATTERN_HOLDER: + params[t.leaves[i].wildcards[0]] = url + return t.leaves[i].handle, true + case _PATTERN_MATCH_ALL: + params["*"] = url + params["*"+com.ToStr(globLevel)] = url + return t.leaves[i].handle, true + } + } + return nil, false +} + +func (t *Tree) matchSubtree(globLevel int, segment, url string, params Params) (Handle, bool) { + for i := 0; i < len(t.subtrees); i++ { + switch t.subtrees[i].typ { + case _PATTERN_STATIC: + if t.subtrees[i].pattern == segment { + if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok { + return handle, true + } + } + case _PATTERN_REGEXP: + results := t.subtrees[i].reg.FindStringSubmatch(segment) + if len(results)-1 != len(t.subtrees[i].wildcards) { + break + } + + for j := 0; j < len(t.subtrees[i].wildcards); j++ { + params[t.subtrees[i].wildcards[j]] = results[j+1] + } + if handle, ok := t.subtrees[i].matchNextSegment(globLevel, url, params); ok { + return handle, true + } + case _PATTERN_HOLDER: + if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok { + params[t.subtrees[i].wildcards[0]] = segment + return handle, true + } + case _PATTERN_MATCH_ALL: + if handle, ok := t.subtrees[i].matchNextSegment(globLevel+1, url, params); ok { + params["*"+com.ToStr(globLevel)] = segment + return handle, true + } + } + } + + if len(t.leaves) > 0 { + leaf := t.leaves[len(t.leaves)-1] + if leaf.typ == _PATTERN_PATH_EXT { + url = segment + "/" + url + j := strings.LastIndex(url, ".") + if j > -1 { + params[":path"] = url[:j] + params[":ext"] = url[j+1:] + } else { + 
params[":path"] = url + } + return leaf.handle, true + } else if leaf.typ == _PATTERN_MATCH_ALL { + params["*"] = segment + "/" + url + params["*"+com.ToStr(globLevel)] = segment + "/" + url + return leaf.handle, true + } + } + return nil, false +} + +func (t *Tree) matchNextSegment(globLevel int, url string, params Params) (Handle, bool) { + i := strings.Index(url, "/") + if i == -1 { + return t.matchLeaf(globLevel, url, params) + } + return t.matchSubtree(globLevel, url[:i], url[i+1:], params) +} + +func (t *Tree) Match(url string) (Handle, Params, bool) { + url = strings.TrimPrefix(url, "/") + url = strings.TrimSuffix(url, "/") + params := make(Params) + handle, ok := t.matchNextSegment(0, url, params) + return handle, params, ok +} + +// MatchTest returns true if given URL is matched by given pattern. +func MatchTest(pattern, url string) bool { + t := NewTree() + t.Add(pattern, nil) + _, _, ok := t.Match(url) + return ok +} diff --git a/pkg/api/api.go b/pkg/api/api.go index 839978845ab..2f22617d4a7 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -1,11 +1,11 @@ package api import ( - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/api/dtos" "github.com/grafana/grafana/pkg/middleware" m "github.com/grafana/grafana/pkg/models" - "github.com/macaron-contrib/binding" + "github.com/go-macaron/binding" ) // Register adds http routes diff --git a/pkg/api/common.go b/pkg/api/common.go index 28e95866402..0f56faf85c3 100644 --- a/pkg/api/common.go +++ b/pkg/api/common.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net/http" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/metrics" "github.com/grafana/grafana/pkg/middleware" diff --git a/pkg/api/dataproxy.go b/pkg/api/dataproxy.go index 7193198155f..77742a7a9f3 100644 --- a/pkg/api/dataproxy.go +++ b/pkg/api/dataproxy.go @@ -102,6 +102,6 @@ func ProxyDataSourceRequest(c *middleware.Context) { proxyPath := 
c.Params("*") proxy := NewReverseProxy(ds, proxyPath, targetUrl) proxy.Transport = dataProxyTransport - proxy.ServeHTTP(c.RW(), c.Req.Request) + proxy.ServeHTTP(c.Resp, c.Req.Request) } } diff --git a/pkg/api/static/static.go b/pkg/api/static/static.go index 43ba6a32b20..7a61c85b4f3 100644 --- a/pkg/api/static/static.go +++ b/pkg/api/static/static.go @@ -24,7 +24,7 @@ import ( "strings" "sync" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" ) var Root string diff --git a/pkg/cmd/web.go b/pkg/cmd/web.go index 1debde98a0e..1d17b29ab42 100644 --- a/pkg/cmd/web.go +++ b/pkg/cmd/web.go @@ -8,7 +8,7 @@ import ( "net/http" "path" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/api" "github.com/grafana/grafana/pkg/api/static" diff --git a/pkg/middleware/auth.go b/pkg/middleware/auth.go index 2497183b356..be3415d990b 100644 --- a/pkg/middleware/auth.go +++ b/pkg/middleware/auth.go @@ -4,7 +4,7 @@ import ( "net/url" "strings" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" diff --git a/pkg/middleware/logger.go b/pkg/middleware/logger.go index 9e0c91f69e8..cc8a49b6dc3 100644 --- a/pkg/middleware/logger.go +++ b/pkg/middleware/logger.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/log" "github.com/grafana/grafana/pkg/setting" ) diff --git a/pkg/middleware/middleware.go b/pkg/middleware/middleware.go index 8704ec5a787..db394bce746 100644 --- a/pkg/middleware/middleware.go +++ b/pkg/middleware/middleware.go @@ -4,7 +4,7 @@ import ( "strconv" "strings" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/apikeygen" diff --git a/pkg/middleware/middleware_test.go b/pkg/middleware/middleware_test.go index 97d369d00cf..309dcfab74e 100644 --- a/pkg/middleware/middleware_test.go +++ 
b/pkg/middleware/middleware_test.go @@ -7,7 +7,7 @@ import ( "path/filepath" "testing" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/bus" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" diff --git a/pkg/middleware/quota.go b/pkg/middleware/quota.go index f6ba74d77df..8ef58abd7e1 100644 --- a/pkg/middleware/quota.go +++ b/pkg/middleware/quota.go @@ -3,7 +3,7 @@ package middleware import ( "fmt" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/log" m "github.com/grafana/grafana/pkg/models" diff --git a/pkg/middleware/session.go b/pkg/middleware/session.go index bad912aa46f..f58cebcd6f2 100644 --- a/pkg/middleware/session.go +++ b/pkg/middleware/session.go @@ -3,12 +3,12 @@ package middleware import ( "time" - "github.com/Unknwon/macaron" - "github.com/macaron-contrib/session" - _ "github.com/macaron-contrib/session/memcache" - _ "github.com/macaron-contrib/session/mysql" - _ "github.com/macaron-contrib/session/postgres" - _ "github.com/macaron-contrib/session/redis" + "gopkg.in/macaron.v1" + "github.com/go-macaron/session" + _ "github.com/go-macaron/session/memcache" + _ "github.com/go-macaron/session/mysql" + _ "github.com/go-macaron/session/postgres" + _ "github.com/go-macaron/session/redis" ) const ( diff --git a/pkg/middleware/util.go b/pkg/middleware/util.go index 0823e4de4bf..6209b2134dd 100644 --- a/pkg/middleware/util.go +++ b/pkg/middleware/util.go @@ -3,11 +3,12 @@ package middleware import ( "strings" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" + "github.com/go-macaron/gzip" ) func Gziper() macaron.Handler { - macaronGziper := macaron.Gziper() + macaronGziper := gzip.Gziper() return func(ctx *macaron.Context) { requestPath := ctx.Req.URL.RequestURI() diff --git a/pkg/middleware/validate_host.go b/pkg/middleware/validate_host.go index 56e0a8ee35a..4d45c2fad6b 100644 --- 
a/pkg/middleware/validate_host.go +++ b/pkg/middleware/validate_host.go @@ -3,7 +3,7 @@ package middleware import ( "strings" - "github.com/Unknwon/macaron" + "gopkg.in/macaron.v1" "github.com/grafana/grafana/pkg/setting" ) diff --git a/pkg/setting/setting.go b/pkg/setting/setting.go index 420e06aa6b3..3de0e4b558f 100644 --- a/pkg/setting/setting.go +++ b/pkg/setting/setting.go @@ -16,7 +16,7 @@ import ( "runtime" "strings" - "github.com/macaron-contrib/session" + "github.com/go-macaron/session" "gopkg.in/ini.v1" "github.com/grafana/grafana/pkg/log"