Revert "godeps: update everything to master"
This reverts commit dc758b1c819cc9dd638e54e81df62e8f8c52a393.

Godeps/Godeps.json (generated; 76 lines changed)

Each affected entry's pinned revision rolls back from the value introduced by the "update everything to master" commit to the previously vendored one (removed -> restored):

  github.com/Sirupsen/logrus: 6ba91e24c498b49d0363c723e9e2ab2b5b8fd012 (v0.8.2-2-g6ba91e2) -> 26709e2714106fb8ad40b773b711ebce25b78914 (v0.7.3-2-g26709e2)
  github.com/cenkalti/backoff: 6c45d6bc1e78d94431dff8fc28a99f20bafa355a -> 9831e1e25c874e0a0601b6dc43641071414eec7a
  github.com/cheggaaa/pb: d7729fd7ec1372c15b83db39834bf842bf2d69fb -> e8c7cc515bfde3e267957a3b110080ceed51354e
  github.com/crowdmob/goamz/aws: 3a06871fe9fc0281ca90f3a7d97258d042ed64c0 -> 82345796204222aa56be89cf930c316b1297f906
  github.com/crowdmob/goamz/s3: 3a06871fe9fc0281ca90f3a7d97258d042ed64c0 -> 82345796204222aa56be89cf930c316b1297f906
  github.com/fd/go-nat: dcaf50131e4810440bed2cbb6f7f32c4f4cc95dd -> 50e7633d5f27d81490026a13e5b92d2e42d8c6bb
  github.com/fzzy/radix/redis: 031cc11e9800a2626ee2ae629655a922b630a07d (v0.5.6) -> 27a863cdffdb0998d13e1e11992b18489aeeaa25 (v0.5.1)
  github.com/gogo/protobuf/io: b9e369e8ffb6773efc654ea13594566404314ee1 -> 0ac967c269268f1af7d9bcc7927ccc9a589b2b36
  github.com/gogo/protobuf/proto: b9e369e8ffb6773efc654ea13594566404314ee1 -> 0ac967c269268f1af7d9bcc7927ccc9a589b2b36
  github.com/hashicorp/golang-lru: 995efda3e073b6946b175ed93901d729ad47466a -> 253b2dc1ca8bae42c3b5b6e53dd2eab1a7551116
  github.com/hashicorp/yamux: b2e55852ddaf823a85c67f798080eb7d08acd71d -> 9feabe6854fadca1abec9cd3bd2a613fe9a34000
  github.com/howeyc/fsnotify: 4894fe7efedeeef21891033e1cce3b23b9af7ad2 (v0.9.0-14-g4894fe7) -> 6b1ef893dc11e0447abda6da20a5203481878dda (v0.9.0-11-g6b1ef89)
  github.com/huin/goupnp: c57ae84388ab59076fd547f1abeab71c2edb0a21 -> 223008361153d7d434c1f0ac990cd3fcae6931f5
  github.com/jackpal/go-nat-pmp: 46523a463303c6ede3ddfe45bde1c7ed52ebaacd -> a45aa3d54aef73b504e15eb71bea0e5565b5e6e1
  github.com/jbenet/go-peerstream: 675a5da7e3500d73c2edc84565d6c46b540ad1b4 -> 8d52ed2801410a2af995b4e87660272d11c8a9a4
  github.com/kardianos/osext: 6e7f843663477789fac7c02def0d0909e969b4e5 -> 8fef92e41e22a70e700a96b29f066cda30ea24ef
  github.com/miekg/dns: bb1103f648f811d2018d4bedcb2d4b2bce34a0f1 -> 82ffc45b1f84ff71bd1cebed8b210118ce3d181e
  github.com/syndtr/goleveldb/leveldb: 315fcfb05d4d46d4354b313d146ef688dda272a9 -> 4875955338b0a434238a31165cb87255ab6e9e4a
  github.com/whyrusleeping/iptb: fa9bbc437fae1c3a9410e7f1bc3dd02f0449279a -> 3970c95a864f1a40037f796ff596607ce8ae43be
  golang.org/x/crypto/blowfish: ce6bda69189e9f4ff278a5e181691cd695f753ae -> c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3
  golang.org/x/crypto/sha3: ce6bda69189e9f4ff278a5e181691cd695f753ae -> c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3
  golang.org/x/net/html: 589db58a47224e5786650dac2677b9c302bab6c2 -> ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad
  golang.org/x/net/internal/iana: 589db58a47224e5786650dac2677b9c302bab6c2 -> ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad
  golang.org/x/net/ipv4: 589db58a47224e5786650dac2677b9c302bab6c2 -> ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad
  golang.org/x/text/transform: c93e7c9fff19fb9139b5ab04ce041833add0134e -> ff8eb9a34a5cbb9941ffc6f84a19a8014c2646ad
  gopkg.in/natefinch/lumberjack.v2: 588a21fb0fa0ebdfde42670fa214576b6f0f22df (v1.0-15-g588a21f) -> d28785c2f27cd682d872df46ccd8232843629f54 (v1.0-12-gd28785c)

Entries removed outright by the revert: github.com/jackpal/gateway (192609c58b8985e645cbe82ddcb28a4362ca0fdc), golang.org/x/net/context (589db58a47224e5786650dac2677b9c302bab6c2), golang.org/x/net/ipv6 (589db58a47224e5786650dac2677b9c302bab6c2) and golang.org/x/text/encoding (c93e7c9fff19fb9139b5ab04ce041833add0134e). Other entries visible in the hunks (github.com/bren2010/proquint, github.com/codahale/hdrhistogram, github.com/cryptix/mdns, github.com/h2so5/utp, github.com/inconshreveable/go-update, github.com/jbenet/go-base58, github.com/jbenet/go-random, github.com/kr/binarydist, github.com/mitchellh/go-homedir, github.com/syndtr/gosnappy/snappy, gopkg.in/fsnotify.v1 and gopkg.in/tomb.v1) are unchanged context.

The remaining files in the commit sync the vendored sources under Godeps/_workspace to these restored revisions.

Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md (generated, vendored; 14 lines changed)
Drops the 0.8.0, 0.8.1 and 0.8.2 changelog entries (stderr default, the Fatal-family exit fixes, the sentry `*http.Request` field, Windows colour handling), leaving 0.7.3 as the newest release described.

Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md (generated, vendored; 7 lines changed)
The examples revert to the older wording: `log.Formatter = new(logrus.TextFormatter)` instead of `new(&log.TextFormatter{})`, the syslog hook imported without the `logrus_syslog` alias, and `log.SetFormatter(logrus.TextFormatter)` instead of `log.SetFormatter(&log.TextFormatter{})`. The Raygun entry is dropped from the hooks table.

Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go (generated, vendored; 2 lines changed)
Removes the unconditional os.Exit(1) at the end of Entry.Fatalf and Entry.Fatalln, so these again exit only when the logger level is at least FatalLevel (undoing the 0.8.x "fix not exiting on Fatalf and Fatalln" change noted in the reverted CHANGELOG).

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md (generated, vendored; 7 lines changed)
The special-fields section reverts to listing only server_name and logger; the http_request field and its description are removed, along with the backtick formatting around the field names.

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go (generated, vendored; 22 lines changed)
Removes the net/http import, the getAndDelRequest helper and the http_request handling in SentryHook.Fire (which attached the request to the packet via raven.NewHttp); the Fire doc comment again lists only logger and server_name as special fields.

Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go (generated, vendored; 7 lines changed)
TestSpecialFields no longer builds an http.Request or passes the http_request field; it sets only server_name and logger.

Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go (generated, vendored; 7 lines changed)
Format reverts to writing the default timestamp layout into f.TimestampFormat and reading that field directly, instead of copying it into a local timestampFormat variable.

Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go (generated, vendored; 5 lines changed)
New() defaults Out back to os.Stdout (from os.Stderr), and the unconditional os.Exit(1) calls are removed from Logger.Fatalf, Logger.Fatal and Logger.Fatalln.
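
Because the default output stream differs between the two vendored logrus revisions (os.Stdout here, os.Stderr after the update), code that cares about log routing can pin it explicitly. A minimal sketch, assuming only the constructor and exported fields visible in this diff; the upstream import path is used, while inside go-ipfs the Godeps/_workspace path would apply:

```go
package main

import (
	"os"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Pinning Out makes the program independent of whether the vendored
	// logrus defaults to os.Stdout (this reverted revision) or os.Stderr
	// (the newer one).
	logger := log.New()
	logger.Out = os.Stderr
	logger.Level = log.InfoLevel
	logger.Info("log destination pinned to stderr")
}
```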

Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go (generated, vendored; 4 lines changed)
Drops the runtime import and the Windows colour guard: isColored reverts to (f.ForceColors || isTerminal) && !f.DisableColors, without the isColorTerminal check that disabled colours on Windows.

Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go (generated, vendored; 4 lines changed)
NewExponentialBackOff reverts to returning the struct literal directly; the newer code assigned it to b, called b.Reset() and then returned it.
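
Since the reverted constructor no longer calls Reset(), callers that want identical behaviour under either vendored revision can reset the back-off themselves before use. A minimal sketch, assuming only the identifiers visible in this diff (NewExponentialBackOff, Reset, NextBackOff, Stop):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	b := backoff.NewExponentialBackOff()
	// The newer constructor calls Reset() internally; the reverted one does
	// not, so calling it explicitly keeps the two revisions equivalent.
	b.Reset()

	for i := 0; i < 5; i++ {
		d := b.NextBackOff()
		if d == backoff.Stop {
			break // MaxElapsedTime exceeded
		}
		fmt.Println("retrying in", d)
		time.Sleep(d)
	}
}
```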

Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go (generated, vendored; 3 lines changed)
TestMaxElapsedTime gets back an early check that NextBackOff() returns Stop (failing with "error2" otherwise) before the test resets exp.startTime.

Godeps/_workspace/src/github.com/cheggaaa/pb/README.md (generated, vendored; 4 lines changed)
The width examples revert to the misspelled method names bar.SetWith(80) and bar.SetMaxWith(80); the newer README had corrected them to SetWidth and SetMaxWidth.

Godeps/_workspace/src/github.com/cheggaaa/pb/format.go (generated, vendored; 13 lines changed)
Drops the Units type: U_NO and U_BYTES become plain integer constants again, Format takes units int, and the default branch reverts to strconv.Itoa(int(i)) instead of strconv.FormatInt(i, 10).

Godeps/_workspace/src/github.com/cheggaaa/pb/pb.go (generated, vendored; 133 lines changed)
Reverts a wider refactor of the progress bar: New, New64, StartNew and the Prefix/Postfix/Format/SetRefreshRate/SetUnits/SetMaxWidth/SetWidth setters go back to named return values; the Units field becomes an int; isFinish reverts from a channel closed under a sync.Once guard to a plain bool, so Finish loses its protection against multiple calls; Start no longer returns *ProgressBar and Set64 is removed (Set stores the int64 conversion itself); the time-left and speed calculations use current rather than currentFromStart, ignoring startValue; and writer reverts from a select on isFinish/time.After to a loop that checks the bool and sleeps for RefreshRate.
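
With the reverted API, Start no longer returns the bar, so the chained pb.New(total).Start() style from the newer revision does not compile. A minimal usage sketch against the reverted methods shown above (New, Start, Increment, Finish):

```go
package main

import (
	"time"

	"github.com/cheggaaa/pb"
)

func main() {
	total := 100
	bar := pb.New(total) // Format(FORMAT) is applied inside New64
	bar.Start()          // reverted Start() has no return value, so no chaining

	for i := 0; i < total; i++ {
		bar.Increment()
		time.Sleep(10 * time.Millisecond)
	}

	// In the reverted code isFinish is a plain bool with no sync.Once guard,
	// so Finish should only be called once.
	bar.Finish()
}
```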

Godeps/_workspace/src/github.com/cheggaaa/pb/pb_nix.go (generated, vendored; 2 lines changed)
The build constraint reverts to "// +build linux darwin freebsd openbsd", dropping netbsd.

Godeps/_workspace/src/github.com/cheggaaa/pb/pb_test.go (generated, vendored; 7 lines changed)
Removes Test_MultipleFinish, the test that called Finish twice on the same bar.

Godeps/_workspace/src/github.com/cheggaaa/pb/pb_x.go (generated, vendored; 2 lines changed)
The build constraint reverts to "// +build linux darwin freebsd openbsd solaris", again dropping netbsd.

Godeps/_workspace/src/github.com/crowdmob/goamz/aws/regions.go (generated, vendored; 2 lines changed)
The USWest region's Kinesis endpoint reverts from "https://kinesis.us-west-1.amazonaws.com" to an empty string.

Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3.go (generated, vendored; 19 lines changed)
SignedURLWithMethod and prepare drop the aws.V2Signature conditions around the X-Amz-Security-Token handling, so the token is attached based only on Auth.Token() again, and the *url.Error case is removed from shouldRetry, so responses closed before the headers arrive are no longer retried for idempotent methods (Get, Put, Delete, Head).

Godeps/_workspace/src/github.com/crowdmob/goamz/s3/s3test/server.go (generated, vendored; 67 lines changed)
The in-memory S3 test server loses multi-object delete: bucketResource.post reverts to rejecting every bucket POST with a 400 "bucket POST method not available" error, and the multiDel handler (XML request decoding and per-key Deleted/Error results) is removed.

Godeps/_workspace/src/github.com/fd/go-nat/natpmp.go (generated, vendored; 91 lines changed)
NAT-PMP gateway discovery reverts from gateway.DiscoverGateway (the github.com/jackpal/gateway dependency dropped from Godeps.json) to the older natpmp_PotentialGateways approach: walk the local interfaces, collect addresses inside 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16, guess a gateway for each such network by setting the low bit of the masked address, and probe every candidate concurrently. The type also renames back from natpmpNAT to natpmp_NAT.

Godeps/_workspace/src/github.com/fzzy/radix/redis/client.go (generated, vendored; 8 lines changed)
Client loses the reusable writeBuf field; DialTimeout reverts to a plain net.Dial, so the timeout argument only bounds later reads and writes via c.timeout, not the initial connection; and writeRequest goes back to resp.WriteArbitraryAsFlattenedStrings(c.Conn, req) instead of appending into the shared buffer and writing it in one call.
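
A small usage sketch for the reverted client. After the revert, DialTimeout dials with plain net.Dial, so the timeout only bounds later reads and writes, not the initial connection. DialTimeout, Close and Reply.Str appear in this diff; the Cmd call is an assumption about the upstream fzzy/radix API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/fzzy/radix/redis"
)

func main() {
	// The 10s timeout applies to reads/writes only; the initial TCP connect
	// is an ordinary net.Dial with no deadline in the reverted code.
	c, err := redis.DialTimeout("tcp", "127.0.0.1:6379", 10*time.Second)
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Cmd is assumed from the upstream fzzy/radix documentation; it is not
	// part of this diff.
	s, err := c.Cmd("ECHO", "hello").Str()
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```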

Godeps/_workspace/src/github.com/fzzy/radix/redis/client_test.go (generated, vendored; 3 lines changed)
Import grouping only: github.com/stretchr/testify/assert moves back above the dot-imported "testing" package.

Godeps/_workspace/src/github.com/fzzy/radix/redis/reply.go (generated, vendored; 23 lines changed)
Removes Reply.Float64, the accessor that parsed a BulkReply value with strconv.ParseFloat.

Godeps/_workspace/src/github.com/fzzy/radix/redis/reply_test.go (generated, vendored; 22 lines changed)
Removes TestFloat64 and reverts the import grouping.

Godeps/_workspace/src/github.com/fzzy/radix/redis/resp/resp.go (generated, vendored; 255 lines changed)
Reverts the allocation-conscious encoder: the type prefixes go back to byte constants (from []byte package variables), the AppendArbitrary, AppendArbitraryAsStrings and AppendArbitraryAsFlattenedStrings entry points and writeBytesHelper are removed, and the older helpers return: format, formatStr, formatErr, formatInt and formatNil each build a fresh []byte per value, WriteArbitrary and WriteArbitraryAsString write that buffer, flattenedLength gives way to the older flatten helper (which materialises a flattened []interface{}), and WriteArbitraryAsFlattenedStrings becomes flatten followed by WriteArbitraryAsString.
3
Godeps/_workspace/src/github.com/fzzy/radix/redis/resp/resp_test.go
generated
vendored
@ -3,9 +3,8 @@ package resp
import (
"bytes"
"errors"
. "testing"

"github.com/stretchr/testify/assert"
. "testing"
)

func TestRead(t *T) {
4
Godeps/_workspace/src/github.com/gogo/protobuf/proto/encode_gogo.go
generated
vendored
@ -40,10 +40,6 @@ import (
"reflect"
)

func NewRequiredNotSetError(field string) *RequiredNotSetError {
return &RequiredNotSetError{field}
}

type Sizer interface {
Size() int
}
2
Godeps/_workspace/src/github.com/hashicorp/golang-lru/README.md
generated
vendored
@ -20,6 +20,6 @@ for i := 0; i < 256; i++ {
l.Add(i, nil)
}
if l.Len() != 128 {
panic(fmt.Sprintf("bad len: %v", l.Len()))
panic("bad len: %v", l.Len())
}
```
75
Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru.go
generated
vendored
@ -14,8 +14,7 @@ type Cache struct {
size int
evictList *list.List
items map[interface{}]*list.Element
lock sync.RWMutex
onEvicted func(key interface{}, value interface{})
lock sync.Mutex
}

// entry is used to hold a value in the evictList
@ -26,10 +25,6 @@ type entry struct {

// New creates an LRU of the given size
func New(size int) (*Cache, error) {
return NewWithEvict(size, nil)
}

func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
if size <= 0 {
return nil, errors.New("Must provide a positive size")
}
@ -37,7 +32,6 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{}))
size: size,
evictList: list.New(),
items: make(map[interface{}]*list.Element, size),
onEvicted: onEvicted,
}
return c, nil
}
@ -46,19 +40,12 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{}))
func (c *Cache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()

if c.onEvicted != nil {
for k, v := range c.items {
c.onEvicted(k, v.Value.(*entry).value)
}
}

c.evictList = list.New()
c.items = make(map[interface{}]*list.Element, c.size)
}

// Add adds a value to the cache. Returns true if an eviction occured.
func (c *Cache) Add(key, value interface{}) bool {
// Add adds a value to the cache.
func (c *Cache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()

@ -66,7 +53,7 @@ func (c *Cache) Add(key, value interface{}) bool {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
ent.Value.(*entry).value = value
return false
return
}

// Add new item
@ -74,12 +61,10 @@ func (c *Cache) Add(key, value interface{}) bool {
entry := c.evictList.PushFront(ent)
c.items[key] = entry

evict := c.evictList.Len() > c.size
// Verify size not exceeded
if evict {
if c.evictList.Len() > c.size {
c.removeOldest()
}
return evict
}

// Get looks up a key's value from the cache.
@ -94,27 +79,6 @@ func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
return
}

// Check if a key is in the cache, without updating the recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) (ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()

_, ok = c.items[key]
return ok
}

// Returns the key value (or undefined if not found) without updating the "recently used"-ness of the key.
// (If you find yourself using this a lot, you might be using the wrong sort of data structure, but there are some use cases where it's handy.)
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()

if ent, ok := c.items[key]; ok {
return ent.Value.(*entry).value, true
}
return nil, ok
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
c.lock.Lock()
@ -132,30 +96,21 @@ func (c *Cache) RemoveOldest() {
c.removeOldest()
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
// Keys returns a slice of the keys in the cache.
func (c *Cache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
c.lock.Lock()
defer c.lock.Unlock()

keys := make([]interface{}, len(c.items))
ent := c.evictList.Back()
i := 0
for ent != nil {
keys[i] = ent.Value.(*entry).key
ent = ent.Prev()
for k := range c.items {
keys[i] = k
i++
}

return keys
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.evictList.Len()
}

// removeOldest removes the oldest item from the cache.
func (c *Cache) removeOldest() {
ent := c.evictList.Back()
@ -169,7 +124,11 @@ func (c *Cache) removeElement(e *list.Element) {
c.evictList.Remove(e)
kv := e.Value.(*entry)
delete(c.items, kv.key)
if c.onEvicted != nil {
c.onEvicted(kv.key, kv.value)
}
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
c.lock.Lock()
defer c.lock.Unlock()
return c.evictList.Len()
}
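The lru.go hunks above roll the cache back from the newer API (RWMutex, an eviction callback, Add reporting whether an eviction happened, Contains/Peek, Keys ordered oldest to newest) to the older minimal one. A short sketch of how a caller would drive the newer surface that this revert removes; the signatures are read off the diff above and should be treated as assumptions rather than checked against any particular golang-lru release:

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict registers a callback that fires for every evicted entry.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Println("evicted:", key, value)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	evicted := cache.Add("c", 3) // true: capacity is 2, so "a" is pushed out
	fmt.Println("eviction occurred:", evicted)

	// Contains and Peek inspect entries without refreshing recent-ness.
	fmt.Println("still holds b:", cache.Contains("b"))
	if v, ok := cache.Peek("c"); ok {
		fmt.Println("peeked:", v)
	}
}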
85
Godeps/_workspace/src/github.com/hashicorp/golang-lru/lru_test.go
generated
vendored
@ -3,31 +3,18 @@ package lru
import "testing"

func TestLRU(t *testing.T) {
evictCounter := 0
onEvicted := func(k interface{}, v interface{}) {
if k != v {
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
}
evictCounter += 1
}
l, err := NewWithEvict(128, onEvicted)
l, err := New(128)
if err != nil {
t.Fatalf("err: %v", err)
}

for i := 0; i < 256; i++ {
l.Add(i, i)
}
if l.Len() != 128 {
t.Fatalf("bad len: %v", l.Len())
}

if evictCounter != 128 {
t.Fatalf("bad evict count: %v", evictCounter)
}

for i, k := range l.Keys() {
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
for _, k := range l.Keys() {
if v, ok := l.Get(k); !ok || v != k {
t.Fatalf("bad key: %v", k)
}
}
@ -51,14 +38,6 @@ func TestLRU(t *testing.T) {
}
}

l.Get(192) // expect 192 to be last key in l.Keys()

for i, k := range l.Keys() {
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
t.Fatalf("out of order key: %v", k)
}
}

l.Purge()
if l.Len() != 0 {
t.Fatalf("bad len: %v", l.Len())
@ -67,61 +46,3 @@ func TestLRU(t *testing.T) {
t.Fatalf("should contain nothing")
}
}

// test that Add returns true/false if an eviction occured
func TestLRUAdd(t *testing.T) {
evictCounter := 0
onEvicted := func(k interface{}, v interface{}) {
evictCounter += 1
}

l, err := NewWithEvict(1, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

if l.Add(1, 1) == true || evictCounter != 0 {
t.Errorf("should not have an eviction")
}
if l.Add(2, 2) == false || evictCounter != 1 {
t.Errorf("should have an eviction")
}
}

// test that Contains doesn't update recent-ness
func TestLRUContains(t *testing.T) {
l, err := New(2)
if err != nil {
t.Fatalf("err: %v", err)
}

l.Add(1, 1)
l.Add(2, 2)
if !l.Contains(1) {
t.Errorf("1 should be contained")
}

l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Contains should not have updated recent-ness of 1")
}
}

// test that Peek doesn't update recent-ness
func TestLRUPeek(t *testing.T) {
l, err := New(2)
if err != nil {
t.Fatalf("err: %v", err)
}

l.Add(1, 1)
l.Add(2, 2)
if v, ok := l.Peek(1); !ok || v != 1 {
t.Errorf("1 should be set to 1: %v, %v", v, ok)
}

l.Add(3, 3)
if l.Contains(1) {
t.Errorf("should not have updated recent-ness of 1")
}
}
67
Godeps/_workspace/src/github.com/hashicorp/yamux/session.go
generated
vendored
67
Godeps/_workspace/src/github.com/hashicorp/yamux/session.go
generated
vendored
@ -50,11 +50,6 @@ type Session struct {
|
||||
streams map[uint32]*Stream
|
||||
streamLock sync.Mutex
|
||||
|
||||
// synCh acts like a semaphore. It is sized to the AcceptBacklog which
|
||||
// is assumed to be symmetric between the client and server. This allows
|
||||
// the client to avoid exceeding the backlog and instead blocks the open.
|
||||
synCh chan struct{}
|
||||
|
||||
// acceptCh is used to pass ready streams to the client
|
||||
acceptCh chan *Stream
|
||||
|
||||
@ -62,10 +57,6 @@ type Session struct {
|
||||
// or to send a header out directly.
|
||||
sendCh chan sendReady
|
||||
|
||||
// recvDoneCh is closed when recv() exits to avoid a race
|
||||
// between stream registration and stream shutdown
|
||||
recvDoneCh chan struct{}
|
||||
|
||||
// shutdown is used to safely close a session
|
||||
shutdown bool
|
||||
shutdownErr error
|
||||
@ -90,10 +81,8 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
|
||||
bufRead: bufio.NewReader(conn),
|
||||
pings: make(map[uint32]chan struct{}),
|
||||
streams: make(map[uint32]*Stream),
|
||||
synCh: make(chan struct{}, config.AcceptBacklog),
|
||||
acceptCh: make(chan *Stream, config.AcceptBacklog),
|
||||
sendCh: make(chan sendReady, 64),
|
||||
recvDoneCh: make(chan struct{}),
|
||||
shutdownCh: make(chan struct{}),
|
||||
}
|
||||
if client {
|
||||
@ -119,14 +108,6 @@ func (s *Session) IsClosed() bool {
|
||||
}
|
||||
}
|
||||
|
||||
// NumStreams returns the number of currently open streams
|
||||
func (s *Session) NumStreams() int {
|
||||
s.streamLock.Lock()
|
||||
num := len(s.streams)
|
||||
s.streamLock.Unlock()
|
||||
return num
|
||||
}
|
||||
|
||||
// Open is used to create a new stream as a net.Conn
|
||||
func (s *Session) Open() (net.Conn, error) {
|
||||
return s.OpenStream()
|
||||
@ -141,13 +122,6 @@ func (s *Session) OpenStream() (*Stream, error) {
|
||||
return nil, ErrRemoteGoAway
|
||||
}
|
||||
|
||||
// Block if we have too many inflight SYNs
|
||||
select {
|
||||
case s.synCh <- struct{}{}:
|
||||
case <-s.shutdownCh:
|
||||
return nil, ErrSessionShutdown
|
||||
}
|
||||
|
||||
GET_ID:
|
||||
// Get and ID, and check for stream exhaustion
|
||||
id := atomic.LoadUint32(&s.nextStreamID)
|
||||
@ -165,10 +139,7 @@ GET_ID:
|
||||
s.streamLock.Unlock()
|
||||
|
||||
// Send the window update to create
|
||||
if err := stream.sendWindowUpdate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return stream, nil
|
||||
return stream, stream.sendWindowUpdate()
|
||||
}
|
||||
|
||||
// Accept is used to block until the next available stream
|
||||
@ -182,10 +153,7 @@ func (s *Session) Accept() (net.Conn, error) {
|
||||
func (s *Session) AcceptStream() (*Stream, error) {
|
||||
select {
|
||||
case stream := <-s.acceptCh:
|
||||
if err := stream.sendWindowUpdate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return stream, nil
|
||||
return stream, stream.sendWindowUpdate()
|
||||
case <-s.shutdownCh:
|
||||
return nil, s.shutdownErr
|
||||
}
|
||||
@ -206,7 +174,6 @@ func (s *Session) Close() error {
|
||||
}
|
||||
close(s.shutdownCh)
|
||||
s.conn.Close()
|
||||
<-s.recvDoneCh
|
||||
|
||||
s.streamLock.Lock()
|
||||
defer s.streamLock.Unlock()
|
||||
@ -358,14 +325,6 @@ func (s *Session) send() {
|
||||
|
||||
// recv is a long running goroutine that accepts new data
|
||||
func (s *Session) recv() {
|
||||
if err := s.recvLoop(); err != nil {
|
||||
s.exitErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
// recvLoop continues to receive data until a fatal error is encountered
|
||||
func (s *Session) recvLoop() error {
|
||||
defer close(s.recvDoneCh)
|
||||
hdr := header(make([]byte, headerSize))
|
||||
var handler func(header) error
|
||||
for {
|
||||
@ -374,13 +333,15 @@ func (s *Session) recvLoop() error {
|
||||
if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
|
||||
s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
|
||||
}
|
||||
return err
|
||||
s.exitErr(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify the version
|
||||
if hdr.Version() != protoVersion {
|
||||
s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
|
||||
return ErrInvalidVersion
|
||||
s.exitErr(ErrInvalidVersion)
|
||||
return
|
||||
}
|
||||
|
||||
// Switch on the type
|
||||
@ -394,12 +355,14 @@ func (s *Session) recvLoop() error {
|
||||
case typePing:
|
||||
handler = s.handlePing
|
||||
default:
|
||||
return ErrInvalidMsgType
|
||||
s.exitErr(ErrInvalidMsgType)
|
||||
return
|
||||
}
|
||||
|
||||
// Invoke the handler
|
||||
if err := handler(hdr); err != nil {
|
||||
return err
|
||||
s.exitErr(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -540,13 +503,3 @@ func (s *Session) closeStream(id uint32) {
|
||||
delete(s.streams, id)
|
||||
s.streamLock.Unlock()
|
||||
}
|
||||
|
||||
// establishStream is used to mark a stream that was in the
|
||||
// SYN Sent state as established.
|
||||
func (s *Session) establishStream() {
|
||||
select {
|
||||
case <-s.synCh:
|
||||
default:
|
||||
panic("established stream without inflight syn")
|
||||
}
|
||||
}
|
||||
|
83
Godeps/_workspace/src/github.com/hashicorp/yamux/session_test.go
generated
vendored
83
Godeps/_workspace/src/github.com/hashicorp/yamux/session_test.go
generated
vendored
@ -76,13 +76,6 @@ func TestAccept(t *testing.T) {
|
||||
defer client.Close()
|
||||
defer server.Close()
|
||||
|
||||
if client.NumStreams() != 0 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
if server.NumStreams() != 0 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(4)
|
||||
|
||||
@ -170,10 +163,6 @@ func TestSendData_Small(t *testing.T) {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if server.NumStreams() != 1 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
buf := make([]byte, 4)
|
||||
for i := 0; i < 1000; i++ {
|
||||
n, err := stream.Read(buf)
|
||||
@ -200,10 +189,6 @@ func TestSendData_Small(t *testing.T) {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
if client.NumStreams() != 1 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
n, err := stream.Write([]byte("test"))
|
||||
if err != nil {
|
||||
@ -229,13 +214,6 @@ func TestSendData_Small(t *testing.T) {
|
||||
case <-time.After(time.Second):
|
||||
panic("timeout")
|
||||
}
|
||||
|
||||
if client.NumStreams() != 0 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
if server.NumStreams() != 0 {
|
||||
t.Fatalf("bad")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendData_Large(t *testing.T) {
|
||||
@ -598,26 +576,21 @@ func TestBacklogExceeded(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt to open a new stream
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := client.Open()
|
||||
errCh <- err
|
||||
}()
|
||||
// Exceed the backlog!
|
||||
stream, err := client.Open()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
// Shutdown the server
|
||||
go func() {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
server.Close()
|
||||
}()
|
||||
if _, err := stream.Write([]byte("foo")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err == nil {
|
||||
t.Fatalf("open should fail")
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
t.Fatalf("timeout")
|
||||
buf := make([]byte, 4)
|
||||
stream.SetReadDeadline(time.Now().Add(50 * time.Millisecond))
|
||||
if _, err := stream.Read(buf); err != ErrConnectionReset {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -754,33 +727,3 @@ func TestSendData_VeryLarge(t *testing.T) {
|
||||
panic("timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBacklogExceeded_Accept(t *testing.T) {
|
||||
client, server := testClientServer()
|
||||
defer client.Close()
|
||||
defer server.Close()
|
||||
|
||||
max := 5 * client.config.AcceptBacklog
|
||||
go func() {
|
||||
for i := 0; i < max; i++ {
|
||||
stream, err := server.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer stream.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Fill the backlog
|
||||
for i := 0; i < max; i++ {
|
||||
stream, err := client.Open()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
defer stream.Close()
|
||||
|
||||
if _, err := stream.Write([]byte("foo")); err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
31
Godeps/_workspace/src/github.com/hashicorp/yamux/stream.go
generated
vendored
31
Godeps/_workspace/src/github.com/hashicorp/yamux/stream.go
generated
vendored
@ -86,8 +86,6 @@ func (s *Stream) Read(b []byte) (n int, err error) {
|
||||
START:
|
||||
s.stateLock.Lock()
|
||||
switch s.state {
|
||||
case streamLocalClose:
|
||||
fallthrough
|
||||
case streamRemoteClose:
|
||||
fallthrough
|
||||
case streamClosed:
|
||||
@ -267,7 +265,6 @@ func (s *Stream) sendClose() error {
|
||||
|
||||
// Close is used to close the stream
|
||||
func (s *Stream) Close() error {
|
||||
closeStream := false
|
||||
s.stateLock.Lock()
|
||||
switch s.state {
|
||||
// Opened means we need to signal a close
|
||||
@ -282,7 +279,7 @@ func (s *Stream) Close() error {
|
||||
case streamLocalClose:
|
||||
case streamRemoteClose:
|
||||
s.state = streamClosed
|
||||
closeStream = true
|
||||
s.session.closeStream(s.id)
|
||||
goto SEND_CLOSE
|
||||
|
||||
case streamClosed:
|
||||
@ -296,9 +293,6 @@ SEND_CLOSE:
|
||||
s.stateLock.Unlock()
|
||||
s.sendClose()
|
||||
s.notifyWaiting()
|
||||
if closeStream {
|
||||
s.session.closeStream(s.id)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -313,23 +307,14 @@ func (s *Stream) forceClose() {
|
||||
// processFlags is used to update the state of the stream
|
||||
// based on set flags, if any. Lock must be held
|
||||
func (s *Stream) processFlags(flags uint16) error {
|
||||
// Close the stream without holding the state lock
|
||||
closeStream := false
|
||||
defer func() {
|
||||
if closeStream {
|
||||
s.session.closeStream(s.id)
|
||||
}
|
||||
}()
|
||||
|
||||
s.stateLock.Lock()
|
||||
defer s.stateLock.Unlock()
|
||||
if flags&flagACK == flagACK {
|
||||
if s.state == streamSYNSent {
|
||||
s.state = streamEstablished
|
||||
}
|
||||
s.session.establishStream()
|
||||
}
|
||||
if flags&flagFIN == flagFIN {
|
||||
|
||||
} else if flags&flagFIN == flagFIN {
|
||||
switch s.state {
|
||||
case streamSYNSent:
|
||||
fallthrough
|
||||
@ -340,19 +325,15 @@ func (s *Stream) processFlags(flags uint16) error {
|
||||
s.notifyWaiting()
|
||||
case streamLocalClose:
|
||||
s.state = streamClosed
|
||||
closeStream = true
|
||||
s.session.closeStream(s.id)
|
||||
s.notifyWaiting()
|
||||
default:
|
||||
s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
|
||||
return ErrUnexpectedFlag
|
||||
}
|
||||
}
|
||||
if flags&flagRST == flagRST {
|
||||
if s.state == streamSYNSent {
|
||||
s.session.establishStream()
|
||||
}
|
||||
} else if flags&flagRST == flagRST {
|
||||
s.state = streamReset
|
||||
closeStream = true
|
||||
s.session.closeStream(s.id)
|
||||
s.notifyWaiting()
|
||||
}
|
||||
return nil
|
||||
|
2
Godeps/_workspace/src/github.com/howeyc/fsnotify/fsnotify_bsd.go
generated
vendored
2
Godeps/_workspace/src/github.com/howeyc/fsnotify/fsnotify_bsd.go
generated
vendored
@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
// +build freebsd openbsd netbsd darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
|
2
Godeps/_workspace/src/github.com/howeyc/fsnotify/fsnotify_open_bsd.go
generated
vendored
2
Godeps/_workspace/src/github.com/howeyc/fsnotify/fsnotify_open_bsd.go
generated
vendored
@ -2,7 +2,7 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
// +build freebsd openbsd netbsd
|
||||
|
||||
package fsnotify
|
||||
|
||||
|
2
Godeps/_workspace/src/github.com/huin/goupnp/goupnp.go
generated
vendored
2
Godeps/_workspace/src/github.com/huin/goupnp/goupnp.go
generated
vendored
@ -22,7 +22,6 @@ import (
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/huin/goupnp/httpu"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/huin/goupnp/ssdp"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/html/charset"
|
||||
)
|
||||
|
||||
// ContextError is an error that wraps an error with some context information.
|
||||
@ -105,7 +104,6 @@ func requestXml(url string, defaultSpace string, doc interface{}) error {
|
||||
|
||||
decoder := xml.NewDecoder(resp.Body)
|
||||
decoder.DefaultSpace = defaultSpace
|
||||
decoder.CharsetReader = charset.NewReaderLabel
|
||||
|
||||
return decoder.Decode(doc)
|
||||
}
|
||||
|
2
Godeps/_workspace/src/github.com/huin/goupnp/soap/soap.go
generated
vendored
2
Godeps/_workspace/src/github.com/huin/goupnp/soap/soap.go
generated
vendored
@ -42,7 +42,7 @@ func (client *SOAPClient) PerformAction(actionNamespace, actionName string, inAc
|
||||
Method: "POST",
|
||||
URL: &client.EndpointURL,
|
||||
Header: http.Header{
|
||||
"SOAPACTION": []string{`"` + actionNamespace + "#" + actionName + `"`},
|
||||
"SOAPACTION": []string{actionNamespace + "#" + actionName},
|
||||
"CONTENT-TYPE": []string{"text/xml; charset=\"utf-8\""},
|
||||
},
|
||||
Body: ioutil.NopCloser(bytes.NewBuffer(requestBytes)),
|
||||
|
27
Godeps/_workspace/src/github.com/jackpal/gateway/LICENSE
generated
vendored
27
Godeps/_workspace/src/github.com/jackpal/gateway/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
||||
// Copyright (c) 2010 Jack Palevich. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
7
Godeps/_workspace/src/github.com/jackpal/gateway/README.md
generated
vendored
7
Godeps/_workspace/src/github.com/jackpal/gateway/README.md
generated
vendored
@ -1,7 +0,0 @@
|
||||
# gateway
|
||||
|
||||
A very simple library for discovering the IP address of the local LAN gateway.
|
||||
|
||||
Provides implementations for Linux, OS X (Darwin) and Windows.
|
||||
|
||||
Pull requests for other OSs happily considered!
|
40
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_darwin.go
generated
vendored
40
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_darwin.go
generated
vendored
@ -1,40 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "-n", "get", "0.0.0.0")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Darwin route out format is always like this:
|
||||
// route to: default
|
||||
// destination: default
|
||||
// mask: default
|
||||
// gateway: 192.168.1.1
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("gateway:")) {
|
||||
gatewayFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(gatewayFields[1]))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
75
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_linux.go
generated
vendored
75
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_linux.go
generated
vendored
@ -1,75 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func discoverGatewayUsingIp() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("ip", "route", "show")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Linux 'ip route show' format looks like this:
|
||||
// default via 192.168.178.1 dev wlp3s0 metric 303
|
||||
// 192.168.178.0/24 dev wlp3s0 proto kernel scope link src 192.168.178.76 metric 303
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("default")) {
|
||||
ipFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(ipFields[2]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func discoverGatewayUsingRoute() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "-n")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Linux route out format is always like this:
|
||||
// Kernel IP routing table
|
||||
// Destination Gateway Genmask Flags Metric Ref Use Iface
|
||||
// 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 eth0
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for _, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("0.0.0.0")) {
|
||||
ipFields := bytes.Fields(line)
|
||||
ip = net.ParseIP(string(ipFields[1]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
ip, err = discoverGatewayUsingRoute()
|
||||
if err != nil {
|
||||
ip, err = discoverGatewayUsingIp()
|
||||
}
|
||||
return
|
||||
}
|
10
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_test.go
generated
vendored
10
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_test.go
generated
vendored
@ -1,10 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestGateway(t *testing.T) {
|
||||
ip, err := DiscoverGateway()
|
||||
if err != nil {
|
||||
t.Errorf("DiscoverGateway() = %v,%v", ip, err)
|
||||
}
|
||||
}
|
14
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_unimplemented.go
generated
vendored
14
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_unimplemented.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
// +build !darwin,!linux,!windows
|
||||
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
err = fmt.Errorf("DiscoverGateway not implemented for OS %s", runtime.GOOS)
|
||||
return
|
||||
}
|
43
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_windows.go
generated
vendored
43
Godeps/_workspace/src/github.com/jackpal/gateway/gateway_windows.go
generated
vendored
@ -1,43 +0,0 @@
|
||||
package gateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
func DiscoverGateway() (ip net.IP, err error) {
|
||||
routeCmd := exec.Command("route", "print", "0.0.0.0")
|
||||
stdOut, err := routeCmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = routeCmd.Start(); err != nil {
|
||||
return
|
||||
}
|
||||
output, err := ioutil.ReadAll(stdOut)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Windows route output format is always like this:
|
||||
// ===========================================================================
|
||||
// Active Routes:
|
||||
// Network Destination Netmask Gateway Interface Metric
|
||||
// 0.0.0.0 0.0.0.0 192.168.1.1 192.168.1.100 20
|
||||
// ===========================================================================
|
||||
// I'm trying to pick the active route,
|
||||
// then jump 2 lines and pick the third IP
|
||||
// Not using regex because output is quite standard from Windows XP to 8 (NEEDS TESTING)
|
||||
outputLines := bytes.Split(output, []byte("\n"))
|
||||
for idx, line := range outputLines {
|
||||
if bytes.Contains(line, []byte("Active Routes:")) {
|
||||
ipFields := bytes.Fields(outputLines[idx+2])
|
||||
ip = net.ParseIP(string(ipFields[2]))
|
||||
break
|
||||
}
|
||||
}
|
||||
err = routeCmd.Wait()
|
||||
return
|
||||
}
|
12
Godeps/_workspace/src/github.com/jackpal/go-nat-pmp/natpmp.go
generated
vendored
12
Godeps/_workspace/src/github.com/jackpal/go-nat-pmp/natpmp.go
generated
vendored
@ -2,7 +2,6 @@ package natpmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jackpal/gateway"
|
||||
"log"
|
||||
"net"
|
||||
"time"
|
||||
@ -37,17 +36,6 @@ func NewClient(gateway net.IP) (nat *Client) {
|
||||
return &Client{gateway}
|
||||
}
|
||||
|
||||
// Create a NAT-PMP client for the NAT-PMP server at the default gateway.
|
||||
func NewClientForDefaultGateway() (nat *Client, err error) {
|
||||
var g net.IP
|
||||
g, err = gateway.DiscoverGateway()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
nat = NewClient(g)
|
||||
return
|
||||
}
|
||||
|
||||
// Results of the NAT-PMP GetExternalAddress operation
|
||||
type GetExternalAddressResult struct {
|
||||
SecondsSinceStartOfEpoc uint32
|
||||
|
13
Godeps/_workspace/src/github.com/jackpal/go-nat-pmp/natpmp_test.go
generated
vendored
13
Godeps/_workspace/src/github.com/jackpal/go-nat-pmp/natpmp_test.go
generated
vendored
@ -1,13 +0,0 @@
|
||||
package natpmp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNatPMP(t *testing.T) {
|
||||
client, err := NewClientForDefaultGateway()
|
||||
if err != nil {
|
||||
t.Errorf("NewClientForDefaultGateway() = %v,%v", client, err)
|
||||
return
|
||||
}
|
||||
}
|
2
Godeps/_workspace/src/github.com/jbenet/go-peerstream/listener.go
generated
vendored
2
Godeps/_workspace/src/github.com/jbenet/go-peerstream/listener.go
generated
vendored
@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
// AcceptConcurrency is how many connections can simultaneously be
|
||||
// in process of being accepted. Handshakes can sometimes occur as
|
||||
// in process of being accepted. Handshakes can sometimes occurr as
|
||||
// part of this process, so it may take some time. It is imporant to
|
||||
// rate limit lest a malicious influx of connections would cause our
|
||||
// node to consume all its resources accepting new connections.
|
||||
|
6
Godeps/_workspace/src/github.com/jbenet/go-reuseport/impl_unix.go
generated
vendored
6
Godeps/_workspace/src/github.com/jbenet/go-reuseport/impl_unix.go
generated
vendored
@ -115,7 +115,9 @@ func dial(dialer net.Dialer, netw, addr string) (c net.Conn, err error) {
|
||||
// here we just try again 3 times.
|
||||
for i := 0; i < 3; i++ {
|
||||
if !deadline.IsZero() && deadline.Before(time.Now()) {
|
||||
err = errTimeout
|
||||
if err == nil {
|
||||
err = errTimeout
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
@ -355,6 +357,6 @@ var errTimeout = &timeoutError{}
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Error() string { return "i/o timeout (reuseport)" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
2
Godeps/_workspace/src/github.com/jbenet/go-reuseport/poll/error.go
generated
vendored
2
Godeps/_workspace/src/github.com/jbenet/go-reuseport/poll/error.go
generated
vendored
@ -4,6 +4,6 @@ var errTimeout = &timeoutError{}
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (e *timeoutError) Error() string { return "i/o timeout" }
|
||||
func (e *timeoutError) Error() string { return "i/o timeout (reuseport poll)" }
|
||||
func (e *timeoutError) Timeout() bool { return true }
|
||||
func (e *timeoutError) Temporary() bool { return true }
|
||||
|
4
Godeps/_workspace/src/github.com/kardianos/osext/README.md
generated
vendored
4
Godeps/_workspace/src/github.com/kardianos/osext/README.md
generated
vendored
@ -4,9 +4,7 @@
|
||||
|
||||
There is sometimes utility in finding the current executable file
|
||||
that is running. This can be used for upgrading the current executable
|
||||
or finding resources located relative to the executable file. Both
|
||||
working directory and the os.Args[0] value are arbitrary and cannot
|
||||
be relied on; os.Args[0] can be "faked".
|
||||
or finding resources located relative to the executable file.
|
||||
|
||||
Multi-platform and supports:
|
||||
* Linux
|
||||
|
6
Godeps/_workspace/src/github.com/kardianos/osext/osext.go
generated
vendored
6
Godeps/_workspace/src/github.com/kardianos/osext/osext.go
generated
vendored
@ -16,12 +16,12 @@ func Executable() (string, error) {
|
||||
}
|
||||
|
||||
// Returns same path as Executable, returns just the folder
|
||||
// path. Excludes the executable name and any trailing slash.
|
||||
// path. Excludes the executable name.
|
||||
func ExecutableFolder() (string, error) {
|
||||
p, err := Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Dir(p), nil
|
||||
folder, _ := filepath.Split(p)
|
||||
return folder, nil
|
||||
}
|
||||
|
23
Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
generated
vendored
23
Godeps/_workspace/src/github.com/kardianos/osext/osext_test.go
generated
vendored
@ -24,29 +24,6 @@ const (
|
||||
executableEnvValueDelete = "delete"
|
||||
)
|
||||
|
||||
func TestPrintExecutable(t *testing.T) {
|
||||
ef, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
t.Log("Executable:", ef)
|
||||
}
|
||||
func TestPrintExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
t.Log("Executable Folder:", ef)
|
||||
}
|
||||
func TestExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
if ef[len(ef)-1] == filepath.Separator {
|
||||
t.Fatal("ExecutableFolder ends with a trailing slash.")
|
||||
}
|
||||
}
|
||||
func TestExecutableMatch(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
|
53
Godeps/_workspace/src/github.com/miekg/dns/client.go
generated
vendored
53
Godeps/_workspace/src/github.com/miekg/dns/client.go
generated
vendored
@ -66,9 +66,6 @@ func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
@ -89,9 +86,6 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
@ -128,39 +122,31 @@ func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||
return r, rtt, nil
|
||||
}
|
||||
|
||||
func (c *Client) dialTimeout() time.Duration {
|
||||
if c.DialTimeout != 0 {
|
||||
return c.DialTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) readTimeout() time.Duration {
|
||||
if c.ReadTimeout != 0 {
|
||||
return c.ReadTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) writeTimeout() time.Duration {
|
||||
if c.WriteTimeout != 0 {
|
||||
return c.WriteTimeout
|
||||
}
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
timeout := dnsTimeout
|
||||
var co *Conn
|
||||
if c.DialTimeout != 0 {
|
||||
timeout = c.DialTimeout
|
||||
}
|
||||
if c.Net == "" {
|
||||
co, err = DialTimeout("udp", a, c.dialTimeout())
|
||||
co, err = DialTimeout("udp", a, timeout)
|
||||
} else {
|
||||
co, err = DialTimeout(c.Net, a, c.dialTimeout())
|
||||
co, err = DialTimeout(c.Net, a, timeout)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
timeout = dnsTimeout
|
||||
if c.ReadTimeout != 0 {
|
||||
timeout = c.ReadTimeout
|
||||
}
|
||||
co.SetReadDeadline(time.Now().Add(timeout))
|
||||
timeout = dnsTimeout
|
||||
if c.WriteTimeout != 0 {
|
||||
timeout = c.WriteTimeout
|
||||
}
|
||||
co.SetWriteDeadline(time.Now().Add(timeout))
|
||||
defer co.Close()
|
||||
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
@ -170,18 +156,11 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||
if opt == nil && c.UDPSize >= MinMsgSize {
|
||||
co.UDPSize = c.UDPSize
|
||||
}
|
||||
|
||||
co.SetReadDeadline(time.Now().Add(c.readTimeout()))
|
||||
co.SetWriteDeadline(time.Now().Add(c.writeTimeout()))
|
||||
|
||||
co.TsigSecret = c.TsigSecret
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, co.rtt, err
|
||||
}
|
||||
|
||||
|
23
Godeps/_workspace/src/github.com/miekg/dns/client_test.go
generated
vendored
23
Godeps/_workspace/src/github.com/miekg/dns/client_test.go
generated
vendored
@ -37,29 +37,6 @@ func TestClientSync(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientSyncBadId(t *testing.T) {
|
||||
HandleFunc("miek.nl.", HelloServerBadId)
|
||||
defer HandleRemove("miek.nl.")
|
||||
|
||||
s, addrstr, err := RunLocalUDPServer("127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to run test server: %v", err)
|
||||
}
|
||||
defer s.Shutdown()
|
||||
|
||||
m := new(Msg)
|
||||
m.SetQuestion("miek.nl.", TypeSOA)
|
||||
|
||||
c := new(Client)
|
||||
if _, _, err := c.Exchange(m, addrstr); err != ErrId {
|
||||
t.Errorf("did not find a bad Id")
|
||||
}
|
||||
// And now with plain Exchange().
|
||||
if _, err := Exchange(m, addrstr); err != ErrId {
|
||||
t.Errorf("did not find a bad Id")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientEDNS0(t *testing.T) {
|
||||
HandleFunc("miek.nl.", HelloServer)
|
||||
defer HandleRemove("miek.nl.")
|
||||
|
25
Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
25
Godeps/_workspace/src/github.com/miekg/dns/dnssec_keyscan.go
generated
vendored
@ -6,7 +6,6 @@ import (
|
||||
"crypto/rsa"
|
||||
"io"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@ -35,12 +34,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
switch uint8(algo) {
|
||||
case DSA:
|
||||
switch m["algorithm"] {
|
||||
case "3 (DSA)":
|
||||
priv, e := readPrivateKeyDSA(m)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
@ -51,15 +46,15 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) {
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return (*DSAPrivateKey)(priv), e
|
||||
case RSAMD5:
|
||||
case "1 (RSAMD5)":
|
||||
fallthrough
|
||||
case RSASHA1:
|
||||
case "5 (RSASHA1)":
|
||||
fallthrough
|
||||
case RSASHA1NSEC3SHA1:
|
||||
case "7 (RSASHA1NSEC3SHA1)":
|
||||
fallthrough
|
||||
case RSASHA256:
|
||||
case "8 (RSASHA256)":
|
||||
fallthrough
|
||||
case RSASHA512:
|
||||
case "10 (RSASHA512)":
|
||||
priv, e := readPrivateKeyRSA(m)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
@ -70,11 +65,11 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (PrivateKey, error) {
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return (*RSAPrivateKey)(priv), e
|
||||
case ECCGOST:
|
||||
case "12 (ECC-GOST)":
|
||||
return nil, ErrPrivKey
|
||||
case ECDSAP256SHA256:
|
||||
case "13 (ECDSAP256SHA256)":
|
||||
fallthrough
|
||||
case ECDSAP384SHA384:
|
||||
case "14 (ECDSAP384SHA384)":
|
||||
priv, e := readPrivateKeyECDSA(m)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
|
83
Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
generated
vendored
83
Godeps/_workspace/src/github.com/miekg/dns/parse_test.go
generated
vendored
@ -548,9 +548,6 @@ a.example.com. IN A 127.0.0.1
|
||||
8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG
|
||||
$ORIGIN a.example.com.
|
||||
test IN A 127.0.0.1
|
||||
IN SSHFP 1 2 (
|
||||
BC6533CDC95A79078A39A56EA7635984ED655318ADA9
|
||||
B6159E30723665DA95BB )
|
||||
$ORIGIN b.example.com.
|
||||
test IN CNAME test.a.example.com.
|
||||
`
|
||||
@ -907,9 +904,8 @@ func TestILNP(t *testing.T) {
|
||||
|
||||
func TestNsapGposEidNimloc(t *testing.T) {
|
||||
dt := map[string]string{
|
||||
"foo.bar.com. IN NSAP 21 47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t0x47000580ffff000000321099991111222233334444",
|
||||
"foo.bar.com. IN NSAP 0x47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t0x47000580ffff000000321099991111222233334444",
|
||||
"host.school.de IN NSAP 17 39276f3100111100002222333344449876": "host.school.de.\t3600\tIN\tNSAP\t0x39276f3100111100002222333344449876",
|
||||
"foo.bar.com. IN NSAP 21 47000580ffff000000321099991111222233334444": "foo.bar.com.\t3600\tIN\tNSAP\t21 47000580ffff000000321099991111222233334444",
|
||||
"host.school.de IN NSAP 17 39276f3100111100002222333344449876": "host.school.de.\t3600\tIN\tNSAP\t17 39276f3100111100002222333344449876",
|
||||
"444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.",
|
||||
"lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0",
|
||||
"hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0",
|
||||
@ -1380,78 +1376,3 @@ func TestParseIPSECKEY(t *testing.T) {
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseTokenOverflow(t *testing.T) {
|
||||
_, err := NewRR("_443._tcp.example.org. IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f446967694365727453484132486967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797")
|
||||
if err == nil {
|
||||
t.Fatalf("token overflow should return an error")
|
||||
}
|
||||
t.Logf("err: %s\n", err)
|
||||
}
|
||||
|
||||
func TestParseTLSA(t *testing.T) {
|
||||
lt := []string{
|
||||
"_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00",
|
||||
"_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3",
|
||||
"_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2",
|
||||
}
|
||||
for _, o := range lt {
|
||||
rr, err := NewRR(o)
|
||||
if err != nil {
|
||||
t.Error("failed to parse RR: ", err)
|
||||
continue
|
||||
}
|
||||
if rr.String() != o {
|
||||
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String())
|
||||
} else {
|
||||
t.Logf("RR is OK: `%s'", rr.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSSHFP(t *testing.T) {
|
||||
lt := []string{
|
||||
"test.example.org.\t300\tSSHFP\t1 2 (\n" +
|
||||
"\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" +
|
||||
"\t\t\t\t\tB6159E30723665DA95BB )",
|
||||
"test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )",
|
||||
}
|
||||
result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB"
|
||||
for _, o := range lt {
|
||||
rr, err := NewRR(o)
|
||||
if err != nil {
|
||||
t.Error("failed to parse RR: ", err)
|
||||
continue
|
||||
}
|
||||
if rr.String() != result {
|
||||
t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String())
|
||||
} else {
|
||||
t.Logf("RR is OK: `%s'", rr.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseHINFO(t *testing.T) {
|
||||
dt := map[string]string{
|
||||
"example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"",
|
||||
"example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"",
|
||||
"example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"",
|
||||
"example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"",
|
||||
// "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"",
|
||||
// This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html
|
||||
// but effectively, even Bind would replace it to correctly formed text when you AXFR
|
||||
// TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function
|
||||
}
|
||||
for i, o := range dt {
|
||||
rr, err := NewRR(i)
|
||||
if err != nil {
|
||||
t.Error("failed to parse RR: ", err)
|
||||
continue
|
||||
}
|
||||
if rr.String() != o {
|
||||
t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String())
|
||||
} else {
|
||||
t.Logf("RR is OK: `%s'", rr.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
10
Godeps/_workspace/src/github.com/miekg/dns/server_test.go
generated
vendored
10
Godeps/_workspace/src/github.com/miekg/dns/server_test.go
generated
vendored
@ -17,16 +17,6 @@ func HelloServer(w ResponseWriter, req *Msg) {
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
func HelloServerBadId(w ResponseWriter, req *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetReply(req)
|
||||
m.Id += 1
|
||||
|
||||
m.Extra = make([]RR, 1)
|
||||
m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
|
||||
w.WriteMsg(m)
|
||||
}
|
||||
|
||||
func AnotherHelloServer(w ResponseWriter, req *Msg) {
|
||||
m := new(Msg)
|
||||
m.SetReply(req)
|
||||
|
15
Godeps/_workspace/src/github.com/miekg/dns/types.go
generated
vendored
15
Godeps/_workspace/src/github.com/miekg/dns/types.go
generated
vendored
@ -255,10 +255,8 @@ type HINFO struct {
|
||||
|
||||
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *HINFO) copy() RR { return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} }
|
||||
func (rr *HINFO) String() string {
|
||||
return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
|
||||
}
|
||||
func (rr *HINFO) len() int { return rr.Hdr.len() + len(rr.Cpu) + len(rr.Os) }
|
||||
func (rr *HINFO) String() string { return rr.Hdr.String() + rr.Cpu + " " + rr.Os }
|
||||
func (rr *HINFO) len() int { return rr.Hdr.len() + len(rr.Cpu) + len(rr.Os) }
|
||||
|
||||
type MB struct {
|
||||
Hdr RR_Header
|
||||
@ -1148,13 +1146,14 @@ func (rr *RKEY) String() string {
|
||||
}
|
||||
|
||||
type NSAP struct {
|
||||
Hdr RR_Header
|
||||
Nsap string
|
||||
Hdr RR_Header
|
||||
Length uint8
|
||||
Nsap string
|
||||
}
|
||||
|
||||
func (rr *NSAP) Header() *RR_Header { return &rr.Hdr }
|
||||
func (rr *NSAP) copy() RR { return &NSAP{*rr.Hdr.copyHeader(), rr.Nsap} }
|
||||
func (rr *NSAP) String() string { return rr.Hdr.String() + "0x" + rr.Nsap }
|
||||
func (rr *NSAP) copy() RR { return &NSAP{*rr.Hdr.copyHeader(), rr.Length, rr.Nsap} }
|
||||
func (rr *NSAP) String() string { return rr.Hdr.String() + strconv.Itoa(int(rr.Length)) + " " + rr.Nsap }
|
||||
func (rr *NSAP) len() int { return rr.Hdr.len() + 1 + len(rr.Nsap) + 1 }
|
||||
|
||||
type NSAPPTR struct {
|
||||
|
4
Godeps/_workspace/src/github.com/miekg/dns/zscan.go
generated
vendored
4
Godeps/_workspace/src/github.com/miekg/dns/zscan.go
generated
vendored
@ -500,14 +500,14 @@ func zlexer(s *scan, c chan lex) {
|
||||
for err == nil {
|
||||
l.column = s.position.Column
|
||||
l.line = s.position.Line
|
||||
if stri >= maxTok {
|
||||
if stri > maxTok {
|
||||
l.token = "token length insufficient for parsing"
|
||||
l.err = true
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
return
|
||||
}
|
||||
if comi >= maxTok {
|
||||
if comi > maxTok {
|
||||
l.token = "comment length insufficient for parsing"
|
||||
l.err = true
|
||||
debug.Printf("[%+v]", l.token)
|
||||
|
92
Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
generated
vendored
92
Godeps/_workspace/src/github.com/miekg/dns/zscan_rr.go
generated
vendored
@ -49,9 +49,6 @@ func endingToString(c chan lex, errstr, f string) (string, *ParseError, string)
|
||||
s := ""
|
||||
l := <-c // zString
|
||||
for l.value != zNewline && l.value != zEOF {
|
||||
if l.err {
|
||||
return s, &ParseError{f, errstr, l}, ""
|
||||
}
|
||||
switch l.value {
|
||||
case zString:
|
||||
s += l.token
|
||||
@ -71,17 +68,11 @@ func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, stri
|
||||
quote := false
|
||||
l := <-c
|
||||
var s []string
|
||||
if l.err {
|
||||
return s, &ParseError{f, errstr, l}, ""
|
||||
}
|
||||
switch l.value == zQuote {
|
||||
case true: // A number of quoted string
|
||||
s = make([]string, 0)
|
||||
empty := true
|
||||
for l.value != zNewline && l.value != zEOF {
|
||||
if l.err {
|
||||
return nil, &ParseError{f, errstr, l}, ""
|
||||
}
|
||||
switch l.value {
|
||||
case zString:
|
||||
empty = false
|
||||
@ -100,7 +91,7 @@ func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, stri
|
||||
p, i = p+255, i+255
|
||||
}
|
||||
s = append(s, sx...)
|
||||
break
|
||||
break;
|
||||
}
|
||||
|
||||
s = append(s, l.token)
|
||||
@ -126,9 +117,6 @@ func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, stri
|
||||
case false: // Unquoted text record
|
||||
s = make([]string, 1)
|
||||
for l.value != zNewline && l.value != zEOF {
|
||||
if l.err {
|
||||
return s, &ParseError{f, errstr, l}, ""
|
||||
}
|
||||
s[0] += l.token
|
||||
l = <-c
|
||||
}
|
||||
@ -345,24 +333,11 @@ func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
rr := new(HINFO)
|
||||
rr.Hdr = h
|
||||
|
||||
chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f)
|
||||
if e != nil {
|
||||
return nil, e, c1
|
||||
}
|
||||
|
||||
if ln := len(chunks); ln == 0 {
|
||||
return rr, nil, ""
|
||||
} else if ln == 1 {
|
||||
// Can we split it?
|
||||
if out := strings.Fields(chunks[0]); len(out) > 1 {
|
||||
chunks = out
|
||||
} else {
|
||||
chunks = append(chunks, "")
|
||||
}
|
||||
}
|
||||
|
||||
rr.Cpu = chunks[0]
|
||||
rr.Os = strings.Join(chunks[1:], " ")
|
||||
l := <-c
|
||||
rr.Cpu = l.token
|
||||
<-c // zBlank
|
||||
l = <-c // zString
|
||||
rr.Os = l.token
|
||||
|
||||
return rr, nil, ""
|
||||
}
|
||||
@ -1463,9 +1438,9 @@ func setWKS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
case zString:
|
||||
if k, err = net.LookupPort(proto, l.token); err != nil {
|
||||
if i, e := strconv.Atoi(l.token); e != nil { // If a number use that
|
||||
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
|
||||
} else {
|
||||
rr.BitMap = append(rr.BitMap, uint16(i))
|
||||
} else {
|
||||
return nil, &ParseError{f, "bad WKS BitMap", l}, ""
|
||||
}
|
||||
}
|
||||
rr.BitMap = append(rr.BitMap, uint16(k))
|
||||
@ -1498,11 +1473,8 @@ func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
}
|
||||
rr.Type = uint8(i)
|
||||
<-c // zBlank
|
||||
s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f)
|
||||
if e1 != nil {
|
||||
return nil, e1, c1
|
||||
}
|
||||
rr.FingerPrint = s
|
||||
l = <-c
|
||||
rr.FingerPrint = l.token
|
||||
return rr, nil, ""
|
||||
}
|
||||
|
||||
@ -1622,28 +1594,21 @@ func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
func setNSAP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
rr := new(NSAP)
|
||||
rr.Hdr = h
|
||||
chunks, e1, c1 := endingToTxtSlice(c, "bad NSAP Nsap", f)
|
||||
if e1 != nil {
|
||||
l := <-c
|
||||
if l.length == 0 {
|
||||
return rr, nil, l.comment
|
||||
}
|
||||
i, e := strconv.Atoi(l.token)
|
||||
if e != nil {
|
||||
return nil, &ParseError{f, "bad NSAP Length", l}, ""
|
||||
}
|
||||
rr.Length = uint8(i)
|
||||
<-c // zBlank
|
||||
s, e1, c1 := endingToString(c, "bad NSAP Nsap", f)
|
||||
if e != nil {
|
||||
return nil, e1, c1
|
||||
}
|
||||
// data would come as one string or multiple... Just to ignore possible
|
||||
// variety let's merge things back together and split to actual "words"
|
||||
s := strings.Fields(strings.Join(chunks, " "))
|
||||
if len(s) == 0 {
|
||||
return rr, nil, c1
|
||||
}
|
||||
if len(s[0]) >= 2 && s[0][0:2] == "0x" || s[0][0:2] == "0X" {
|
||||
// although RFC only suggests 0x there is no clarification that X is not allowed
|
||||
rr.Nsap = strings.Join(s, "")[2:]
|
||||
} else {
|
||||
// since we do not know what to do with this data, and, we would not use original length
|
||||
// in formatting, it's moot to check correctness of the length
|
||||
_, err := strconv.Atoi(s[0])
|
||||
if err != nil {
|
||||
return nil, &ParseError{f, "bad NSAP Length", lex{token: s[0]}}, ""
|
||||
}
|
||||
rr.Nsap = strings.Join(s[1:], "")
|
||||
}
|
||||
rr.Nsap = s
|
||||
return rr, nil, c1
|
||||
}
|
||||
|
||||
@ -1799,10 +1764,9 @@ func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
|
||||
return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
|
||||
}
|
||||
rr.MatchingType = uint8(i)
|
||||
// So this needs be e2 (i.e. different than e), because...??t
|
||||
s, e2, c1 := endingToString(c, "bad TLSA Certificate", f)
|
||||
if e2 != nil {
|
||||
return nil, e2, c1
|
||||
s, e, c1 := endingToString(c, "bad TLSA Certificate", f)
|
||||
if e != nil {
|
||||
return nil, e.(*ParseError), c1
|
||||
}
|
||||
rr.Certificate = s
|
||||
return rr, nil, c1
|
||||
@ -2189,7 +2153,7 @@ var typeToparserFunc = map[uint16]parserFunc{
|
||||
TypeEUI64: parserFunc{setEUI64, false},
|
||||
TypeGID: parserFunc{setGID, false},
|
||||
TypeGPOS: parserFunc{setGPOS, false},
|
||||
TypeHINFO: parserFunc{setHINFO, true},
|
||||
TypeHINFO: parserFunc{setHINFO, false},
|
||||
TypeHIP: parserFunc{setHIP, true},
|
||||
TypeIPSECKEY: parserFunc{setIPSECKEY, true},
|
||||
TypeKX: parserFunc{setKX, false},
|
||||
@ -2225,7 +2189,7 @@ var typeToparserFunc = map[uint16]parserFunc{
|
||||
TypeSOA: parserFunc{setSOA, false},
|
||||
TypeSPF: parserFunc{setSPF, true},
|
||||
TypeSRV: parserFunc{setSRV, false},
|
||||
TypeSSHFP: parserFunc{setSSHFP, true},
|
||||
TypeSSHFP: parserFunc{setSSHFP, false},
|
||||
TypeTALINK: parserFunc{setTALINK, false},
|
||||
TypeTA: parserFunc{setTA, true},
|
||||
TypeTLSA: parserFunc{setTLSA, true},
|
||||
|
413
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db.go
generated
vendored
@ -63,14 +63,13 @@ type DB struct // DB is a LevelDB database.
|
||||
journalAckC chan error
|
||||
|
||||
// Compaction.
|
||||
tcompCmdC chan cCmd
|
||||
tcompPauseC chan chan<- struct{}
|
||||
mcompCmdC chan cCmd
|
||||
compErrC chan error
|
||||
compPerErrC chan error
|
||||
compErrSetC chan error
|
||||
compWriteLocking bool
|
||||
compStats []cStats
|
||||
tcompCmdC chan cCmd
|
||||
tcompPauseC chan chan<- struct{}
|
||||
mcompCmdC chan cCmd
|
||||
compErrC chan error
|
||||
compPerErrC chan error
|
||||
compErrSetC chan error
|
||||
compStats []cStats
|
||||
|
||||
// Close.
|
||||
closeW sync.WaitGroup
|
||||
@ -109,44 +108,28 @@ func openDB(s *session) (*DB, error) {
|
||||
closeC: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Read-only mode.
|
||||
readOnly := s.o.GetReadOnly()
|
||||
if err := db.recoverJournal(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if readOnly {
|
||||
// Recover journals (read-only mode).
|
||||
if err := db.recoverJournalRO(); err != nil {
|
||||
return nil, err
|
||||
// Remove any obsolete files.
|
||||
if err := db.checkAndCleanFiles(); err != nil {
|
||||
// Close journal.
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
}
|
||||
} else {
|
||||
// Recover journals.
|
||||
if err := db.recoverJournal(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove any obsolete files.
|
||||
if err := db.checkAndCleanFiles(); err != nil {
|
||||
// Close journal.
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Doesn't need to be included in the wait group.
|
||||
go db.compactionError()
|
||||
go db.mpoolDrain()
|
||||
|
||||
if readOnly {
|
||||
db.SetReadOnly()
|
||||
} else {
|
||||
db.closeW.Add(3)
|
||||
go db.tCompaction()
|
||||
go db.mCompaction()
|
||||
go db.jWriter()
|
||||
}
|
||||
db.closeW.Add(3)
|
||||
go db.tCompaction()
|
||||
go db.mCompaction()
|
||||
go db.jWriter()
|
||||
|
||||
s.logf("db@open done T·%v", time.Since(start))
|
||||
|
||||
@ -292,7 +275,7 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
// We will drop corrupted table.
|
||||
strict = o.GetStrict(opt.StrictRecovery)
|
||||
|
||||
rec = &sessionRecord{}
|
||||
rec = &sessionRecord{numLevel: o.GetNumLevel()}
|
||||
bpool = util.NewBufferPool(o.GetBlockSize() + 5)
|
||||
)
|
||||
buildTable := func(iter iterator.Iterator) (tmp storage.File, size int64, err error) {
|
||||
@ -467,136 +450,132 @@ func recoverTable(s *session, o *opt.Options) error {
|
||||
}
|
||||
|
||||
func (db *DB) recoverJournal() error {
|
||||
// Get all journals and sort it by file number.
|
||||
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
|
||||
// Get all tables and sort it by file number.
|
||||
journalFiles_, err := db.s.getFiles(storage.TypeJournal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files(allJournalFiles).sort()
|
||||
journalFiles := files(journalFiles_)
|
||||
journalFiles.sort()
|
||||
|
||||
// Journals that will be recovered.
|
||||
var recJournalFiles []storage.File
|
||||
for _, jf := range allJournalFiles {
|
||||
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
|
||||
recJournalFiles = append(recJournalFiles, jf)
|
||||
// Discard older journal.
|
||||
prev := -1
|
||||
for i, file := range journalFiles {
|
||||
if file.Num() >= db.s.stJournalNum {
|
||||
if prev >= 0 {
|
||||
i--
|
||||
journalFiles[i] = journalFiles[prev]
|
||||
}
|
||||
journalFiles = journalFiles[i:]
|
||||
break
|
||||
} else if file.Num() == db.s.stPrevJournalNum {
|
||||
prev = i
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
of storage.File // Obsolete file.
|
||||
rec = &sessionRecord{}
|
||||
)
|
||||
var jr *journal.Reader
|
||||
var of storage.File
|
||||
var mem *memdb.DB
|
||||
batch := new(Batch)
|
||||
cm := newCMem(db.s)
|
||||
buf := new(util.Buffer)
|
||||
// Options.
|
||||
strict := db.s.o.GetStrict(opt.StrictJournal)
|
||||
checksum := db.s.o.GetStrict(opt.StrictJournalChecksum)
|
||||
writeBuffer := db.s.o.GetWriteBuffer()
|
||||
recoverJournal := func(file storage.File) error {
|
||||
db.logf("journal@recovery recovering @%d", file.Num())
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
// Recover journals.
|
||||
if len(recJournalFiles) > 0 {
|
||||
db.logf("journal@recovery F·%d", len(recJournalFiles))
|
||||
|
||||
// Mark file number as used.
|
||||
db.s.markFileNum(recJournalFiles[len(recJournalFiles)-1].Num())
|
||||
|
||||
var (
|
||||
// Options.
|
||||
strict = db.s.o.GetStrict(opt.StrictJournal)
|
||||
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
|
||||
writeBuffer = db.s.o.GetWriteBuffer()
|
||||
|
||||
jr *journal.Reader
|
||||
mdb = memdb.New(db.s.icmp, writeBuffer)
|
||||
buf = &util.Buffer{}
|
||||
batch = &Batch{}
|
||||
)
|
||||
|
||||
for _, jf := range recJournalFiles {
|
||||
db.logf("journal@recovery recovering @%d", jf.Num())
|
||||
|
||||
fr, err := jf.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create or reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
|
||||
}
|
||||
|
||||
// Flush memdb and remove obsolete journal file.
|
||||
if of != nil {
|
||||
if mdb.Len() > 0 {
|
||||
if _, err := db.s.flushMemdb(rec, mdb, -1); err != nil {
|
||||
fr.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
rec.setJournalNum(jf.Num())
|
||||
rec.setSeqNum(db.seq)
|
||||
if err := db.s.commit(rec); err != nil {
|
||||
fr.Close()
|
||||
return err
|
||||
}
|
||||
rec.resetAddedTables()
|
||||
|
||||
of.Remove()
|
||||
of = nil
|
||||
}
|
||||
|
||||
// Replay journal to memdb.
|
||||
mdb.Reset()
|
||||
for {
|
||||
r, err := jr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
if _, err := buf.ReadFrom(r); err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// This is error returned due to corruption, with strict == false.
|
||||
continue
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
||||
if !strict && errors.IsCorrupted(err) {
|
||||
db.s.logf("journal error: %v (skipped)", err)
|
||||
// We won't apply sequence number as it might be corrupted.
|
||||
continue
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
|
||||
// Save sequence number.
|
||||
db.seq = batch.seq + uint64(batch.Len())
|
||||
|
||||
// Flush it if large enough.
|
||||
if mdb.Size() >= writeBuffer {
|
||||
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
|
||||
fr.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
mdb.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
of = jf
|
||||
// Create/reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(reader, dropper{db.s, file}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(reader, dropper{db.s, file}, strict, checksum)
|
||||
}
|
||||
|
||||
// Flush the last memdb.
|
||||
if mdb.Len() > 0 {
|
||||
if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
|
||||
// Flush memdb and remove obsolete journal file.
|
||||
if of != nil {
|
||||
if mem.Len() > 0 {
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := cm.commit(file.Num(), db.seq); err != nil {
|
||||
return err
|
||||
}
|
||||
cm.reset()
|
||||
of.Remove()
|
||||
of = nil
|
||||
}
|
||||
|
||||
// Replay journal to memdb.
|
||||
mem.Reset()
|
||||
for {
|
||||
r, err := jr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return errors.SetFile(err, file)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
if _, err := buf.ReadFrom(r); err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// This is error returned due to corruption, with strict == false.
|
||||
continue
|
||||
} else {
|
||||
return errors.SetFile(err, file)
|
||||
}
|
||||
}
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mem); err != nil {
|
||||
if strict || !errors.IsCorrupted(err) {
|
||||
return errors.SetFile(err, file)
|
||||
} else {
|
||||
db.s.logf("journal error: %v (skipped)", err)
|
||||
// We won't apply sequence number as it might be corrupted.
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Save sequence number.
|
||||
db.seq = batch.seq + uint64(batch.Len())
|
||||
|
||||
// Flush it if large enough.
|
||||
if mem.Size() >= writeBuffer {
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
mem.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
of = file
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recover all journals.
|
||||
if len(journalFiles) > 0 {
|
||||
db.logf("journal@recovery F·%d", len(journalFiles))
|
||||
|
||||
// Mark file number as used.
|
||||
db.s.markFileNum(journalFiles[len(journalFiles)-1].Num())
|
||||
|
||||
mem = memdb.New(db.s.icmp, writeBuffer)
|
||||
for _, file := range journalFiles {
|
||||
if err := recoverJournal(file); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the last journal.
|
||||
if mem.Len() > 0 {
|
||||
if err := cm.flush(mem, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -608,10 +587,8 @@ func (db *DB) recoverJournal() error {
|
||||
}
|
||||
|
||||
// Commit.
|
||||
rec.setJournalNum(db.journalFile.Num())
|
||||
rec.setSeqNum(db.seq)
|
||||
if err := db.s.commit(rec); err != nil {
|
||||
// Close journal on error.
|
||||
if err := cm.commit(db.journalFile.Num(), db.seq); err != nil {
|
||||
// Close journal.
|
||||
if db.journal != nil {
|
||||
db.journal.Close()
|
||||
db.journalWriter.Close()
|
||||
@ -627,103 +604,6 @@ func (db *DB) recoverJournal() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) recoverJournalRO() error {
|
||||
// Get all journals and sort it by file number.
|
||||
allJournalFiles, err := db.s.getFiles(storage.TypeJournal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files(allJournalFiles).sort()
|
||||
|
||||
// Journals that will be recovered.
|
||||
var recJournalFiles []storage.File
|
||||
for _, jf := range allJournalFiles {
|
||||
if jf.Num() >= db.s.stJournalNum || jf.Num() == db.s.stPrevJournalNum {
|
||||
recJournalFiles = append(recJournalFiles, jf)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// Options.
|
||||
strict = db.s.o.GetStrict(opt.StrictJournal)
|
||||
checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
|
||||
writeBuffer = db.s.o.GetWriteBuffer()
|
||||
|
||||
mdb = memdb.New(db.s.icmp, writeBuffer)
|
||||
)
|
||||
|
||||
// Recover journals.
|
||||
if len(recJournalFiles) > 0 {
|
||||
db.logf("journal@recovery RO·Mode F·%d", len(recJournalFiles))
|
||||
|
||||
var (
|
||||
jr *journal.Reader
|
||||
buf = &util.Buffer{}
|
||||
batch = &Batch{}
|
||||
)
|
||||
|
||||
for _, jf := range recJournalFiles {
|
||||
db.logf("journal@recovery recovering @%d", jf.Num())
|
||||
|
||||
fr, err := jf.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create or reset journal reader instance.
|
||||
if jr == nil {
|
||||
jr = journal.NewReader(fr, dropper{db.s, jf}, strict, checksum)
|
||||
} else {
|
||||
jr.Reset(fr, dropper{db.s, jf}, strict, checksum)
|
||||
}
|
||||
|
||||
// Replay journal to memdb.
|
||||
for {
|
||||
r, err := jr.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
if _, err := buf.ReadFrom(r); err != nil {
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
// This is error returned due to corruption, with strict == false.
|
||||
continue
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
if err := batch.memDecodeAndReplay(db.seq, buf.Bytes(), mdb); err != nil {
|
||||
if !strict && errors.IsCorrupted(err) {
|
||||
db.s.logf("journal error: %v (skipped)", err)
|
||||
// We won't apply sequence number as it might be corrupted.
|
||||
continue
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
return errors.SetFile(err, jf)
|
||||
}
|
||||
|
||||
// Save sequence number.
|
||||
db.seq = batch.seq + uint64(batch.Len())
|
||||
}
|
||||
|
||||
fr.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// Set memDB.
|
||||
db.mem = &memDB{db: db, DB: mdb, ref: 1}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
|
||||
ikey := newIkey(key, seq, ktSeek)
|
||||
|
||||
@ -734,7 +614,7 @@ func (db *DB) get(key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, er
|
||||
}
|
||||
defer m.decref()
|
||||
|
||||
mk, mv, me := m.Find(ikey)
|
||||
mk, mv, me := m.mdb.Find(ikey)
|
||||
if me == nil {
|
||||
ukey, _, kt, kerr := parseIkey(mk)
|
||||
if kerr != nil {
|
||||
@ -772,7 +652,7 @@ func (db *DB) has(key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err er
|
||||
}
|
||||
defer m.decref()
|
||||
|
||||
mk, _, me := m.Find(ikey)
|
||||
mk, _, me := m.mdb.Find(ikey)
|
||||
if me == nil {
|
||||
ukey, _, kt, kerr := parseIkey(mk)
|
||||
if kerr != nil {
|
||||
@ -904,7 +784,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
|
||||
const prefix = "leveldb."
|
||||
if !strings.HasPrefix(name, prefix) {
|
||||
return "", ErrNotFound
|
||||
return "", errors.New("leveldb: GetProperty: unknown property: " + name)
|
||||
}
|
||||
p := name[len(prefix):]
|
||||
|
||||
@ -918,7 +798,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
var rest string
|
||||
n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
|
||||
if n != 1 || int(level) >= db.s.o.GetNumLevel() {
|
||||
err = ErrNotFound
|
||||
err = errors.New("leveldb: GetProperty: invalid property: " + name)
|
||||
} else {
|
||||
value = fmt.Sprint(v.tLen(int(level)))
|
||||
}
|
||||
@ -957,7 +837,7 @@ func (db *DB) GetProperty(name string) (value string, err error) {
|
||||
case p == "aliveiters":
|
||||
value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
|
||||
default:
|
||||
err = ErrNotFound
|
||||
err = errors.New("leveldb: GetProperty: unknown property: " + name)
|
||||
}
|
||||
|
||||
return
|
||||
@ -1020,9 +900,6 @@ func (db *DB) Close() error {
|
||||
var err error
|
||||
select {
|
||||
case err = <-db.compErrC:
|
||||
if err == ErrReadOnly {
|
||||
err = nil
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
|
114
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_compaction.go
generated
vendored
@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
@ -61,8 +62,58 @@ func (p *cStatsStaging) stopTimer() {
|
||||
}
|
||||
}
|
||||
|
||||
type cMem struct {
|
||||
s *session
|
||||
level int
|
||||
rec *sessionRecord
|
||||
}
|
||||
|
||||
func newCMem(s *session) *cMem {
|
||||
return &cMem{s: s, rec: &sessionRecord{numLevel: s.o.GetNumLevel()}}
|
||||
}
|
||||
|
||||
func (c *cMem) flush(mem *memdb.DB, level int) error {
|
||||
s := c.s
|
||||
|
||||
// Write memdb to table.
|
||||
iter := mem.NewIterator(nil)
|
||||
defer iter.Release()
|
||||
t, n, err := s.tops.createFrom(iter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Pick level.
|
||||
if level < 0 {
|
||||
v := s.version()
|
||||
level = v.pickLevel(t.imin.ukey(), t.imax.ukey())
|
||||
v.release()
|
||||
}
|
||||
c.rec.addTableFile(level, t)
|
||||
|
||||
s.logf("mem@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
|
||||
|
||||
c.level = level
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *cMem) reset() {
|
||||
c.rec = &sessionRecord{numLevel: c.s.o.GetNumLevel()}
|
||||
}
|
||||
|
||||
func (c *cMem) commit(journal, seq uint64) error {
|
||||
c.rec.setJournalNum(journal)
|
||||
c.rec.setSeqNum(seq)
|
||||
|
||||
// Commit changes.
|
||||
return c.s.commit(c.rec)
|
||||
}
|
||||
|
||||
func (db *DB) compactionError() {
|
||||
var err error
|
||||
var (
|
||||
err error
|
||||
wlocked bool
|
||||
)
|
||||
noerr:
|
||||
// No error.
|
||||
for {
|
||||
@ -70,7 +121,7 @@ noerr:
|
||||
case err = <-db.compErrSetC:
|
||||
switch {
|
||||
case err == nil:
|
||||
case err == ErrReadOnly, errors.IsCorrupted(err):
|
||||
case errors.IsCorrupted(err):
|
||||
goto hasperr
|
||||
default:
|
||||
goto haserr
|
||||
@ -88,7 +139,7 @@ haserr:
|
||||
switch {
|
||||
case err == nil:
|
||||
goto noerr
|
||||
case err == ErrReadOnly, errors.IsCorrupted(err):
|
||||
case errors.IsCorrupted(err):
|
||||
goto hasperr
|
||||
default:
|
||||
}
|
||||
@ -104,9 +155,9 @@ hasperr:
|
||||
case db.compPerErrC <- err:
|
||||
case db.writeLockC <- struct{}{}:
|
||||
// Hold write lock, so that write won't pass-through.
|
||||
db.compWriteLocking = true
|
||||
wlocked = true
|
||||
case _, _ = <-db.closeC:
|
||||
if db.compWriteLocking {
|
||||
if wlocked {
|
||||
// We should release the lock or Close will hang.
|
||||
<-db.writeLockC
|
||||
}
|
||||
@ -236,18 +287,21 @@ func (db *DB) compactionExitTransact() {
|
||||
}
|
||||
|
||||
func (db *DB) memCompaction() {
|
||||
mdb := db.getFrozenMem()
|
||||
if mdb == nil {
|
||||
mem := db.getFrozenMem()
|
||||
if mem == nil {
|
||||
return
|
||||
}
|
||||
defer mdb.decref()
|
||||
defer mem.decref()
|
||||
|
||||
db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
|
||||
c := newCMem(db.s)
|
||||
stats := new(cStatsStaging)
|
||||
|
||||
db.logf("mem@flush N·%d S·%s", mem.mdb.Len(), shortenb(mem.mdb.Size()))
|
||||
|
||||
// Don't compact empty memdb.
|
||||
if mdb.Len() == 0 {
|
||||
db.logf("memdb@flush skipping")
|
||||
// drop frozen memdb
|
||||
if mem.mdb.Len() == 0 {
|
||||
db.logf("mem@flush skipping")
|
||||
// drop frozen mem
|
||||
db.dropFrozenMem()
|
||||
return
|
||||
}
|
||||
@ -263,20 +317,13 @@ func (db *DB) memCompaction() {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
rec = &sessionRecord{}
|
||||
stats = &cStatsStaging{}
|
||||
flushLevel int
|
||||
)
|
||||
|
||||
db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
|
||||
db.compactionTransactFunc("mem@flush", func(cnt *compactionTransactCounter) (err error) {
|
||||
stats.startTimer()
|
||||
flushLevel, err = db.s.flushMemdb(rec, mdb.DB, -1)
|
||||
stats.stopTimer()
|
||||
return
|
||||
defer stats.stopTimer()
|
||||
return c.flush(mem.mdb, -1)
|
||||
}, func() error {
|
||||
for _, r := range rec.addedTables {
|
||||
db.logf("memdb@flush revert @%d", r.num)
|
||||
for _, r := range c.rec.addedTables {
|
||||
db.logf("mem@flush revert @%d", r.num)
|
||||
f := db.s.getTableFile(r.num)
|
||||
if err := f.Remove(); err != nil {
|
||||
return err
|
||||
@ -285,23 +332,20 @@ func (db *DB) memCompaction() {
|
||||
return nil
|
||||
})
|
||||
|
||||
db.compactionTransactFunc("memdb@commit", func(cnt *compactionTransactCounter) (err error) {
|
||||
db.compactionTransactFunc("mem@commit", func(cnt *compactionTransactCounter) (err error) {
|
||||
stats.startTimer()
|
||||
rec.setJournalNum(db.journalFile.Num())
|
||||
rec.setSeqNum(db.frozenSeq)
|
||||
err = db.s.commit(rec)
|
||||
stats.stopTimer()
|
||||
return
|
||||
defer stats.stopTimer()
|
||||
return c.commit(db.journalFile.Num(), db.frozenSeq)
|
||||
}, nil)
|
||||
|
||||
db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
|
||||
db.logf("mem@flush committed F·%d T·%v", len(c.rec.addedTables), stats.duration)
|
||||
|
||||
for _, r := range rec.addedTables {
|
||||
for _, r := range c.rec.addedTables {
|
||||
stats.write += r.size
|
||||
}
|
||||
db.compStats[flushLevel].add(stats)
|
||||
db.compStats[c.level].add(stats)
|
||||
|
||||
// Drop frozen memdb.
|
||||
// Drop frozen mem.
|
||||
db.dropFrozenMem()
|
||||
|
||||
// Resume table compaction.
|
||||
@ -513,7 +557,7 @@ func (b *tableCompactionBuilder) revert() error {
|
||||
func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
|
||||
defer c.release()
|
||||
|
||||
rec := &sessionRecord{}
|
||||
rec := &sessionRecord{numLevel: db.s.o.GetNumLevel()}
|
||||
rec.addCompPtr(c.level, c.imax)
|
||||
|
||||
if !noTrivial && c.trivial() {
|
||||
|
4
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_iter.go
generated
vendored
@ -40,11 +40,11 @@ func (db *DB) newRawIterator(slice *util.Range, ro *opt.ReadOptions) iterator.It
ti := v.getIterators(slice, ro)
n := len(ti) + 2
i := make([]iterator.Iterator, 0, n)
emi := em.NewIterator(slice)
emi := em.mdb.NewIterator(slice)
emi.SetReleaser(&memdbReleaser{m: em})
i = append(i, emi)
if fm != nil {
fmi := fm.NewIterator(slice)
fmi := fm.mdb.NewIterator(slice)
fmi.SetReleaser(&memdbReleaser{m: fm})
i = append(i, fmi)
}
14
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_state.go
generated
vendored
@ -15,8 +15,8 @@ import (
)

type memDB struct {
db *DB
*memdb.DB
db *DB
mdb *memdb.DB
ref int32
}

@ -27,12 +27,12 @@ func (m *memDB) incref() {
func (m *memDB) decref() {
if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
// Only put back memdb with std capacity.
if m.Capacity() == m.db.s.o.GetWriteBuffer() {
m.Reset()
m.db.mpoolPut(m.DB)
if m.mdb.Capacity() == m.db.s.o.GetWriteBuffer() {
m.mdb.Reset()
m.db.mpoolPut(m.mdb)
}
m.db = nil
m.DB = nil
m.mdb = nil
} else if ref < 0 {
panic("negative memdb ref")
}
@ -126,7 +126,7 @@ func (db *DB) newMem(n int) (mem *memDB, err error) {
}
mem = &memDB{
db: db,
DB: mdb,
mdb: mdb,
ref: 2,
}
db.mem = mem
44
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_test.go
generated
vendored
@ -2445,7 +2445,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
if err != nil {
t.Fatal(err)
}
rec := &sessionRecord{}
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
rec.addTableFile(i, tf)
if err := s.commit(rec); err != nil {
t.Fatal(err)
@ -2455,7 +2455,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build grandparent.
v := s.version()
c := newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec := &sessionRecord{}
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
b := &tableCompactionBuilder{
s: s,
c: c,
@ -2479,7 +2479,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Build level-1.
v = s.version()
c = newCompaction(s, v, 0, append(tFiles{}, v.tables[0]...))
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
b = &tableCompactionBuilder{
s: s,
c: c,
@ -2523,7 +2523,7 @@ func TestDB_TableCompactionBuilder(t *testing.T) {
// Compaction with transient error.
v = s.version()
c = newCompaction(s, v, 1, append(tFiles{}, v.tables[1]...))
rec = &sessionRecord{}
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
b = &tableCompactionBuilder{
s: s,
c: c,
@ -2663,39 +2663,3 @@ func TestDB_IterTriggeredCompaction(t *testing.T) {
func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
testDB_IterTriggeredCompaction(t, 2)
}

func TestDB_ReadOnly(t *testing.T) {
h := newDbHarness(t)
defer h.close()

h.put("foo", "v1")
h.put("bar", "v2")
h.compactMem()

h.put("xfoo", "v1")
h.put("xbar", "v2")

t.Log("Trigger read-only")
if err := h.db.SetReadOnly(); err != nil {
h.close()
t.Fatalf("SetReadOnly error: %v", err)
}

h.stor.SetEmuErr(storage.TypeAll, tsOpCreate, tsOpReplace, tsOpRemove, tsOpWrite, tsOpWrite, tsOpSync)

ro := func(key, value, wantValue string) {
if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
t.Fatalf("unexpected error: %v", err)
}
h.getVal(key, wantValue)
}

ro("foo", "vx", "v1")

h.o.ReadOnly = true
h.reopenDB()

ro("foo", "vx", "v1")
ro("bar", "vx", "v2")
h.assertNumKeys(4)
}
72
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/db_write.go
generated
vendored
@ -63,24 +63,24 @@ func (db *DB) rotateMem(n int) (mem *memDB, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
|
||||
func (db *DB) flush(n int) (mem *memDB, nn int, err error) {
|
||||
delayed := false
|
||||
flush := func() (retry bool) {
|
||||
v := db.s.version()
|
||||
defer v.release()
|
||||
mdb = db.getEffectiveMem()
|
||||
mem = db.getEffectiveMem()
|
||||
defer func() {
|
||||
if retry {
|
||||
mdb.decref()
|
||||
mdb = nil
|
||||
mem.decref()
|
||||
mem = nil
|
||||
}
|
||||
}()
|
||||
mdbFree = mdb.Free()
|
||||
nn = mem.mdb.Free()
|
||||
switch {
|
||||
case v.tLen(0) >= db.s.o.GetWriteL0SlowdownTrigger() && !delayed:
|
||||
delayed = true
|
||||
time.Sleep(time.Millisecond)
|
||||
case mdbFree >= n:
|
||||
case nn >= n:
|
||||
return false
|
||||
case v.tLen(0) >= db.s.o.GetWriteL0PauseTrigger():
|
||||
delayed = true
|
||||
@ -90,15 +90,15 @@ func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
|
||||
}
|
||||
default:
|
||||
// Allow memdb to grow if it has no entry.
|
||||
if mdb.Len() == 0 {
|
||||
mdbFree = n
|
||||
if mem.mdb.Len() == 0 {
|
||||
nn = n
|
||||
} else {
|
||||
mdb.decref()
|
||||
mdb, err = db.rotateMem(n)
|
||||
mem.decref()
|
||||
mem, err = db.rotateMem(n)
|
||||
if err == nil {
|
||||
mdbFree = mdb.Free()
|
||||
nn = mem.mdb.Free()
|
||||
} else {
|
||||
mdbFree = 0
|
||||
nn = 0
|
||||
}
|
||||
}
|
||||
return false
|
||||
@ -157,18 +157,18 @@ func (db *DB) Write(b *Batch, wo *opt.WriteOptions) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
mdb, mdbFree, err := db.flush(b.size())
|
||||
mem, memFree, err := db.flush(b.size())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer mdb.decref()
|
||||
defer mem.decref()
|
||||
|
||||
// Calculate maximum size of the batch.
|
||||
m := 1 << 20
|
||||
if x := b.size(); x <= 128<<10 {
|
||||
m = x + (128 << 10)
|
||||
}
|
||||
m = minInt(m, mdbFree)
|
||||
m = minInt(m, memFree)
|
||||
|
||||
// Merge with other batch.
|
||||
drain:
|
||||
@ -197,7 +197,7 @@ drain:
|
||||
select {
|
||||
case db.journalC <- b:
|
||||
// Write into memdb
|
||||
if berr := b.memReplay(mdb.DB); berr != nil {
|
||||
if berr := b.memReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
case err = <-db.compPerErrC:
|
||||
@ -211,7 +211,7 @@ drain:
|
||||
case err = <-db.journalAckC:
|
||||
if err != nil {
|
||||
// Revert memdb if error detected
|
||||
if berr := b.revertMemReplay(mdb.DB); berr != nil {
|
||||
if berr := b.revertMemReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
return
|
||||
@ -225,7 +225,7 @@ drain:
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if berr := b.memReplay(mdb.DB); berr != nil {
|
||||
if berr := b.memReplay(mem.mdb); berr != nil {
|
||||
panic(berr)
|
||||
}
|
||||
}
|
||||
@ -233,7 +233,7 @@ drain:
|
||||
// Set last seq number.
|
||||
db.addSeq(uint64(b.Len()))
|
||||
|
||||
if b.size() >= mdbFree {
|
||||
if b.size() >= memFree {
|
||||
db.rotateMem(0)
|
||||
}
|
||||
return
|
||||
@ -290,9 +290,9 @@ func (db *DB) CompactRange(r util.Range) error {
|
||||
}
|
||||
|
||||
// Check for overlaps in memdb.
|
||||
mdb := db.getEffectiveMem()
|
||||
defer mdb.decref()
|
||||
if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
|
||||
mem := db.getEffectiveMem()
|
||||
defer mem.decref()
|
||||
if isMemOverlaps(db.s.icmp, mem.mdb, r.Start, r.Limit) {
|
||||
// Memdb compaction.
|
||||
if _, err := db.rotateMem(0); err != nil {
|
||||
<-db.writeLockC
|
||||
@ -309,31 +309,3 @@ func (db *DB) CompactRange(r util.Range) error {
|
||||
// Table compaction.
|
||||
return db.compSendRange(db.tcompCmdC, -1, r.Start, r.Limit)
|
||||
}
|
||||
|
||||
// SetReadOnly makes DB read-only. It will stay read-only until reopened.
|
||||
func (db *DB) SetReadOnly() error {
|
||||
if err := db.ok(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Lock writer.
|
||||
select {
|
||||
case db.writeLockC <- struct{}{}:
|
||||
db.compWriteLocking = true
|
||||
case err := <-db.compPerErrC:
|
||||
return err
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
// Set compaction read-only.
|
||||
select {
|
||||
case db.compErrSetC <- ErrReadOnly:
|
||||
case perr := <-db.compPerErrC:
|
||||
return perr
|
||||
case _, _ = <-db.closeC:
|
||||
return ErrClosed
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
1
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors.go
generated
vendored
@ -12,7 +12,6 @@ import (

var (
ErrNotFound = errors.ErrNotFound
ErrReadOnly = errors.New("leveldb: read-only mode")
ErrSnapshotReleased = errors.New("leveldb: snapshot released")
ErrIterReleased = errors.New("leveldb: iterator released")
ErrClosed = errors.New("leveldb: closed")
9
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
generated
vendored
@ -206,7 +206,6 @@ func (p *DB) randHeight() (h int) {
return
}

// Must hold RW-lock if prev == true, as it use shared prevNode slice.
func (p *DB) findGE(key []byte, prev bool) (int, bool) {
node := 0
h := p.maxHeight - 1
@ -303,7 +302,7 @@ func (p *DB) Put(key []byte, value []byte) error {
node := len(p.nodeData)
p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
for i, n := range p.prevNode[:h] {
m := n + nNext + i
m := n + 4 + i
p.nodeData = append(p.nodeData, p.nodeData[m])
p.nodeData[m] = node
}
@ -435,22 +434,20 @@ func (p *DB) Len() int {

// Reset resets the DB to initial empty state. Allows reuse the buffer.
func (p *DB) Reset() {
p.mu.Lock()
p.rnd = rand.New(rand.NewSource(0xdeadbeef))
p.maxHeight = 1
p.n = 0
p.kvSize = 0
p.kvData = p.kvData[:0]
p.nodeData = p.nodeData[:nNext+tMaxHeight]
p.nodeData = p.nodeData[:4+tMaxHeight]
p.nodeData[nKV] = 0
p.nodeData[nKey] = 0
p.nodeData[nVal] = 0
p.nodeData[nHeight] = tMaxHeight
for n := 0; n < tMaxHeight; n++ {
p.nodeData[nNext+n] = 0
p.nodeData[4+n] = 0
p.prevNode[n] = 0
}
p.mu.Unlock()
}

// New creates a new initalized in-memory key/value DB. The capacity
31
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt/options.go
generated
vendored
@ -250,11 +250,6 @@ type Options struct {
// The default value (DefaultCompression) uses snappy compression.
Compression Compression

// DisableBufferPool allows disable use of util.BufferPool functionality.
//
// The default value is false.
DisableBufferPool bool

// DisableBlockCache allows disable use of cache.Cache functionality on
// 'sorted table' block.
//
@ -326,11 +321,6 @@ type Options struct {
// The default value is 500.
OpenFilesCacheCapacity int

// If true then opens DB in read-only mode.
//
// The default value is false.
ReadOnly bool

// Strict defines the DB strict level.
Strict Strict

@ -482,20 +472,6 @@ func (o *Options) GetCompression() Compression {
return o.Compression
}

func (o *Options) GetDisableBufferPool() bool {
if o == nil {
return false
}
return o.DisableBufferPool
}

func (o *Options) GetDisableBlockCache() bool {
if o == nil {
return false
}
return o.DisableBlockCache
}

func (o *Options) GetDisableCompactionBackoff() bool {
if o == nil {
return false
@ -572,13 +548,6 @@ func (o *Options) GetOpenFilesCacheCapacity() int {
return o.OpenFilesCacheCapacity
}

func (o *Options) GetReadOnly() bool {
if o == nil {
return false
}
return o.ReadOnly
}

func (o *Options) GetStrict(strict Strict) bool {
if o == nil || o.Strict == 0 {
return DefaultStrict&strict != 0
264
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session.go
generated
vendored
@ -11,8 +11,10 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/errors"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/journal"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage"
|
||||
@ -125,16 +127,11 @@ func (s *session) recover() (err error) {
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
strict := s.o.GetStrict(opt.StrictManifest)
|
||||
jr := journal.NewReader(reader, dropper{s, m}, strict, true)
|
||||
|
||||
var (
|
||||
// Options.
|
||||
numLevel = s.o.GetNumLevel()
|
||||
strict = s.o.GetStrict(opt.StrictManifest)
|
||||
|
||||
jr = journal.NewReader(reader, dropper{s, m}, strict, true)
|
||||
rec = &sessionRecord{}
|
||||
staging = s.stVersion.newStaging()
|
||||
)
|
||||
staging := s.stVersion.newStaging()
|
||||
rec := &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
for {
|
||||
var r io.Reader
|
||||
r, err = jr.Next()
|
||||
@ -146,7 +143,7 @@ func (s *session) recover() (err error) {
|
||||
return errors.SetFile(err, m)
|
||||
}
|
||||
|
||||
err = rec.decode(r, numLevel)
|
||||
err = rec.decode(r)
|
||||
if err == nil {
|
||||
// save compact pointers
|
||||
for _, r := range rec.compPtrs {
|
||||
@ -209,3 +206,250 @@ func (s *session) commit(r *sessionRecord) (err error) {
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Pick a compaction based on current state; need external synchronization.
|
||||
func (s *session) pickCompaction() *compaction {
|
||||
v := s.version()
|
||||
|
||||
var level int
|
||||
var t0 tFiles
|
||||
if v.cScore >= 1 {
|
||||
level = v.cLevel
|
||||
cptr := s.stCompPtrs[level]
|
||||
tables := v.tables[level]
|
||||
for _, t := range tables {
|
||||
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
|
||||
t0 = append(t0, t)
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(t0) == 0 {
|
||||
t0 = append(t0, tables[0])
|
||||
}
|
||||
} else {
|
||||
if p := atomic.LoadPointer(&v.cSeek); p != nil {
|
||||
ts := (*tSet)(p)
|
||||
level = ts.level
|
||||
t0 = append(t0, ts.table)
|
||||
} else {
|
||||
v.release()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
}
|
||||
|
||||
// Create compaction from given level and range; need external synchronization.
|
||||
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
||||
v := s.version()
|
||||
|
||||
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
|
||||
if len(t0) == 0 {
|
||||
v.release()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Avoid compacting too much in one shot in case the range is large.
|
||||
// But we cannot do this for level-0 since level-0 files can overlap
|
||||
// and we must not pick one file and drop another older file if the
|
||||
// two files overlap.
|
||||
if level > 0 {
|
||||
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
|
||||
total := uint64(0)
|
||||
for i, t := range t0 {
|
||||
total += t.size
|
||||
if total >= limit {
|
||||
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
|
||||
t0 = t0[:i+1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
}
|
||||
|
||||
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
|
||||
c := &compaction{
|
||||
s: s,
|
||||
v: v,
|
||||
level: level,
|
||||
tables: [2]tFiles{t0, nil},
|
||||
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
|
||||
tPtrs: make([]int, s.o.GetNumLevel()),
|
||||
}
|
||||
c.expand()
|
||||
c.save()
|
||||
return c
|
||||
}
|
||||
|
||||
// compaction represent a compaction state.
|
||||
type compaction struct {
|
||||
s *session
|
||||
v *version
|
||||
|
||||
level int
|
||||
tables [2]tFiles
|
||||
maxGPOverlaps uint64
|
||||
|
||||
gp tFiles
|
||||
gpi int
|
||||
seenKey bool
|
||||
gpOverlappedBytes uint64
|
||||
imin, imax iKey
|
||||
tPtrs []int
|
||||
released bool
|
||||
|
||||
snapGPI int
|
||||
snapSeenKey bool
|
||||
snapGPOverlappedBytes uint64
|
||||
snapTPtrs []int
|
||||
}
|
||||
|
||||
func (c *compaction) save() {
|
||||
c.snapGPI = c.gpi
|
||||
c.snapSeenKey = c.seenKey
|
||||
c.snapGPOverlappedBytes = c.gpOverlappedBytes
|
||||
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
|
||||
}
|
||||
|
||||
func (c *compaction) restore() {
|
||||
c.gpi = c.snapGPI
|
||||
c.seenKey = c.snapSeenKey
|
||||
c.gpOverlappedBytes = c.snapGPOverlappedBytes
|
||||
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
|
||||
}
|
||||
|
||||
func (c *compaction) release() {
|
||||
if !c.released {
|
||||
c.released = true
|
||||
c.v.release()
|
||||
}
|
||||
}
|
||||
|
||||
// Expand compacted tables; need external synchronization.
|
||||
func (c *compaction) expand() {
|
||||
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
|
||||
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
|
||||
|
||||
t0, t1 := c.tables[0], c.tables[1]
|
||||
imin, imax := t0.getRange(c.s.icmp)
|
||||
// We expand t0 here just incase ukey hop across tables.
|
||||
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
|
||||
if len(t0) != len(c.tables[0]) {
|
||||
imin, imax = t0.getRange(c.s.icmp)
|
||||
}
|
||||
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
|
||||
// Get entire range covered by compaction.
|
||||
amin, amax := append(t0, t1...).getRange(c.s.icmp)
|
||||
|
||||
// See if we can grow the number of inputs in "level" without
|
||||
// changing the number of "level+1" files we pick up.
|
||||
if len(t1) > 0 {
|
||||
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
|
||||
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
|
||||
xmin, xmax := exp0.getRange(c.s.icmp)
|
||||
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
|
||||
if len(exp1) == len(t1) {
|
||||
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
|
||||
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
|
||||
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
|
||||
imin, imax = xmin, xmax
|
||||
t0, t1 = exp0, exp1
|
||||
amin, amax = append(t0, t1...).getRange(c.s.icmp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the set of grandparent files that overlap this compaction
|
||||
// (parent == level+1; grandparent == level+2)
|
||||
if c.level+2 < c.s.o.GetNumLevel() {
|
||||
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
|
||||
}
|
||||
|
||||
c.tables[0], c.tables[1] = t0, t1
|
||||
c.imin, c.imax = imin, imax
|
||||
}
|
||||
|
||||
// Check whether compaction is trivial.
|
||||
func (c *compaction) trivial() bool {
|
||||
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
|
||||
}
|
||||
|
||||
func (c *compaction) baseLevelForKey(ukey []byte) bool {
|
||||
for level, tables := range c.v.tables[c.level+2:] {
|
||||
for c.tPtrs[level] < len(tables) {
|
||||
t := tables[c.tPtrs[level]]
|
||||
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
|
||||
// We've advanced far enough.
|
||||
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
|
||||
// Key falls in this file's range, so definitely not base level.
|
||||
return false
|
||||
}
|
||||
break
|
||||
}
|
||||
c.tPtrs[level]++
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *compaction) shouldStopBefore(ikey iKey) bool {
|
||||
for ; c.gpi < len(c.gp); c.gpi++ {
|
||||
gp := c.gp[c.gpi]
|
||||
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
|
||||
break
|
||||
}
|
||||
if c.seenKey {
|
||||
c.gpOverlappedBytes += gp.size
|
||||
}
|
||||
}
|
||||
c.seenKey = true
|
||||
|
||||
if c.gpOverlappedBytes > c.maxGPOverlaps {
|
||||
// Too much overlap for current output; start new output.
|
||||
c.gpOverlappedBytes = 0
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Creates an iterator.
|
||||
func (c *compaction) newIterator() iterator.Iterator {
|
||||
// Creates iterator slice.
|
||||
icap := len(c.tables)
|
||||
if c.level == 0 {
|
||||
// Special case for level-0
|
||||
icap = len(c.tables[0]) + 1
|
||||
}
|
||||
its := make([]iterator.Iterator, 0, icap)
|
||||
|
||||
// Options.
|
||||
ro := &opt.ReadOptions{
|
||||
DontFillCache: true,
|
||||
Strict: opt.StrictOverride,
|
||||
}
|
||||
strict := c.s.o.GetStrict(opt.StrictCompaction)
|
||||
if strict {
|
||||
ro.Strict |= opt.StrictReader
|
||||
}
|
||||
|
||||
for i, tables := range c.tables {
|
||||
if len(tables) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Level-0 is not sorted and may overlaps each other.
|
||||
if c.level+i == 0 {
|
||||
for _, t := range tables {
|
||||
its = append(its, c.s.tops.newIterator(t, nil, ro))
|
||||
}
|
||||
} else {
|
||||
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
|
||||
its = append(its, it)
|
||||
}
|
||||
}
|
||||
|
||||
return iterator.NewMergedIterator(its, c.s.icmp, strict)
|
||||
}
|
||||
|
287
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_compaction.go
generated
vendored
@ -1,287 +0,0 @@
|
||||
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
|
||||
// All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package leveldb
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/memdb"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/opt"
|
||||
)
|
||||
|
||||
func (s *session) pickMemdbLevel(umin, umax []byte) int {
|
||||
v := s.version()
|
||||
defer v.release()
|
||||
return v.pickMemdbLevel(umin, umax)
|
||||
}
|
||||
|
||||
func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, level int) (level_ int, err error) {
|
||||
// Create sorted table.
|
||||
iter := mdb.NewIterator(nil)
|
||||
defer iter.Release()
|
||||
t, n, err := s.tops.createFrom(iter)
|
||||
if err != nil {
|
||||
return level, err
|
||||
}
|
||||
|
||||
// Pick level and add to record.
|
||||
if level < 0 {
|
||||
level = s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey())
|
||||
}
|
||||
rec.addTableFile(level, t)
|
||||
|
||||
s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", level, t.file.Num(), n, shortenb(int(t.size)), t.imin, t.imax)
|
||||
return level, nil
|
||||
}
|
||||
|
||||
// Pick a compaction based on current state; need external synchronization.
|
||||
func (s *session) pickCompaction() *compaction {
|
||||
v := s.version()
|
||||
|
||||
var level int
|
||||
var t0 tFiles
|
||||
if v.cScore >= 1 {
|
||||
level = v.cLevel
|
||||
cptr := s.stCompPtrs[level]
|
||||
tables := v.tables[level]
|
||||
for _, t := range tables {
|
||||
if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
|
||||
t0 = append(t0, t)
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(t0) == 0 {
|
||||
t0 = append(t0, tables[0])
|
||||
}
|
||||
} else {
|
||||
if p := atomic.LoadPointer(&v.cSeek); p != nil {
|
||||
ts := (*tSet)(p)
|
||||
level = ts.level
|
||||
t0 = append(t0, ts.table)
|
||||
} else {
|
||||
v.release()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
}
|
||||
|
||||
// Create compaction from given level and range; need external synchronization.
|
||||
func (s *session) getCompactionRange(level int, umin, umax []byte) *compaction {
|
||||
v := s.version()
|
||||
|
||||
t0 := v.tables[level].getOverlaps(nil, s.icmp, umin, umax, level == 0)
|
||||
if len(t0) == 0 {
|
||||
v.release()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Avoid compacting too much in one shot in case the range is large.
|
||||
// But we cannot do this for level-0 since level-0 files can overlap
|
||||
// and we must not pick one file and drop another older file if the
|
||||
// two files overlap.
|
||||
if level > 0 {
|
||||
limit := uint64(v.s.o.GetCompactionSourceLimit(level))
|
||||
total := uint64(0)
|
||||
for i, t := range t0 {
|
||||
total += t.size
|
||||
if total >= limit {
|
||||
s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
|
||||
t0 = t0[:i+1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newCompaction(s, v, level, t0)
|
||||
}
|
||||
|
||||
func newCompaction(s *session, v *version, level int, t0 tFiles) *compaction {
|
||||
c := &compaction{
|
||||
s: s,
|
||||
v: v,
|
||||
level: level,
|
||||
tables: [2]tFiles{t0, nil},
|
||||
maxGPOverlaps: uint64(s.o.GetCompactionGPOverlaps(level)),
|
||||
tPtrs: make([]int, s.o.GetNumLevel()),
|
||||
}
|
||||
c.expand()
|
||||
c.save()
|
||||
return c
|
||||
}
|
||||
|
||||
// compaction represent a compaction state.
|
||||
type compaction struct {
|
||||
s *session
|
||||
v *version
|
||||
|
||||
level int
|
||||
tables [2]tFiles
|
||||
maxGPOverlaps uint64
|
||||
|
||||
gp tFiles
|
||||
gpi int
|
||||
seenKey bool
|
||||
gpOverlappedBytes uint64
|
||||
imin, imax iKey
|
||||
tPtrs []int
|
||||
released bool
|
||||
|
||||
snapGPI int
|
||||
snapSeenKey bool
|
||||
snapGPOverlappedBytes uint64
|
||||
snapTPtrs []int
|
||||
}
|
||||
|
||||
func (c *compaction) save() {
|
||||
c.snapGPI = c.gpi
|
||||
c.snapSeenKey = c.seenKey
|
||||
c.snapGPOverlappedBytes = c.gpOverlappedBytes
|
||||
c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
|
||||
}
|
||||
|
||||
func (c *compaction) restore() {
|
||||
c.gpi = c.snapGPI
|
||||
c.seenKey = c.snapSeenKey
|
||||
c.gpOverlappedBytes = c.snapGPOverlappedBytes
|
||||
c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
|
||||
}
|
||||
|
||||
func (c *compaction) release() {
|
||||
if !c.released {
|
||||
c.released = true
|
||||
c.v.release()
|
||||
}
|
||||
}
|
||||
|
||||
// Expand compacted tables; need external synchronization.
|
||||
func (c *compaction) expand() {
|
||||
limit := uint64(c.s.o.GetCompactionExpandLimit(c.level))
|
||||
vt0, vt1 := c.v.tables[c.level], c.v.tables[c.level+1]
|
||||
|
||||
t0, t1 := c.tables[0], c.tables[1]
|
||||
imin, imax := t0.getRange(c.s.icmp)
|
||||
// We expand t0 here just incase ukey hop across tables.
|
||||
t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.level == 0)
|
||||
if len(t0) != len(c.tables[0]) {
|
||||
imin, imax = t0.getRange(c.s.icmp)
|
||||
}
|
||||
t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
|
||||
// Get entire range covered by compaction.
|
||||
amin, amax := append(t0, t1...).getRange(c.s.icmp)
|
||||
|
||||
// See if we can grow the number of inputs in "level" without
|
||||
// changing the number of "level+1" files we pick up.
|
||||
if len(t1) > 0 {
|
||||
exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.level == 0)
|
||||
if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
|
||||
xmin, xmax := exp0.getRange(c.s.icmp)
|
||||
exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
|
||||
if len(exp1) == len(t1) {
|
||||
c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
|
||||
c.level, c.level+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
|
||||
len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
|
||||
imin, imax = xmin, xmax
|
||||
t0, t1 = exp0, exp1
|
||||
amin, amax = append(t0, t1...).getRange(c.s.icmp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the set of grandparent files that overlap this compaction
|
||||
// (parent == level+1; grandparent == level+2)
|
||||
if c.level+2 < c.s.o.GetNumLevel() {
|
||||
c.gp = c.v.tables[c.level+2].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
|
||||
}
|
||||
|
||||
c.tables[0], c.tables[1] = t0, t1
|
||||
c.imin, c.imax = imin, imax
|
||||
}
|
||||
|
||||
// Check whether compaction is trivial.
|
||||
func (c *compaction) trivial() bool {
|
||||
return len(c.tables[0]) == 1 && len(c.tables[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
|
||||
}
|
||||
|
||||
func (c *compaction) baseLevelForKey(ukey []byte) bool {
|
||||
for level, tables := range c.v.tables[c.level+2:] {
|
||||
for c.tPtrs[level] < len(tables) {
|
||||
t := tables[c.tPtrs[level]]
|
||||
if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
|
||||
// We've advanced far enough.
|
||||
if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
|
||||
// Key falls in this file's range, so definitely not base level.
|
||||
return false
|
||||
}
|
||||
break
|
||||
}
|
||||
c.tPtrs[level]++
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *compaction) shouldStopBefore(ikey iKey) bool {
|
||||
for ; c.gpi < len(c.gp); c.gpi++ {
|
||||
gp := c.gp[c.gpi]
|
||||
if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
|
||||
break
|
||||
}
|
||||
if c.seenKey {
|
||||
c.gpOverlappedBytes += gp.size
|
||||
}
|
||||
}
|
||||
c.seenKey = true
|
||||
|
||||
if c.gpOverlappedBytes > c.maxGPOverlaps {
|
||||
// Too much overlap for current output; start new output.
|
||||
c.gpOverlappedBytes = 0
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Creates an iterator.
|
||||
func (c *compaction) newIterator() iterator.Iterator {
|
||||
// Creates iterator slice.
|
||||
icap := len(c.tables)
|
||||
if c.level == 0 {
|
||||
// Special case for level-0.
|
||||
icap = len(c.tables[0]) + 1
|
||||
}
|
||||
its := make([]iterator.Iterator, 0, icap)
|
||||
|
||||
// Options.
|
||||
ro := &opt.ReadOptions{
|
||||
DontFillCache: true,
|
||||
Strict: opt.StrictOverride,
|
||||
}
|
||||
strict := c.s.o.GetStrict(opt.StrictCompaction)
|
||||
if strict {
|
||||
ro.Strict |= opt.StrictReader
|
||||
}
|
||||
|
||||
for i, tables := range c.tables {
|
||||
if len(tables) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Level-0 is not sorted and may overlaps each other.
|
||||
if c.level+i == 0 {
|
||||
for _, t := range tables {
|
||||
its = append(its, c.s.tops.newIterator(t, nil, ro))
|
||||
}
|
||||
} else {
|
||||
it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
|
||||
its = append(its, it)
|
||||
}
|
||||
}
|
||||
|
||||
return iterator.NewMergedIterator(its, c.s.icmp, strict)
|
||||
}
|
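The hunk above closes out goleveldb's compaction helpers: trivial detects single-file moves, baseLevelForKey checks whether a user key can still appear in deeper levels, and shouldStopBefore starts a new output table once too many grandparent (level+2) bytes overlap the current output. As a rough, self-contained illustration of that last rule (not the library's API; the type names and the byte limit below are invented for the sketch), the following walks a sorted list of grandparent ranges and reports where an output split would occur:

package main

import "fmt"

// gpFile is a simplified stand-in for a grandparent (level+2) table:
// just the largest key it contains and its size in bytes.
type gpFile struct {
	maxKey string
	size   int
}

// splitter mimics the shape of the shouldStopBefore logic shown above:
// it accumulates the sizes of grandparent files the current output has
// already passed and asks for a new output once that total exceeds
// maxOverlap.
type splitter struct {
	gp         []gpFile
	gpi        int
	seenKey    bool
	overlapped int
	maxOverlap int
}

func (s *splitter) shouldStopBefore(key string) bool {
	for ; s.gpi < len(s.gp); s.gpi++ {
		if key <= s.gp[s.gpi].maxKey {
			break
		}
		if s.seenKey {
			s.overlapped += s.gp[s.gpi].size
		}
	}
	s.seenKey = true
	if s.overlapped > s.maxOverlap {
		s.overlapped = 0
		return true
	}
	return false
}

func main() {
	s := &splitter{
		gp:         []gpFile{{"c", 40}, {"f", 40}, {"k", 40}},
		maxOverlap: 60, // made-up limit; the real one comes from the options
	}
	for _, k := range []string{"a", "d", "g", "m"} {
		fmt.Printf("key %q: new output? %v\n", k, s.shouldStopBefore(k))
	}
}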
14
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record.go
generated
vendored
@ -52,6 +52,8 @@ type dtRecord struct {
|
||||
}
|
||||
|
||||
type sessionRecord struct {
|
||||
numLevel int
|
||||
|
||||
hasRec int
|
||||
comparer string
|
||||
journalNum uint64
|
||||
@ -228,7 +230,7 @@ func (p *sessionRecord) readBytes(field string, r byteReader) []byte {
|
||||
return x
|
||||
}
|
||||
|
||||
func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) int {
|
||||
func (p *sessionRecord) readLevel(field string, r io.ByteReader) int {
|
||||
if p.err != nil {
|
||||
return 0
|
||||
}
|
||||
@ -236,14 +238,14 @@ func (p *sessionRecord) readLevel(field string, r io.ByteReader, numLevel int) i
|
||||
if p.err != nil {
|
||||
return 0
|
||||
}
|
||||
if x >= uint64(numLevel) {
|
||||
if x >= uint64(p.numLevel) {
|
||||
p.err = errors.NewErrCorrupted(nil, &ErrManifestCorrupted{field, "invalid level number"})
|
||||
return 0
|
||||
}
|
||||
return int(x)
|
||||
}
|
||||
|
||||
func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
|
||||
func (p *sessionRecord) decode(r io.Reader) error {
|
||||
br, ok := r.(byteReader)
|
||||
if !ok {
|
||||
br = bufio.NewReader(r)
|
||||
@ -284,13 +286,13 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
|
||||
p.setSeqNum(x)
|
||||
}
|
||||
case recCompPtr:
|
||||
level := p.readLevel("comp-ptr.level", br, numLevel)
|
||||
level := p.readLevel("comp-ptr.level", br)
|
||||
ikey := p.readBytes("comp-ptr.ikey", br)
|
||||
if p.err == nil {
|
||||
p.addCompPtr(level, iKey(ikey))
|
||||
}
|
||||
case recAddTable:
|
||||
level := p.readLevel("add-table.level", br, numLevel)
|
||||
level := p.readLevel("add-table.level", br)
|
||||
num := p.readUvarint("add-table.num", br)
|
||||
size := p.readUvarint("add-table.size", br)
|
||||
imin := p.readBytes("add-table.imin", br)
|
||||
@ -299,7 +301,7 @@ func (p *sessionRecord) decode(r io.Reader, numLevel int) error {
|
||||
p.addTable(level, num, size, imin, imax)
|
||||
}
|
||||
case recDelTable:
|
||||
level := p.readLevel("del-table.level", br, numLevel)
|
||||
level := p.readLevel("del-table.level", br)
|
||||
num := p.readUvarint("del-table.num", br)
|
||||
if p.err == nil {
|
||||
p.delTable(level, num)
|
||||
|
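The session_record.go hunk above reverts an API change: in the reverted state, readLevel and decode no longer take a numLevel argument and instead validate level numbers against the numLevel field stored on sessionRecord itself (the test diff below constructs the record with numLevel: opt.DefaultNumLevel). A rough caller-side sketch of that reverted shape, with types and helpers simplified and renamed for illustration:

package main

import (
	"errors"
	"fmt"
)

// record is a toy stand-in for sessionRecord: it carries the level count
// it was constructed with and validates levels against it while decoding.
type record struct {
	numLevel int
	levels   []int
}

// addLevel mimics the reverted readLevel behaviour: the bound comes from
// the struct, not from a parameter threaded through every call.
func (r *record) addLevel(level int) error {
	if level >= r.numLevel {
		return errors.New("manifest corrupted: invalid level number")
	}
	r.levels = append(r.levels, level)
	return nil
}

func main() {
	// Mirrors the construction pattern in the test diff: the caller
	// supplies numLevel once, at construction time.
	rec := &record{numLevel: 7}
	for _, lv := range []int{0, 3, 9} {
		fmt.Printf("level %d: %v\n", lv, rec.addLevel(lv))
	}
}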
6
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_record_test.go
generated
vendored
@ -19,8 +19,8 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
v2 := &sessionRecord{}
|
||||
err = v.decode(b, opt.DefaultNumLevel)
|
||||
v2 := &sessionRecord{numLevel: opt.DefaultNumLevel}
|
||||
err = v.decode(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@ -34,7 +34,7 @@ func decodeEncode(v *sessionRecord) (res bool, err error) {
|
||||
|
||||
func TestSessionRecord_EncodeDecode(t *testing.T) {
|
||||
big := uint64(1) << 50
|
||||
v := &sessionRecord{}
|
||||
v := &sessionRecord{numLevel: opt.DefaultNumLevel}
|
||||
i := uint64(0)
|
||||
test := func() {
|
||||
res, err := decodeEncode(v)
|
||||
|
2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/session_util.go
generated
vendored
@ -180,7 +180,7 @@ func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
|
||||
defer v.release()
|
||||
}
|
||||
if rec == nil {
|
||||
rec = &sessionRecord{}
|
||||
rec = &sessionRecord{numLevel: s.o.GetNumLevel()}
|
||||
}
|
||||
s.fillRecord(rec, true)
|
||||
v.fillRecord(rec)
|
||||
|
10
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/storage_test.go
generated
vendored
@ -42,8 +42,6 @@ type tsOp uint
|
||||
const (
|
||||
tsOpOpen tsOp = iota
|
||||
tsOpCreate
|
||||
tsOpReplace
|
||||
tsOpRemove
|
||||
tsOpRead
|
||||
tsOpReadAt
|
||||
tsOpWrite
|
||||
@ -243,10 +241,6 @@ func (tf tsFile) Replace(newfile storage.File) (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if tf.shouldErr(tsOpReplace) {
|
||||
err = errors.New("leveldb.testStorage: emulated create error")
|
||||
return
|
||||
}
|
||||
err = tf.File.Replace(newfile.(tsFile).File)
|
||||
if err != nil {
|
||||
ts.t.Errorf("E: cannot replace file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
|
||||
@ -264,10 +258,6 @@ func (tf tsFile) Remove() (err error) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if tf.shouldErr(tsOpRemove) {
|
||||
err = errors.New("leveldb.testStorage: emulated create error")
|
||||
return
|
||||
}
|
||||
err = tf.File.Remove()
|
||||
if err != nil {
|
||||
ts.t.Errorf("E: cannot remove file, num=%d type=%v: %v", tf.Num(), tf.Type(), err)
|
||||
|
8
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/table.go
generated
vendored
@ -441,26 +441,22 @@ func newTableOps(s *session) *tOps {
|
||||
var (
|
||||
cacher cache.Cacher
|
||||
bcache *cache.Cache
|
||||
bpool *util.BufferPool
|
||||
)
|
||||
if s.o.GetOpenFilesCacheCapacity() > 0 {
|
||||
cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
|
||||
}
|
||||
if !s.o.GetDisableBlockCache() {
|
||||
if !s.o.DisableBlockCache {
|
||||
var bcacher cache.Cacher
|
||||
if s.o.GetBlockCacheCapacity() > 0 {
|
||||
bcacher = cache.NewLRU(s.o.GetBlockCacheCapacity())
|
||||
}
|
||||
bcache = cache.NewCache(bcacher)
|
||||
}
|
||||
if !s.o.GetDisableBufferPool() {
|
||||
bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
|
||||
}
|
||||
return &tOps{
|
||||
s: s,
|
||||
cache: cache.NewCache(cacher),
|
||||
bcache: bcache,
|
||||
bpool: bpool,
|
||||
bpool: util.NewBufferPool(s.o.GetBlockSize() + 5),
|
||||
}
|
||||
}
|
||||
|
||||
|
2
Godeps/_workspace/src/github.com/syndtr/goleveldb/leveldb/version.go
generated
vendored
@ -300,7 +300,7 @@ func (v *version) offsetOf(ikey iKey) (n uint64, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (v *version) pickMemdbLevel(umin, umax []byte) (level int) {
|
||||
func (v *version) pickLevel(umin, umax []byte) (level int) {
|
||||
if !v.tables[0].overlaps(v.s.icmp, umin, umax, true) {
|
||||
var overlaps tFiles
|
||||
maxLevel := v.s.o.GetMaxMemCompationLevel()
|
||||
|
13
Godeps/_workspace/src/github.com/whyrusleeping/iptb/main.go
generated
vendored
@ -12,15 +12,13 @@ import (
|
||||
"os/exec"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
serial "github.com/ipfs/go-ipfs/repo/fsrepo/serialize"
|
||||
|
||||
ma "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
|
||||
manet "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
|
||||
serial "github.com/ipfs/go-ipfs/repo/fsrepo/serialize"
|
||||
)
|
||||
|
||||
// GetNumNodes returns the number of testbed nodes configured in the testbed directory
|
||||
@ -72,7 +70,6 @@ type initCfg struct {
|
||||
Force bool
|
||||
Bootstrap string
|
||||
PortStart int
|
||||
Mdns bool
|
||||
}
|
||||
|
||||
func (c *initCfg) swarmAddrForPeer(i int) string {
|
||||
@ -147,7 +144,6 @@ func starBootstrap(icfg *initCfg) error {
|
||||
bcfg.Addresses.Swarm = []string{icfg.swarmAddrForPeer(0)}
|
||||
bcfg.Addresses.API = icfg.apiAddrForPeer(0)
|
||||
bcfg.Addresses.Gateway = ""
|
||||
bcfg.Discovery.MDNS.Enabled = icfg.Mdns
|
||||
err = serial.WriteConfigFile(cfgpath, bcfg)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -160,11 +156,8 @@ func starBootstrap(icfg *initCfg) error {
|
||||
return err
|
||||
}
|
||||
|
||||
ba := fmt.Sprintf("%s/ipfs/%s", bcfg.Addresses.Swarm[0], bcfg.Identity.PeerID)
|
||||
ba = strings.Replace(ba, "0.0.0.0", "127.0.0.1", -1)
|
||||
cfg.Bootstrap = []string{ba}
|
||||
cfg.Bootstrap = []string{fmt.Sprintf("%s/ipfs/%s", bcfg.Addresses.Swarm[0], bcfg.Identity.PeerID)}
|
||||
cfg.Addresses.Gateway = ""
|
||||
cfg.Discovery.MDNS.Enabled = icfg.Mdns
|
||||
cfg.Addresses.Swarm = []string{
|
||||
icfg.swarmAddrForPeer(i),
|
||||
}
|
||||
@ -189,7 +182,6 @@ func clearBootstrapping(icfg *initCfg) error {
|
||||
cfg.Addresses.Gateway = ""
|
||||
cfg.Addresses.Swarm = []string{icfg.swarmAddrForPeer(i)}
|
||||
cfg.Addresses.API = icfg.apiAddrForPeer(i)
|
||||
cfg.Discovery.MDNS.Enabled = icfg.Mdns
|
||||
err = serial.WriteConfigFile(cfgpath, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -448,7 +440,6 @@ func main() {
|
||||
flag.IntVar(&cfg.Count, "n", 0, "number of ipfs nodes to initialize")
|
||||
flag.IntVar(&cfg.PortStart, "p", 4002, "port to start allocations from")
|
||||
flag.BoolVar(&cfg.Force, "f", false, "force initialization (overwrite existing configs)")
|
||||
flag.BoolVar(&cfg.Mdns, "mdns", false, "turn on mdns for nodes")
|
||||
flag.StringVar(&cfg.Bootstrap, "bootstrap", "star", "select bootstrapping style for cluster")
|
||||
|
||||
wait := flag.Bool("wait", false, "wait for nodes to come fully online before exiting")
|
||||
|
78
Godeps/_workspace/src/golang.org/x/net/html/atom/atom.go
generated
vendored
@ -1,78 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package atom provides integer codes (also known as atoms) for a fixed set of
|
||||
// frequently occurring HTML strings: tag names and attribute keys such as "p"
|
||||
// and "id".
|
||||
//
|
||||
// Sharing an atom's name between all elements with the same tag can result in
|
||||
// fewer string allocations when tokenizing and parsing HTML. Integer
|
||||
// comparisons are also generally faster than string comparisons.
|
||||
//
|
||||
// The value of an atom's particular code is not guaranteed to stay the same
|
||||
// between versions of this package. Neither is any ordering guaranteed:
|
||||
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
|
||||
// be dense. The only guarantees are that e.g. looking up "div" will yield
|
||||
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
|
||||
package atom
|
||||
|
||||
// Atom is an integer code for a string. The zero value maps to "".
|
||||
type Atom uint32
|
||||
|
||||
// String returns the atom's name.
|
||||
func (a Atom) String() string {
|
||||
start := uint32(a >> 8)
|
||||
n := uint32(a & 0xff)
|
||||
if start+n > uint32(len(atomText)) {
|
||||
return ""
|
||||
}
|
||||
return atomText[start : start+n]
|
||||
}
|
||||
|
||||
func (a Atom) string() string {
|
||||
return atomText[a>>8 : a>>8+a&0xff]
|
||||
}
|
||||
|
||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
||||
func fnv(h uint32, s []byte) uint32 {
|
||||
for i := range s {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func match(s string, t []byte) bool {
|
||||
for i, c := range t {
|
||||
if s[i] != c {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Lookup returns the atom whose name is s. It returns zero if there is no
|
||||
// such atom. The lookup is case sensitive.
|
||||
func Lookup(s []byte) Atom {
|
||||
if len(s) == 0 || len(s) > maxAtomLen {
|
||||
return 0
|
||||
}
|
||||
h := fnv(hash0, s)
|
||||
if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
||||
return a
|
||||
}
|
||||
if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) {
|
||||
return a
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// String returns a string whose contents are equal to s. In that sense, it is
|
||||
// equivalent to string(s) but may be more efficient.
|
||||
func String(s []byte) string {
|
||||
if a := Lookup(s); a != 0 {
|
||||
return a.String()
|
||||
}
|
||||
return string(s)
|
||||
}
|
109
Godeps/_workspace/src/golang.org/x/net/html/atom/atom_test.go
generated
vendored
@ -1,109 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package atom
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestKnown(t *testing.T) {
|
||||
for _, s := range testAtomList {
|
||||
if atom := Lookup([]byte(s)); atom.String() != s {
|
||||
t.Errorf("Lookup(%q) = %#x (%q)", s, uint32(atom), atom.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHits(t *testing.T) {
|
||||
for _, a := range table {
|
||||
if a == 0 {
|
||||
continue
|
||||
}
|
||||
got := Lookup([]byte(a.String()))
|
||||
if got != a {
|
||||
t.Errorf("Lookup(%q) = %#x, want %#x", a.String(), uint32(got), uint32(a))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMisses(t *testing.T) {
|
||||
testCases := []string{
|
||||
"",
|
||||
"\x00",
|
||||
"\xff",
|
||||
"A",
|
||||
"DIV",
|
||||
"Div",
|
||||
"dIV",
|
||||
"aa",
|
||||
"a\x00",
|
||||
"ab",
|
||||
"abb",
|
||||
"abbr0",
|
||||
"abbr ",
|
||||
" abbr",
|
||||
" a",
|
||||
"acceptcharset",
|
||||
"acceptCharset",
|
||||
"accept_charset",
|
||||
"h0",
|
||||
"h1h2",
|
||||
"h7",
|
||||
"onClick",
|
||||
"λ",
|
||||
// The following string has the same hash (0xa1d7fab7) as "onmouseover".
|
||||
"\x00\x00\x00\x00\x00\x50\x18\xae\x38\xd0\xb7",
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
got := Lookup([]byte(tc))
|
||||
if got != 0 {
|
||||
t.Errorf("Lookup(%q): got %d, want 0", tc, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestForeignObject(t *testing.T) {
|
||||
const (
|
||||
afo = Foreignobject
|
||||
afO = ForeignObject
|
||||
sfo = "foreignobject"
|
||||
sfO = "foreignObject"
|
||||
)
|
||||
if got := Lookup([]byte(sfo)); got != afo {
|
||||
t.Errorf("Lookup(%q): got %#v, want %#v", sfo, got, afo)
|
||||
}
|
||||
if got := Lookup([]byte(sfO)); got != afO {
|
||||
t.Errorf("Lookup(%q): got %#v, want %#v", sfO, got, afO)
|
||||
}
|
||||
if got := afo.String(); got != sfo {
|
||||
t.Errorf("Atom(%#v).String(): got %q, want %q", afo, got, sfo)
|
||||
}
|
||||
if got := afO.String(); got != sfO {
|
||||
t.Errorf("Atom(%#v).String(): got %q, want %q", afO, got, sfO)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLookup(b *testing.B) {
|
||||
sortedTable := make([]string, 0, len(table))
|
||||
for _, a := range table {
|
||||
if a != 0 {
|
||||
sortedTable = append(sortedTable, a.String())
|
||||
}
|
||||
}
|
||||
sort.Strings(sortedTable)
|
||||
|
||||
x := make([][]byte, 1000)
|
||||
for i := range x {
|
||||
x[i] = []byte(sortedTable[i%len(sortedTable)])
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for _, s := range x {
|
||||
Lookup(s)
|
||||
}
|
||||
}
|
||||
}
|
648
Godeps/_workspace/src/golang.org/x/net/html/atom/gen.go
generated
vendored
@ -1,648 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// This program generates table.go and table_test.go.
|
||||
// Invoke as
|
||||
//
|
||||
// go run gen.go |gofmt >table.go
|
||||
// go run gen.go -test |gofmt >table_test.go
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// identifier converts s to a Go exported identifier.
|
||||
// It converts "div" to "Div" and "accept-charset" to "AcceptCharset".
|
||||
func identifier(s string) string {
|
||||
b := make([]byte, 0, len(s))
|
||||
cap := true
|
||||
for _, c := range s {
|
||||
if c == '-' {
|
||||
cap = true
|
||||
continue
|
||||
}
|
||||
if cap && 'a' <= c && c <= 'z' {
|
||||
c -= 'a' - 'A'
|
||||
}
|
||||
cap = false
|
||||
b = append(b, byte(c))
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
var test = flag.Bool("test", false, "generate table_test.go")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
var all []string
|
||||
all = append(all, elements...)
|
||||
all = append(all, attributes...)
|
||||
all = append(all, eventHandlers...)
|
||||
all = append(all, extra...)
|
||||
sort.Strings(all)
|
||||
|
||||
if *test {
|
||||
fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n")
|
||||
fmt.Printf("package atom\n\n")
|
||||
fmt.Printf("var testAtomList = []string{\n")
|
||||
for _, s := range all {
|
||||
fmt.Printf("\t%q,\n", s)
|
||||
}
|
||||
fmt.Printf("}\n")
|
||||
return
|
||||
}
|
||||
|
||||
// uniq - lists have dups
|
||||
// compute max len too
|
||||
maxLen := 0
|
||||
w := 0
|
||||
for _, s := range all {
|
||||
if w == 0 || all[w-1] != s {
|
||||
if maxLen < len(s) {
|
||||
maxLen = len(s)
|
||||
}
|
||||
all[w] = s
|
||||
w++
|
||||
}
|
||||
}
|
||||
all = all[:w]
|
||||
|
||||
// Find hash that minimizes table size.
|
||||
var best *table
|
||||
for i := 0; i < 1000000; i++ {
|
||||
if best != nil && 1<<(best.k-1) < len(all) {
|
||||
break
|
||||
}
|
||||
h := rand.Uint32()
|
||||
for k := uint(0); k <= 16; k++ {
|
||||
if best != nil && k >= best.k {
|
||||
break
|
||||
}
|
||||
var t table
|
||||
if t.init(h, k, all) {
|
||||
best = &t
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if best == nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to construct string table\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Lay out strings, using overlaps when possible.
|
||||
layout := append([]string{}, all...)
|
||||
|
||||
// Remove strings that are substrings of other strings
|
||||
for changed := true; changed; {
|
||||
changed = false
|
||||
for i, s := range layout {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
for j, t := range layout {
|
||||
if i != j && t != "" && strings.Contains(s, t) {
|
||||
changed = true
|
||||
layout[j] = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Join strings where one suffix matches another prefix.
|
||||
for {
|
||||
// Find best i, j, k such that layout[i][len-k:] == layout[j][:k],
|
||||
// maximizing overlap length k.
|
||||
besti := -1
|
||||
bestj := -1
|
||||
bestk := 0
|
||||
for i, s := range layout {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
for j, t := range layout {
|
||||
if i == j {
|
||||
continue
|
||||
}
|
||||
for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
|
||||
if s[len(s)-k:] == t[:k] {
|
||||
besti = i
|
||||
bestj = j
|
||||
bestk = k
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestk > 0 {
|
||||
layout[besti] += layout[bestj][bestk:]
|
||||
layout[bestj] = ""
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
text := strings.Join(layout, "")
|
||||
|
||||
atom := map[string]uint32{}
|
||||
for _, s := range all {
|
||||
off := strings.Index(text, s)
|
||||
if off < 0 {
|
||||
panic("lost string " + s)
|
||||
}
|
||||
atom[s] = uint32(off<<8 | len(s))
|
||||
}
|
||||
|
||||
// Generate the Go code.
|
||||
fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n")
|
||||
fmt.Printf("package atom\n\nconst (\n")
|
||||
for _, s := range all {
|
||||
fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s])
|
||||
}
|
||||
fmt.Printf(")\n\n")
|
||||
|
||||
fmt.Printf("const hash0 = %#x\n\n", best.h0)
|
||||
fmt.Printf("const maxAtomLen = %d\n\n", maxLen)
|
||||
|
||||
fmt.Printf("var table = [1<<%d]Atom{\n", best.k)
|
||||
for i, s := range best.tab {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s)
|
||||
}
|
||||
fmt.Printf("}\n")
|
||||
datasize := (1 << best.k) * 4
|
||||
|
||||
fmt.Printf("const atomText =\n")
|
||||
textsize := len(text)
|
||||
for len(text) > 60 {
|
||||
fmt.Printf("\t%q +\n", text[:60])
|
||||
text = text[60:]
|
||||
}
|
||||
fmt.Printf("\t%q\n\n", text)
|
||||
|
||||
fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize)
|
||||
}
|
||||
|
||||
type byLen []string
|
||||
|
||||
func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) }
|
||||
func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byLen) Len() int { return len(x) }
|
||||
|
||||
// fnv computes the FNV hash with an arbitrary starting value h.
|
||||
func fnv(h uint32, s string) uint32 {
|
||||
for i := 0; i < len(s); i++ {
|
||||
h ^= uint32(s[i])
|
||||
h *= 16777619
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// A table represents an attempt at constructing the lookup table.
|
||||
// The lookup table uses cuckoo hashing, meaning that each string
|
||||
// can be found in one of two positions.
|
||||
type table struct {
|
||||
h0 uint32
|
||||
k uint
|
||||
mask uint32
|
||||
tab []string
|
||||
}
|
||||
|
||||
// hash returns the two hashes for s.
|
||||
func (t *table) hash(s string) (h1, h2 uint32) {
|
||||
h := fnv(t.h0, s)
|
||||
h1 = h & t.mask
|
||||
h2 = (h >> 16) & t.mask
|
||||
return
|
||||
}
|
||||
|
||||
// init initializes the table with the given parameters.
|
||||
// h0 is the initial hash value,
|
||||
// k is the number of bits of hash value to use, and
|
||||
// x is the list of strings to store in the table.
|
||||
// init returns false if the table cannot be constructed.
|
||||
func (t *table) init(h0 uint32, k uint, x []string) bool {
|
||||
t.h0 = h0
|
||||
t.k = k
|
||||
t.tab = make([]string, 1<<k)
|
||||
t.mask = 1<<k - 1
|
||||
for _, s := range x {
|
||||
if !t.insert(s) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// insert inserts s in the table.
|
||||
func (t *table) insert(s string) bool {
|
||||
h1, h2 := t.hash(s)
|
||||
if t.tab[h1] == "" {
|
||||
t.tab[h1] = s
|
||||
return true
|
||||
}
|
||||
if t.tab[h2] == "" {
|
||||
t.tab[h2] = s
|
||||
return true
|
||||
}
|
||||
if t.push(h1, 0) {
|
||||
t.tab[h1] = s
|
||||
return true
|
||||
}
|
||||
if t.push(h2, 0) {
|
||||
t.tab[h2] = s
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// push attempts to push aside the entry in slot i.
|
||||
func (t *table) push(i uint32, depth int) bool {
|
||||
if depth > len(t.tab) {
|
||||
return false
|
||||
}
|
||||
s := t.tab[i]
|
||||
h1, h2 := t.hash(s)
|
||||
j := h1 + h2 - i
|
||||
if t.tab[j] != "" && !t.push(j, depth+1) {
|
||||
return false
|
||||
}
|
||||
t.tab[j] = s
|
||||
return true
|
||||
}
|
||||
|
||||
// The lists of element names and attribute keys were taken from
|
||||
// https://html.spec.whatwg.org/multipage/indices.html#index
|
||||
// as of the "HTML Living Standard - Last Updated 21 February 2015" version.
|
||||
|
||||
var elements = []string{
|
||||
"a",
|
||||
"abbr",
|
||||
"address",
|
||||
"area",
|
||||
"article",
|
||||
"aside",
|
||||
"audio",
|
||||
"b",
|
||||
"base",
|
||||
"bdi",
|
||||
"bdo",
|
||||
"blockquote",
|
||||
"body",
|
||||
"br",
|
||||
"button",
|
||||
"canvas",
|
||||
"caption",
|
||||
"cite",
|
||||
"code",
|
||||
"col",
|
||||
"colgroup",
|
||||
"command",
|
||||
"data",
|
||||
"datalist",
|
||||
"dd",
|
||||
"del",
|
||||
"details",
|
||||
"dfn",
|
||||
"dialog",
|
||||
"div",
|
||||
"dl",
|
||||
"dt",
|
||||
"em",
|
||||
"embed",
|
||||
"fieldset",
|
||||
"figcaption",
|
||||
"figure",
|
||||
"footer",
|
||||
"form",
|
||||
"h1",
|
||||
"h2",
|
||||
"h3",
|
||||
"h4",
|
||||
"h5",
|
||||
"h6",
|
||||
"head",
|
||||
"header",
|
||||
"hgroup",
|
||||
"hr",
|
||||
"html",
|
||||
"i",
|
||||
"iframe",
|
||||
"img",
|
||||
"input",
|
||||
"ins",
|
||||
"kbd",
|
||||
"keygen",
|
||||
"label",
|
||||
"legend",
|
||||
"li",
|
||||
"link",
|
||||
"map",
|
||||
"mark",
|
||||
"menu",
|
||||
"menuitem",
|
||||
"meta",
|
||||
"meter",
|
||||
"nav",
|
||||
"noscript",
|
||||
"object",
|
||||
"ol",
|
||||
"optgroup",
|
||||
"option",
|
||||
"output",
|
||||
"p",
|
||||
"param",
|
||||
"pre",
|
||||
"progress",
|
||||
"q",
|
||||
"rp",
|
||||
"rt",
|
||||
"ruby",
|
||||
"s",
|
||||
"samp",
|
||||
"script",
|
||||
"section",
|
||||
"select",
|
||||
"small",
|
||||
"source",
|
||||
"span",
|
||||
"strong",
|
||||
"style",
|
||||
"sub",
|
||||
"summary",
|
||||
"sup",
|
||||
"table",
|
||||
"tbody",
|
||||
"td",
|
||||
"template",
|
||||
"textarea",
|
||||
"tfoot",
|
||||
"th",
|
||||
"thead",
|
||||
"time",
|
||||
"title",
|
||||
"tr",
|
||||
"track",
|
||||
"u",
|
||||
"ul",
|
||||
"var",
|
||||
"video",
|
||||
"wbr",
|
||||
}
|
||||
|
||||
// https://html.spec.whatwg.org/multipage/indices.html#attributes-3
|
||||
|
||||
var attributes = []string{
|
||||
"abbr",
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accesskey",
|
||||
"action",
|
||||
"alt",
|
||||
"async",
|
||||
"autocomplete",
|
||||
"autofocus",
|
||||
"autoplay",
|
||||
"challenge",
|
||||
"charset",
|
||||
"checked",
|
||||
"cite",
|
||||
"class",
|
||||
"cols",
|
||||
"colspan",
|
||||
"command",
|
||||
"content",
|
||||
"contenteditable",
|
||||
"contextmenu",
|
||||
"controls",
|
||||
"coords",
|
||||
"crossorigin",
|
||||
"data",
|
||||
"datetime",
|
||||
"default",
|
||||
"defer",
|
||||
"dir",
|
||||
"dirname",
|
||||
"disabled",
|
||||
"download",
|
||||
"draggable",
|
||||
"dropzone",
|
||||
"enctype",
|
||||
"for",
|
||||
"form",
|
||||
"formaction",
|
||||
"formenctype",
|
||||
"formmethod",
|
||||
"formnovalidate",
|
||||
"formtarget",
|
||||
"headers",
|
||||
"height",
|
||||
"hidden",
|
||||
"high",
|
||||
"href",
|
||||
"hreflang",
|
||||
"http-equiv",
|
||||
"icon",
|
||||
"id",
|
||||
"inputmode",
|
||||
"ismap",
|
||||
"itemid",
|
||||
"itemprop",
|
||||
"itemref",
|
||||
"itemscope",
|
||||
"itemtype",
|
||||
"keytype",
|
||||
"kind",
|
||||
"label",
|
||||
"lang",
|
||||
"list",
|
||||
"loop",
|
||||
"low",
|
||||
"manifest",
|
||||
"max",
|
||||
"maxlength",
|
||||
"media",
|
||||
"mediagroup",
|
||||
"method",
|
||||
"min",
|
||||
"minlength",
|
||||
"multiple",
|
||||
"muted",
|
||||
"name",
|
||||
"novalidate",
|
||||
"open",
|
||||
"optimum",
|
||||
"pattern",
|
||||
"ping",
|
||||
"placeholder",
|
||||
"poster",
|
||||
"preload",
|
||||
"radiogroup",
|
||||
"readonly",
|
||||
"rel",
|
||||
"required",
|
||||
"reversed",
|
||||
"rows",
|
||||
"rowspan",
|
||||
"sandbox",
|
||||
"spellcheck",
|
||||
"scope",
|
||||
"scoped",
|
||||
"seamless",
|
||||
"selected",
|
||||
"shape",
|
||||
"size",
|
||||
"sizes",
|
||||
"sortable",
|
||||
"sorted",
|
||||
"span",
|
||||
"src",
|
||||
"srcdoc",
|
||||
"srclang",
|
||||
"start",
|
||||
"step",
|
||||
"style",
|
||||
"tabindex",
|
||||
"target",
|
||||
"title",
|
||||
"translate",
|
||||
"type",
|
||||
"typemustmatch",
|
||||
"usemap",
|
||||
"value",
|
||||
"width",
|
||||
"wrap",
|
||||
}
|
||||
|
||||
var eventHandlers = []string{
|
||||
"onabort",
|
||||
"onautocomplete",
|
||||
"onautocompleteerror",
|
||||
"onafterprint",
|
||||
"onbeforeprint",
|
||||
"onbeforeunload",
|
||||
"onblur",
|
||||
"oncancel",
|
||||
"oncanplay",
|
||||
"oncanplaythrough",
|
||||
"onchange",
|
||||
"onclick",
|
||||
"onclose",
|
||||
"oncontextmenu",
|
||||
"oncuechange",
|
||||
"ondblclick",
|
||||
"ondrag",
|
||||
"ondragend",
|
||||
"ondragenter",
|
||||
"ondragleave",
|
||||
"ondragover",
|
||||
"ondragstart",
|
||||
"ondrop",
|
||||
"ondurationchange",
|
||||
"onemptied",
|
||||
"onended",
|
||||
"onerror",
|
||||
"onfocus",
|
||||
"onhashchange",
|
||||
"oninput",
|
||||
"oninvalid",
|
||||
"onkeydown",
|
||||
"onkeypress",
|
||||
"onkeyup",
|
||||
"onlanguagechange",
|
||||
"onload",
|
||||
"onloadeddata",
|
||||
"onloadedmetadata",
|
||||
"onloadstart",
|
||||
"onmessage",
|
||||
"onmousedown",
|
||||
"onmousemove",
|
||||
"onmouseout",
|
||||
"onmouseover",
|
||||
"onmouseup",
|
||||
"onmousewheel",
|
||||
"onoffline",
|
||||
"ononline",
|
||||
"onpagehide",
|
||||
"onpageshow",
|
||||
"onpause",
|
||||
"onplay",
|
||||
"onplaying",
|
||||
"onpopstate",
|
||||
"onprogress",
|
||||
"onratechange",
|
||||
"onreset",
|
||||
"onresize",
|
||||
"onscroll",
|
||||
"onseeked",
|
||||
"onseeking",
|
||||
"onselect",
|
||||
"onshow",
|
||||
"onsort",
|
||||
"onstalled",
|
||||
"onstorage",
|
||||
"onsubmit",
|
||||
"onsuspend",
|
||||
"ontimeupdate",
|
||||
"ontoggle",
|
||||
"onunload",
|
||||
"onvolumechange",
|
||||
"onwaiting",
|
||||
}
|
||||
|
||||
// extra are ad-hoc values not covered by any of the lists above.
|
||||
var extra = []string{
|
||||
"align",
|
||||
"annotation",
|
||||
"annotation-xml",
|
||||
"applet",
|
||||
"basefont",
|
||||
"bgsound",
|
||||
"big",
|
||||
"blink",
|
||||
"center",
|
||||
"color",
|
||||
"desc",
|
||||
"face",
|
||||
"font",
|
||||
"foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive.
|
||||
"foreignobject",
|
||||
"frame",
|
||||
"frameset",
|
||||
"image",
|
||||
"isindex",
|
||||
"listing",
|
||||
"malignmark",
|
||||
"marquee",
|
||||
"math",
|
||||
"mglyph",
|
||||
"mi",
|
||||
"mn",
|
||||
"mo",
|
||||
"ms",
|
||||
"mtext",
|
||||
"nobr",
|
||||
"noembed",
|
||||
"noframes",
|
||||
"plaintext",
|
||||
"prompt",
|
||||
"public",
|
||||
"spacer",
|
||||
"strike",
|
||||
"svg",
|
||||
"system",
|
||||
"tt",
|
||||
"xmp",
|
||||
}
|
713
Godeps/_workspace/src/golang.org/x/net/html/atom/table.go
generated
vendored
@ -1,713 +0,0 @@
|
||||
// generated by go run gen.go; DO NOT EDIT
|
||||
|
||||
package atom
|
||||
|
||||
const (
|
||||
A Atom = 0x1
|
||||
Abbr Atom = 0x4
|
||||
Accept Atom = 0x2106
|
||||
AcceptCharset Atom = 0x210e
|
||||
Accesskey Atom = 0x3309
|
||||
Action Atom = 0x1f606
|
||||
Address Atom = 0x4f307
|
||||
Align Atom = 0x1105
|
||||
Alt Atom = 0x4503
|
||||
Annotation Atom = 0x1670a
|
||||
AnnotationXml Atom = 0x1670e
|
||||
Applet Atom = 0x2b306
|
||||
Area Atom = 0x2fa04
|
||||
Article Atom = 0x38807
|
||||
Aside Atom = 0x8305
|
||||
Async Atom = 0x7b05
|
||||
Audio Atom = 0xa605
|
||||
Autocomplete Atom = 0x1fc0c
|
||||
Autofocus Atom = 0xb309
|
||||
Autoplay Atom = 0xce08
|
||||
B Atom = 0x101
|
||||
Base Atom = 0xd604
|
||||
Basefont Atom = 0xd608
|
||||
Bdi Atom = 0x1a03
|
||||
Bdo Atom = 0xe703
|
||||
Bgsound Atom = 0x11807
|
||||
Big Atom = 0x12403
|
||||
Blink Atom = 0x12705
|
||||
Blockquote Atom = 0x12c0a
|
||||
Body Atom = 0x2f04
|
||||
Br Atom = 0x202
|
||||
Button Atom = 0x13606
|
||||
Canvas Atom = 0x7f06
|
||||
Caption Atom = 0x1bb07
|
||||
Center Atom = 0x5b506
|
||||
Challenge Atom = 0x21f09
|
||||
Charset Atom = 0x2807
|
||||
Checked Atom = 0x32807
|
||||
Cite Atom = 0x3c804
|
||||
Class Atom = 0x4de05
|
||||
Code Atom = 0x14904
|
||||
Col Atom = 0x15003
|
||||
Colgroup Atom = 0x15008
|
||||
Color Atom = 0x15d05
|
||||
Cols Atom = 0x16204
|
||||
Colspan Atom = 0x16207
|
||||
Command Atom = 0x17507
|
||||
Content Atom = 0x42307
|
||||
Contenteditable Atom = 0x4230f
|
||||
Contextmenu Atom = 0x3310b
|
||||
Controls Atom = 0x18808
|
||||
Coords Atom = 0x19406
|
||||
Crossorigin Atom = 0x19f0b
|
||||
Data Atom = 0x44a04
|
||||
Datalist Atom = 0x44a08
|
||||
Datetime Atom = 0x23c08
|
||||
Dd Atom = 0x26702
|
||||
Default Atom = 0x8607
|
||||
Defer Atom = 0x14b05
|
||||
Del Atom = 0x3ef03
|
||||
Desc Atom = 0x4db04
|
||||
Details Atom = 0x4807
|
||||
Dfn Atom = 0x6103
|
||||
Dialog Atom = 0x1b06
|
||||
Dir Atom = 0x6903
|
||||
Dirname Atom = 0x6907
|
||||
Disabled Atom = 0x10c08
|
||||
Div Atom = 0x11303
|
||||
Dl Atom = 0x11e02
|
||||
Download Atom = 0x40008
|
||||
Draggable Atom = 0x17b09
|
||||
Dropzone Atom = 0x39108
|
||||
Dt Atom = 0x50902
|
||||
Em Atom = 0x6502
|
||||
Embed Atom = 0x6505
|
||||
Enctype Atom = 0x21107
|
||||
Face Atom = 0x5b304
|
||||
Fieldset Atom = 0x1b008
|
||||
Figcaption Atom = 0x1b80a
|
||||
Figure Atom = 0x1cc06
|
||||
Font Atom = 0xda04
|
||||
Footer Atom = 0x8d06
|
||||
For Atom = 0x1d803
|
||||
ForeignObject Atom = 0x1d80d
|
||||
Foreignobject Atom = 0x1e50d
|
||||
Form Atom = 0x1f204
|
||||
Formaction Atom = 0x1f20a
|
||||
Formenctype Atom = 0x20d0b
|
||||
Formmethod Atom = 0x2280a
|
||||
Formnovalidate Atom = 0x2320e
|
||||
Formtarget Atom = 0x2470a
|
||||
Frame Atom = 0x9a05
|
||||
Frameset Atom = 0x9a08
|
||||
H1 Atom = 0x26e02
|
||||
H2 Atom = 0x29402
|
||||
H3 Atom = 0x2a702
|
||||
H4 Atom = 0x2e902
|
||||
H5 Atom = 0x2f302
|
||||
H6 Atom = 0x50b02
|
||||
Head Atom = 0x2d504
|
||||
Header Atom = 0x2d506
|
||||
Headers Atom = 0x2d507
|
||||
Height Atom = 0x25106
|
||||
Hgroup Atom = 0x25906
|
||||
Hidden Atom = 0x26506
|
||||
High Atom = 0x26b04
|
||||
Hr Atom = 0x27002
|
||||
Href Atom = 0x27004
|
||||
Hreflang Atom = 0x27008
|
||||
Html Atom = 0x25504
|
||||
HttpEquiv Atom = 0x2780a
|
||||
I Atom = 0x601
|
||||
Icon Atom = 0x42204
|
||||
Id Atom = 0x8502
|
||||
Iframe Atom = 0x29606
|
||||
Image Atom = 0x29c05
|
||||
Img Atom = 0x2a103
|
||||
Input Atom = 0x3e805
|
||||
Inputmode Atom = 0x3e809
|
||||
Ins Atom = 0x1a803
|
||||
Isindex Atom = 0x2a907
|
||||
Ismap Atom = 0x2b005
|
||||
Itemid Atom = 0x33c06
|
||||
Itemprop Atom = 0x3c908
|
||||
Itemref Atom = 0x5ad07
|
||||
Itemscope Atom = 0x2b909
|
||||
Itemtype Atom = 0x2c308
|
||||
Kbd Atom = 0x1903
|
||||
Keygen Atom = 0x3906
|
||||
Keytype Atom = 0x53707
|
||||
Kind Atom = 0x10904
|
||||
Label Atom = 0xf005
|
||||
Lang Atom = 0x27404
|
||||
Legend Atom = 0x18206
|
||||
Li Atom = 0x1202
|
||||
Link Atom = 0x12804
|
||||
List Atom = 0x44e04
|
||||
Listing Atom = 0x44e07
|
||||
Loop Atom = 0xf404
|
||||
Low Atom = 0x11f03
|
||||
Malignmark Atom = 0x100a
|
||||
Manifest Atom = 0x5f108
|
||||
Map Atom = 0x2b203
|
||||
Mark Atom = 0x1604
|
||||
Marquee Atom = 0x2cb07
|
||||
Math Atom = 0x2d204
|
||||
Max Atom = 0x2e103
|
||||
Maxlength Atom = 0x2e109
|
||||
Media Atom = 0x6e05
|
||||
Mediagroup Atom = 0x6e0a
|
||||
Menu Atom = 0x33804
|
||||
Menuitem Atom = 0x33808
|
||||
Meta Atom = 0x45d04
|
||||
Meter Atom = 0x24205
|
||||
Method Atom = 0x22c06
|
||||
Mglyph Atom = 0x2a206
|
||||
Mi Atom = 0x2eb02
|
||||
Min Atom = 0x2eb03
|
||||
Minlength Atom = 0x2eb09
|
||||
Mn Atom = 0x23502
|
||||
Mo Atom = 0x3ed02
|
||||
Ms Atom = 0x2bc02
|
||||
Mtext Atom = 0x2f505
|
||||
Multiple Atom = 0x30308
|
||||
Muted Atom = 0x30b05
|
||||
Name Atom = 0x6c04
|
||||
Nav Atom = 0x3e03
|
||||
Nobr Atom = 0x5704
|
||||
Noembed Atom = 0x6307
|
||||
Noframes Atom = 0x9808
|
||||
Noscript Atom = 0x3d208
|
||||
Novalidate Atom = 0x2360a
|
||||
Object Atom = 0x1ec06
|
||||
Ol Atom = 0xc902
|
||||
Onabort Atom = 0x13a07
|
||||
Onafterprint Atom = 0x1c00c
|
||||
Onautocomplete Atom = 0x1fa0e
|
||||
Onautocompleteerror Atom = 0x1fa13
|
||||
Onbeforeprint Atom = 0x6040d
|
||||
Onbeforeunload Atom = 0x4e70e
|
||||
Onblur Atom = 0xaa06
|
||||
Oncancel Atom = 0xe908
|
||||
Oncanplay Atom = 0x28509
|
||||
Oncanplaythrough Atom = 0x28510
|
||||
Onchange Atom = 0x3a708
|
||||
Onclick Atom = 0x31007
|
||||
Onclose Atom = 0x31707
|
||||
Oncontextmenu Atom = 0x32f0d
|
||||
Oncuechange Atom = 0x3420b
|
||||
Ondblclick Atom = 0x34d0a
|
||||
Ondrag Atom = 0x35706
|
||||
Ondragend Atom = 0x35709
|
||||
Ondragenter Atom = 0x3600b
|
||||
Ondragleave Atom = 0x36b0b
|
||||
Ondragover Atom = 0x3760a
|
||||
Ondragstart Atom = 0x3800b
|
||||
Ondrop Atom = 0x38f06
|
||||
Ondurationchange Atom = 0x39f10
|
||||
Onemptied Atom = 0x39609
|
||||
Onended Atom = 0x3af07
|
||||
Onerror Atom = 0x3b607
|
||||
Onfocus Atom = 0x3bd07
|
||||
Onhashchange Atom = 0x3da0c
|
||||
Oninput Atom = 0x3e607
|
||||
Oninvalid Atom = 0x3f209
|
||||
Onkeydown Atom = 0x3fb09
|
||||
Onkeypress Atom = 0x4080a
|
||||
Onkeyup Atom = 0x41807
|
||||
Onlanguagechange Atom = 0x43210
|
||||
Onload Atom = 0x44206
|
||||
Onloadeddata Atom = 0x4420c
|
||||
Onloadedmetadata Atom = 0x45510
|
||||
Onloadstart Atom = 0x46b0b
|
||||
Onmessage Atom = 0x47609
|
||||
Onmousedown Atom = 0x47f0b
|
||||
Onmousemove Atom = 0x48a0b
|
||||
Onmouseout Atom = 0x4950a
|
||||
Onmouseover Atom = 0x4a20b
|
||||
Onmouseup Atom = 0x4ad09
|
||||
Onmousewheel Atom = 0x4b60c
|
||||
Onoffline Atom = 0x4c209
|
||||
Ononline Atom = 0x4cb08
|
||||
Onpagehide Atom = 0x4d30a
|
||||
Onpageshow Atom = 0x4fe0a
|
||||
Onpause Atom = 0x50d07
|
||||
Onplay Atom = 0x51706
|
||||
Onplaying Atom = 0x51709
|
||||
Onpopstate Atom = 0x5200a
|
||||
Onprogress Atom = 0x52a0a
|
||||
Onratechange Atom = 0x53e0c
|
||||
Onreset Atom = 0x54a07
|
||||
Onresize Atom = 0x55108
|
||||
Onscroll Atom = 0x55f08
|
||||
Onseeked Atom = 0x56708
|
||||
Onseeking Atom = 0x56f09
|
||||
Onselect Atom = 0x57808
|
||||
Onshow Atom = 0x58206
|
||||
Onsort Atom = 0x58b06
|
||||
Onstalled Atom = 0x59509
|
||||
Onstorage Atom = 0x59e09
|
||||
Onsubmit Atom = 0x5a708
|
||||
Onsuspend Atom = 0x5bb09
|
||||
Ontimeupdate Atom = 0xdb0c
|
||||
Ontoggle Atom = 0x5c408
|
||||
Onunload Atom = 0x5cc08
|
||||
Onvolumechange Atom = 0x5d40e
|
||||
Onwaiting Atom = 0x5e209
|
||||
Open Atom = 0x3cf04
|
||||
Optgroup Atom = 0xf608
|
||||
Optimum Atom = 0x5eb07
|
||||
Option Atom = 0x60006
|
||||
Output Atom = 0x49c06
|
||||
P Atom = 0xc01
|
||||
Param Atom = 0xc05
|
||||
Pattern Atom = 0x5107
|
||||
Ping Atom = 0x7704
|
||||
Placeholder Atom = 0xc30b
|
||||
Plaintext Atom = 0xfd09
|
||||
Poster Atom = 0x15706
|
||||
Pre Atom = 0x25e03
|
||||
Preload Atom = 0x25e07
|
||||
Progress Atom = 0x52c08
|
||||
Prompt Atom = 0x5fa06
|
||||
Public Atom = 0x41e06
|
||||
Q Atom = 0x13101
|
||||
Radiogroup Atom = 0x30a
|
||||
Readonly Atom = 0x2fb08
|
||||
Rel Atom = 0x25f03
|
||||
Required Atom = 0x1d008
|
||||
Reversed Atom = 0x5a08
|
||||
Rows Atom = 0x9204
|
||||
Rowspan Atom = 0x9207
|
||||
Rp Atom = 0x1c602
|
||||
Rt Atom = 0x13f02
|
||||
Ruby Atom = 0xaf04
|
||||
S Atom = 0x2c01
|
||||
Samp Atom = 0x4e04
|
||||
Sandbox Atom = 0xbb07
|
||||
Scope Atom = 0x2bd05
|
||||
Scoped Atom = 0x2bd06
|
||||
Script Atom = 0x3d406
|
||||
Seamless Atom = 0x31c08
|
||||
Section Atom = 0x4e207
|
||||
Select Atom = 0x57a06
|
||||
Selected Atom = 0x57a08
|
||||
Shape Atom = 0x4f905
|
||||
Size Atom = 0x55504
|
||||
Sizes Atom = 0x55505
|
||||
Small Atom = 0x18f05
|
||||
Sortable Atom = 0x58d08
|
||||
Sorted Atom = 0x19906
|
||||
Source Atom = 0x1aa06
|
||||
Spacer Atom = 0x2db06
|
||||
Span Atom = 0x9504
|
||||
Spellcheck Atom = 0x3230a
|
||||
Src Atom = 0x3c303
|
||||
Srcdoc Atom = 0x3c306
|
||||
Srclang Atom = 0x41107
|
||||
Start Atom = 0x38605
|
||||
Step Atom = 0x5f704
|
||||
Strike Atom = 0x53306
|
||||
Strong Atom = 0x55906
|
||||
Style Atom = 0x61105
|
||||
Sub Atom = 0x5a903
|
||||
Summary Atom = 0x61607
|
||||
Sup Atom = 0x61d03
|
||||
Svg Atom = 0x62003
|
||||
System Atom = 0x62306
|
||||
Tabindex Atom = 0x46308
|
||||
Table Atom = 0x42d05
|
||||
Target Atom = 0x24b06
|
||||
Tbody Atom = 0x2e05
|
||||
Td Atom = 0x4702
|
||||
Template Atom = 0x62608
|
||||
Textarea Atom = 0x2f608
|
||||
Tfoot Atom = 0x8c05
|
||||
Th Atom = 0x22e02
|
||||
Thead Atom = 0x2d405
|
||||
Time Atom = 0xdd04
|
||||
Title Atom = 0xa105
|
||||
Tr Atom = 0x10502
|
||||
Track Atom = 0x10505
|
||||
Translate Atom = 0x14009
|
||||
Tt Atom = 0x5302
|
||||
Type Atom = 0x21404
|
||||
Typemustmatch Atom = 0x2140d
|
||||
U Atom = 0xb01
|
||||
Ul Atom = 0x8a02
|
||||
Usemap Atom = 0x51106
|
||||
Value Atom = 0x4005
|
||||
Var Atom = 0x11503
|
||||
Video Atom = 0x28105
|
||||
Wbr Atom = 0x12103
|
||||
Width Atom = 0x50705
|
||||
Wrap Atom = 0x58704
|
||||
Xmp Atom = 0xc103
|
||||
)
|
||||
|
||||
const hash0 = 0xc17da63e
|
||||
|
||||
const maxAtomLen = 19
|
||||
|
||||
var table = [1 << 9]Atom{
|
||||
0x1: 0x48a0b, // onmousemove
|
||||
0x2: 0x5e209, // onwaiting
|
||||
0x3: 0x1fa13, // onautocompleteerror
|
||||
0x4: 0x5fa06, // prompt
|
||||
0x7: 0x5eb07, // optimum
|
||||
0x8: 0x1604, // mark
|
||||
0xa: 0x5ad07, // itemref
|
||||
0xb: 0x4fe0a, // onpageshow
|
||||
0xc: 0x57a06, // select
|
||||
0xd: 0x17b09, // draggable
|
||||
0xe: 0x3e03, // nav
|
||||
0xf: 0x17507, // command
|
||||
0x11: 0xb01, // u
|
||||
0x14: 0x2d507, // headers
|
||||
0x15: 0x44a08, // datalist
|
||||
0x17: 0x4e04, // samp
|
||||
0x1a: 0x3fb09, // onkeydown
|
||||
0x1b: 0x55f08, // onscroll
|
||||
0x1c: 0x15003, // col
|
||||
0x20: 0x3c908, // itemprop
|
||||
0x21: 0x2780a, // http-equiv
|
||||
0x22: 0x61d03, // sup
|
||||
0x24: 0x1d008, // required
|
||||
0x2b: 0x25e07, // preload
|
||||
0x2c: 0x6040d, // onbeforeprint
|
||||
0x2d: 0x3600b, // ondragenter
|
||||
0x2e: 0x50902, // dt
|
||||
0x2f: 0x5a708, // onsubmit
|
||||
0x30: 0x27002, // hr
|
||||
0x31: 0x32f0d, // oncontextmenu
|
||||
0x33: 0x29c05, // image
|
||||
0x34: 0x50d07, // onpause
|
||||
0x35: 0x25906, // hgroup
|
||||
0x36: 0x7704, // ping
|
||||
0x37: 0x57808, // onselect
|
||||
0x3a: 0x11303, // div
|
||||
0x3b: 0x1fa0e, // onautocomplete
|
||||
0x40: 0x2eb02, // mi
|
||||
0x41: 0x31c08, // seamless
|
||||
0x42: 0x2807, // charset
|
||||
0x43: 0x8502, // id
|
||||
0x44: 0x5200a, // onpopstate
|
||||
0x45: 0x3ef03, // del
|
||||
0x46: 0x2cb07, // marquee
|
||||
0x47: 0x3309, // accesskey
|
||||
0x49: 0x8d06, // footer
|
||||
0x4a: 0x44e04, // list
|
||||
0x4b: 0x2b005, // ismap
|
||||
0x51: 0x33804, // menu
|
||||
0x52: 0x2f04, // body
|
||||
0x55: 0x9a08, // frameset
|
||||
0x56: 0x54a07, // onreset
|
||||
0x57: 0x12705, // blink
|
||||
0x58: 0xa105, // title
|
||||
0x59: 0x38807, // article
|
||||
0x5b: 0x22e02, // th
|
||||
0x5d: 0x13101, // q
|
||||
0x5e: 0x3cf04, // open
|
||||
0x5f: 0x2fa04, // area
|
||||
0x61: 0x44206, // onload
|
||||
0x62: 0xda04, // font
|
||||
0x63: 0xd604, // base
|
||||
0x64: 0x16207, // colspan
|
||||
0x65: 0x53707, // keytype
|
||||
0x66: 0x11e02, // dl
|
||||
0x68: 0x1b008, // fieldset
|
||||
0x6a: 0x2eb03, // min
|
||||
0x6b: 0x11503, // var
|
||||
0x6f: 0x2d506, // header
|
||||
0x70: 0x13f02, // rt
|
||||
0x71: 0x15008, // colgroup
|
||||
0x72: 0x23502, // mn
|
||||
0x74: 0x13a07, // onabort
|
||||
0x75: 0x3906, // keygen
|
||||
0x76: 0x4c209, // onoffline
|
||||
0x77: 0x21f09, // challenge
|
||||
0x78: 0x2b203, // map
|
||||
0x7a: 0x2e902, // h4
|
||||
0x7b: 0x3b607, // onerror
|
||||
0x7c: 0x2e109, // maxlength
|
||||
0x7d: 0x2f505, // mtext
|
||||
0x7e: 0xbb07, // sandbox
|
||||
0x7f: 0x58b06, // onsort
|
||||
0x80: 0x100a, // malignmark
|
||||
0x81: 0x45d04, // meta
|
||||
0x82: 0x7b05, // async
|
||||
0x83: 0x2a702, // h3
|
||||
0x84: 0x26702, // dd
|
||||
0x85: 0x27004, // href
|
||||
0x86: 0x6e0a, // mediagroup
|
||||
0x87: 0x19406, // coords
|
||||
0x88: 0x41107, // srclang
|
||||
0x89: 0x34d0a, // ondblclick
|
||||
0x8a: 0x4005, // value
|
||||
0x8c: 0xe908, // oncancel
|
||||
0x8e: 0x3230a, // spellcheck
|
||||
0x8f: 0x9a05, // frame
|
||||
0x91: 0x12403, // big
|
||||
0x94: 0x1f606, // action
|
||||
0x95: 0x6903, // dir
|
||||
0x97: 0x2fb08, // readonly
|
||||
0x99: 0x42d05, // table
|
||||
0x9a: 0x61607, // summary
|
||||
0x9b: 0x12103, // wbr
|
||||
0x9c: 0x30a, // radiogroup
|
||||
0x9d: 0x6c04, // name
|
||||
0x9f: 0x62306, // system
|
||||
0xa1: 0x15d05, // color
|
||||
0xa2: 0x7f06, // canvas
|
||||
0xa3: 0x25504, // html
|
||||
0xa5: 0x56f09, // onseeking
|
||||
0xac: 0x4f905, // shape
|
||||
0xad: 0x25f03, // rel
|
||||
0xae: 0x28510, // oncanplaythrough
|
||||
0xaf: 0x3760a, // ondragover
|
||||
0xb0: 0x62608, // template
|
||||
0xb1: 0x1d80d, // foreignObject
|
||||
0xb3: 0x9204, // rows
|
||||
0xb6: 0x44e07, // listing
|
||||
0xb7: 0x49c06, // output
|
||||
0xb9: 0x3310b, // contextmenu
|
||||
0xbb: 0x11f03, // low
|
||||
0xbc: 0x1c602, // rp
|
||||
0xbd: 0x5bb09, // onsuspend
|
||||
0xbe: 0x13606, // button
|
||||
0xbf: 0x4db04, // desc
|
||||
0xc1: 0x4e207, // section
|
||||
0xc2: 0x52a0a, // onprogress
|
||||
0xc3: 0x59e09, // onstorage
|
||||
0xc4: 0x2d204, // math
|
||||
0xc5: 0x4503, // alt
|
||||
0xc7: 0x8a02, // ul
|
||||
0xc8: 0x5107, // pattern
|
||||
0xc9: 0x4b60c, // onmousewheel
|
||||
0xca: 0x35709, // ondragend
|
||||
0xcb: 0xaf04, // ruby
|
||||
0xcc: 0xc01, // p
|
||||
0xcd: 0x31707, // onclose
|
||||
0xce: 0x24205, // meter
|
||||
0xcf: 0x11807, // bgsound
|
||||
0xd2: 0x25106, // height
|
||||
0xd4: 0x101, // b
|
||||
0xd5: 0x2c308, // itemtype
|
||||
0xd8: 0x1bb07, // caption
|
||||
0xd9: 0x10c08, // disabled
|
||||
0xdb: 0x33808, // menuitem
|
||||
0xdc: 0x62003, // svg
|
||||
0xdd: 0x18f05, // small
|
||||
0xde: 0x44a04, // data
|
||||
0xe0: 0x4cb08, // ononline
|
||||
0xe1: 0x2a206, // mglyph
|
||||
0xe3: 0x6505, // embed
|
||||
0xe4: 0x10502, // tr
|
||||
0xe5: 0x46b0b, // onloadstart
|
||||
0xe7: 0x3c306, // srcdoc
|
||||
0xeb: 0x5c408, // ontoggle
|
||||
0xed: 0xe703, // bdo
|
||||
0xee: 0x4702, // td
|
||||
0xef: 0x8305, // aside
|
||||
0xf0: 0x29402, // h2
|
||||
0xf1: 0x52c08, // progress
|
||||
0xf2: 0x12c0a, // blockquote
|
||||
0xf4: 0xf005, // label
|
||||
0xf5: 0x601, // i
|
||||
0xf7: 0x9207, // rowspan
|
||||
0xfb: 0x51709, // onplaying
|
||||
0xfd: 0x2a103, // img
|
||||
0xfe: 0xf608, // optgroup
|
||||
0xff: 0x42307, // content
|
||||
0x101: 0x53e0c, // onratechange
|
||||
0x103: 0x3da0c, // onhashchange
|
||||
0x104: 0x4807, // details
|
||||
0x106: 0x40008, // download
|
||||
0x109: 0x14009, // translate
|
||||
0x10b: 0x4230f, // contenteditable
|
||||
0x10d: 0x36b0b, // ondragleave
|
||||
0x10e: 0x2106, // accept
|
||||
0x10f: 0x57a08, // selected
|
||||
0x112: 0x1f20a, // formaction
|
||||
0x113: 0x5b506, // center
|
||||
0x115: 0x45510, // onloadedmetadata
|
||||
0x116: 0x12804, // link
|
||||
0x117: 0xdd04, // time
|
||||
0x118: 0x19f0b, // crossorigin
|
||||
0x119: 0x3bd07, // onfocus
|
||||
0x11a: 0x58704, // wrap
|
||||
0x11b: 0x42204, // icon
|
||||
0x11d: 0x28105, // video
|
||||
0x11e: 0x4de05, // class
|
||||
0x121: 0x5d40e, // onvolumechange
|
||||
0x122: 0xaa06, // onblur
|
||||
0x123: 0x2b909, // itemscope
|
||||
0x124: 0x61105, // style
|
||||
0x127: 0x41e06, // public
|
||||
0x129: 0x2320e, // formnovalidate
|
||||
0x12a: 0x58206, // onshow
|
||||
0x12c: 0x51706, // onplay
|
||||
0x12d: 0x3c804, // cite
|
||||
0x12e: 0x2bc02, // ms
|
||||
0x12f: 0xdb0c, // ontimeupdate
|
||||
0x130: 0x10904, // kind
|
||||
0x131: 0x2470a, // formtarget
|
||||
0x135: 0x3af07, // onended
|
||||
0x136: 0x26506, // hidden
|
||||
0x137: 0x2c01, // s
|
||||
0x139: 0x2280a, // formmethod
|
||||
0x13a: 0x3e805, // input
|
||||
0x13c: 0x50b02, // h6
|
||||
0x13d: 0xc902, // ol
|
||||
0x13e: 0x3420b, // oncuechange
|
||||
0x13f: 0x1e50d, // foreignobject
|
||||
0x143: 0x4e70e, // onbeforeunload
|
||||
0x144: 0x2bd05, // scope
|
||||
0x145: 0x39609, // onemptied
|
||||
0x146: 0x14b05, // defer
|
||||
0x147: 0xc103, // xmp
|
||||
0x148: 0x39f10, // ondurationchange
|
||||
0x149: 0x1903, // kbd
|
||||
0x14c: 0x47609, // onmessage
|
||||
0x14d: 0x60006, // option
|
||||
0x14e: 0x2eb09, // minlength
|
||||
0x14f: 0x32807, // checked
|
||||
0x150: 0xce08, // autoplay
|
||||
0x152: 0x202, // br
|
||||
0x153: 0x2360a, // novalidate
|
||||
0x156: 0x6307, // noembed
|
||||
0x159: 0x31007, // onclick
|
||||
0x15a: 0x47f0b, // onmousedown
|
||||
0x15b: 0x3a708, // onchange
|
||||
0x15e: 0x3f209, // oninvalid
|
||||
0x15f: 0x2bd06, // scoped
|
||||
0x160: 0x18808, // controls
|
||||
0x161: 0x30b05, // muted
|
||||
0x162: 0x58d08, // sortable
|
||||
0x163: 0x51106, // usemap
|
||||
0x164: 0x1b80a, // figcaption
|
||||
0x165: 0x35706, // ondrag
|
||||
0x166: 0x26b04, // high
|
||||
0x168: 0x3c303, // src
|
||||
0x169: 0x15706, // poster
|
||||
0x16b: 0x1670e, // annotation-xml
|
||||
0x16c: 0x5f704, // step
|
||||
0x16d: 0x4, // abbr
|
||||
0x16e: 0x1b06, // dialog
|
||||
0x170: 0x1202, // li
|
||||
0x172: 0x3ed02, // mo
|
||||
0x175: 0x1d803, // for
|
||||
0x176: 0x1a803, // ins
|
||||
0x178: 0x55504, // size
|
||||
0x179: 0x43210, // onlanguagechange
|
||||
0x17a: 0x8607, // default
|
||||
0x17b: 0x1a03, // bdi
|
||||
0x17c: 0x4d30a, // onpagehide
|
||||
0x17d: 0x6907, // dirname
|
||||
0x17e: 0x21404, // type
|
||||
0x17f: 0x1f204, // form
|
||||
0x181: 0x28509, // oncanplay
|
||||
0x182: 0x6103, // dfn
|
||||
0x183: 0x46308, // tabindex
|
||||
0x186: 0x6502, // em
|
||||
0x187: 0x27404, // lang
|
||||
0x189: 0x39108, // dropzone
|
||||
0x18a: 0x4080a, // onkeypress
|
||||
0x18b: 0x23c08, // datetime
|
||||
0x18c: 0x16204, // cols
|
||||
0x18d: 0x1, // a
|
||||
0x18e: 0x4420c, // onloadeddata
|
||||
0x190: 0xa605, // audio
|
||||
0x192: 0x2e05, // tbody
|
||||
0x193: 0x22c06, // method
|
||||
0x195: 0xf404, // loop
|
||||
0x196: 0x29606, // iframe
|
||||
0x198: 0x2d504, // head
|
||||
0x19e: 0x5f108, // manifest
|
||||
0x19f: 0xb309, // autofocus
|
||||
0x1a0: 0x14904, // code
|
||||
0x1a1: 0x55906, // strong
|
||||
0x1a2: 0x30308, // multiple
|
||||
0x1a3: 0xc05, // param
|
||||
0x1a6: 0x21107, // enctype
|
||||
0x1a7: 0x5b304, // face
|
||||
0x1a8: 0xfd09, // plaintext
|
||||
0x1a9: 0x26e02, // h1
|
||||
0x1aa: 0x59509, // onstalled
|
||||
0x1ad: 0x3d406, // script
|
||||
0x1ae: 0x2db06, // spacer
|
||||
0x1af: 0x55108, // onresize
|
||||
0x1b0: 0x4a20b, // onmouseover
|
||||
0x1b1: 0x5cc08, // onunload
|
||||
0x1b2: 0x56708, // onseeked
|
||||
0x1b4: 0x2140d, // typemustmatch
|
||||
0x1b5: 0x1cc06, // figure
|
||||
0x1b6: 0x4950a, // onmouseout
|
||||
0x1b7: 0x25e03, // pre
|
||||
0x1b8: 0x50705, // width
|
||||
0x1b9: 0x19906, // sorted
|
||||
0x1bb: 0x5704, // nobr
|
||||
0x1be: 0x5302, // tt
|
||||
0x1bf: 0x1105, // align
|
||||
0x1c0: 0x3e607, // oninput
|
||||
0x1c3: 0x41807, // onkeyup
|
||||
0x1c6: 0x1c00c, // onafterprint
|
||||
0x1c7: 0x210e, // accept-charset
|
||||
0x1c8: 0x33c06, // itemid
|
||||
0x1c9: 0x3e809, // inputmode
|
||||
0x1cb: 0x53306, // strike
|
||||
0x1cc: 0x5a903, // sub
|
||||
0x1cd: 0x10505, // track
|
||||
0x1ce: 0x38605, // start
|
||||
0x1d0: 0xd608, // basefont
|
||||
0x1d6: 0x1aa06, // source
|
||||
0x1d7: 0x18206, // legend
|
||||
0x1d8: 0x2d405, // thead
|
||||
0x1da: 0x8c05, // tfoot
|
||||
0x1dd: 0x1ec06, // object
|
||||
0x1de: 0x6e05, // media
|
||||
0x1df: 0x1670a, // annotation
|
||||
0x1e0: 0x20d0b, // formenctype
|
||||
0x1e2: 0x3d208, // noscript
|
||||
0x1e4: 0x55505, // sizes
|
||||
0x1e5: 0x1fc0c, // autocomplete
|
||||
0x1e6: 0x9504, // span
|
||||
0x1e7: 0x9808, // noframes
|
||||
0x1e8: 0x24b06, // target
|
||||
0x1e9: 0x38f06, // ondrop
|
||||
0x1ea: 0x2b306, // applet
|
||||
0x1ec: 0x5a08, // reversed
|
||||
0x1f0: 0x2a907, // isindex
|
||||
0x1f3: 0x27008, // hreflang
|
||||
0x1f5: 0x2f302, // h5
|
||||
0x1f6: 0x4f307, // address
|
||||
0x1fa: 0x2e103, // max
|
||||
0x1fb: 0xc30b, // placeholder
|
||||
0x1fc: 0x2f608, // textarea
|
||||
0x1fe: 0x4ad09, // onmouseup
|
||||
0x1ff: 0x3800b, // ondragstart
|
||||
}
|
||||
|
||||
const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" +
|
||||
"genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" +
|
||||
"ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" +
|
||||
"utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" +
|
||||
"labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" +
|
||||
"blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" +
|
||||
"nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" +
|
||||
"originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" +
|
||||
"bjectforeignobjectformactionautocompleteerrorformenctypemust" +
|
||||
"matchallengeformmethodformnovalidatetimeterformtargetheightm" +
|
||||
"lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" +
|
||||
"h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" +
|
||||
"eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" +
|
||||
"utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" +
|
||||
"hangeondblclickondragendondragenterondragleaveondragoverondr" +
|
||||
"agstarticleondropzonemptiedondurationchangeonendedonerroronf" +
|
||||
"ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" +
|
||||
"nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" +
|
||||
"uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" +
|
||||
"rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" +
|
||||
"ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" +
|
||||
"oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" +
|
||||
"teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" +
|
||||
"ollonseekedonseekingonselectedonshowraponsortableonstalledon" +
|
||||
"storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" +
|
||||
"changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" +
|
||||
"mmarysupsvgsystemplate"
|
351
Godeps/_workspace/src/golang.org/x/net/html/atom/table_test.go
generated
vendored
@ -1,351 +0,0 @@
|
||||
// generated by go run gen.go -test; DO NOT EDIT
|
||||
|
||||
package atom
|
||||
|
||||
var testAtomList = []string{
|
||||
"a",
|
||||
"abbr",
|
||||
"abbr",
|
||||
"accept",
|
||||
"accept-charset",
|
||||
"accesskey",
|
||||
"action",
|
"address", "align", "alt", "annotation", "annotation-xml", "applet", "area", "article",
"aside", "async", "audio", "autocomplete", "autofocus", "autoplay", "b", "base",
"basefont", "bdi", "bdo", "bgsound", "big", "blink", "blockquote", "body",
"br", "button", "canvas", "caption", "center", "challenge", "charset", "checked",
"cite", "cite", "class", "code", "col", "colgroup", "color", "cols",
"colspan", "command", "command", "content", "contenteditable", "contextmenu", "controls", "coords",
"crossorigin", "data", "data", "datalist", "datetime", "dd", "default", "defer",
"del", "desc", "details", "dfn", "dialog", "dir", "dirname", "disabled",
"div", "dl", "download", "draggable", "dropzone", "dt", "em", "embed",
"enctype", "face", "fieldset", "figcaption", "figure", "font", "footer", "for",
"foreignObject", "foreignobject", "form", "form", "formaction", "formenctype", "formmethod", "formnovalidate",
"formtarget", "frame", "frameset", "h1", "h2", "h3", "h4", "h5",
"h6", "head", "header", "headers", "height", "hgroup", "hidden", "high",
"hr", "href", "hreflang", "html", "http-equiv", "i", "icon", "id",
"iframe", "image", "img", "input", "inputmode", "ins", "isindex", "ismap",
"itemid", "itemprop", "itemref", "itemscope", "itemtype", "kbd", "keygen", "keytype",
"kind", "label", "label", "lang", "legend", "li", "link", "list",
"listing", "loop", "low", "malignmark", "manifest", "map", "mark", "marquee",
"math", "max", "maxlength", "media", "mediagroup", "menu", "menuitem", "meta",
"meter", "method", "mglyph", "mi", "min", "minlength", "mn", "mo",
"ms", "mtext", "multiple", "muted", "name", "nav", "nobr", "noembed",
"noframes", "noscript", "novalidate", "object", "ol", "onabort", "onafterprint", "onautocomplete",
"onautocompleteerror", "onbeforeprint", "onbeforeunload", "onblur", "oncancel", "oncanplay", "oncanplaythrough", "onchange",
"onclick", "onclose", "oncontextmenu", "oncuechange", "ondblclick", "ondrag", "ondragend", "ondragenter",
"ondragleave", "ondragover", "ondragstart", "ondrop", "ondurationchange", "onemptied", "onended", "onerror",
"onfocus", "onhashchange", "oninput", "oninvalid", "onkeydown", "onkeypress", "onkeyup", "onlanguagechange",
"onload", "onloadeddata", "onloadedmetadata", "onloadstart", "onmessage", "onmousedown", "onmousemove", "onmouseout",
"onmouseover", "onmouseup", "onmousewheel", "onoffline", "ononline", "onpagehide", "onpageshow", "onpause",
"onplay", "onplaying", "onpopstate", "onprogress", "onratechange", "onreset", "onresize", "onscroll",
"onseeked", "onseeking", "onselect", "onshow", "onsort", "onstalled", "onstorage", "onsubmit",
"onsuspend", "ontimeupdate", "ontoggle", "onunload", "onvolumechange", "onwaiting", "open", "optgroup",
"optimum", "option", "output", "p", "param", "pattern", "ping", "placeholder",
"plaintext", "poster", "pre", "preload", "progress", "prompt", "public", "q",
"radiogroup", "readonly", "rel", "required", "reversed", "rows", "rowspan", "rp",
"rt", "ruby", "s", "samp", "sandbox", "scope", "scoped", "script",
"seamless", "section", "select", "selected", "shape", "size", "sizes", "small",
"sortable", "sorted", "source", "spacer", "span", "span", "spellcheck", "src",
"srcdoc", "srclang", "start", "step", "strike", "strong", "style", "style",
"sub", "summary", "sup", "svg", "system", "tabindex", "table", "target",
"tbody", "td", "template", "textarea", "tfoot", "th", "thead", "time",
"title", "title", "tr", "track", "translate", "tt", "type", "typemustmatch",
"u", "ul", "usemap", "value", "var", "video", "wbr", "width",
"wrap", "xmp",
}
244
Godeps/_workspace/src/golang.org/x/net/html/charset/charset.go
generated
vendored
@@ -1,244 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package charset provides common text encodings for HTML documents.
//
// The mapping from encoding labels to encodings is defined at
// https://encoding.spec.whatwg.org/.
package charset

import (
    "bytes"
    "fmt"
    "io"
    "mime"
    "strings"
    "unicode/utf8"

    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/html"
    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding"
    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/charmap"
    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/transform"
)

// Lookup returns the encoding with the specified label, and its canonical
// name. It returns nil and the empty string if label is not one of the
// standard encodings for HTML. Matching is case-insensitive and ignores
// leading and trailing whitespace.
func Lookup(label string) (e encoding.Encoding, name string) {
    label = strings.ToLower(strings.Trim(label, "\t\n\r\f "))
    enc := encodings[label]
    return enc.e, enc.name
}

// DetermineEncoding determines the encoding of an HTML document by examining
// up to the first 1024 bytes of content and the declared Content-Type.
//
// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding
func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) {
    if len(content) > 1024 {
        content = content[:1024]
    }

    for _, b := range boms {
        if bytes.HasPrefix(content, b.bom) {
            e, name = Lookup(b.enc)
            return e, name, true
        }
    }

    if _, params, err := mime.ParseMediaType(contentType); err == nil {
        if cs, ok := params["charset"]; ok {
            if e, name = Lookup(cs); e != nil {
                return e, name, true
            }
        }
    }

    if len(content) > 0 {
        e, name = prescan(content)
        if e != nil {
            return e, name, false
        }
    }

    // Try to detect UTF-8.
    // First eliminate any partial rune at the end.
    for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
        b := content[i]
        if b < 0x80 {
            break
        }
        if utf8.RuneStart(b) {
            content = content[:i]
            break
        }
    }
    hasHighBit := false
    for _, c := range content {
        if c >= 0x80 {
            hasHighBit = true
            break
        }
    }
    if hasHighBit && utf8.Valid(content) {
        return encoding.Nop, "utf-8", false
    }

    // TODO: change default depending on user's locale?
    return charmap.Windows1252, "windows-1252", false
}

// NewReader returns an io.Reader that converts the content of r to UTF-8.
// It calls DetermineEncoding to find out what r's encoding is.
func NewReader(r io.Reader, contentType string) (io.Reader, error) {
    preview := make([]byte, 1024)
    n, err := io.ReadFull(r, preview)
    switch {
    case err == io.ErrUnexpectedEOF:
        preview = preview[:n]
        r = bytes.NewReader(preview)
    case err != nil:
        return nil, err
    default:
        r = io.MultiReader(bytes.NewReader(preview), r)
    }

    if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop {
        r = transform.NewReader(r, e.NewDecoder())
    }
    return r, nil
}

// NewReaderLabel returns a reader that converts from the specified charset to
// UTF-8. It uses Lookup to find the encoding that corresponds to label, and
// returns an error if Lookup returns nil. It is suitable for use as
// encoding/xml.Decoder's CharsetReader function.
func NewReaderLabel(label string, input io.Reader) (io.Reader, error) {
    e, _ := Lookup(label)
    if e == nil {
        return nil, fmt.Errorf("unsupported charset: %q", label)
    }
    return transform.NewReader(input, e.NewDecoder()), nil
}

func prescan(content []byte) (e encoding.Encoding, name string) {
    z := html.NewTokenizer(bytes.NewReader(content))
    for {
        switch z.Next() {
        case html.ErrorToken:
            return nil, ""

        case html.StartTagToken, html.SelfClosingTagToken:
            tagName, hasAttr := z.TagName()
            if !bytes.Equal(tagName, []byte("meta")) {
                continue
            }
            attrList := make(map[string]bool)
            gotPragma := false

            const (
                dontKnow = iota
                doNeedPragma
                doNotNeedPragma
            )
            needPragma := dontKnow

            name = ""
            e = nil
            for hasAttr {
                var key, val []byte
                key, val, hasAttr = z.TagAttr()
                ks := string(key)
                if attrList[ks] {
                    continue
                }
                attrList[ks] = true
                for i, c := range val {
                    if 'A' <= c && c <= 'Z' {
                        val[i] = c + 0x20
                    }
                }

                switch ks {
                case "http-equiv":
                    if bytes.Equal(val, []byte("content-type")) {
                        gotPragma = true
                    }

                case "content":
                    if e == nil {
                        name = fromMetaElement(string(val))
                        if name != "" {
                            e, name = Lookup(name)
                            if e != nil {
                                needPragma = doNeedPragma
                            }
                        }
                    }

                case "charset":
                    e, name = Lookup(string(val))
                    needPragma = doNotNeedPragma
                }
            }

            if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
                continue
            }

            if strings.HasPrefix(name, "utf-16") {
                name = "utf-8"
                e = encoding.Nop
            }

            if e != nil {
                return e, name
            }
        }
    }
}

func fromMetaElement(s string) string {
    for s != "" {
        csLoc := strings.Index(s, "charset")
        if csLoc == -1 {
            return ""
        }
        s = s[csLoc+len("charset"):]
        s = strings.TrimLeft(s, " \t\n\f\r")
        if !strings.HasPrefix(s, "=") {
            continue
        }
        s = s[1:]
        s = strings.TrimLeft(s, " \t\n\f\r")
        if s == "" {
            return ""
        }
        if q := s[0]; q == '"' || q == '\'' {
            s = s[1:]
            closeQuote := strings.IndexRune(s, rune(q))
            if closeQuote == -1 {
                return ""
            }
            return s[:closeQuote]
        }

        end := strings.IndexAny(s, "; \t\n\f\r")
        if end == -1 {
            end = len(s)
        }
        return s[:end]
    }
    return ""
}

var boms = []struct {
    bom []byte
    enc string
}{
    {[]byte{0xfe, 0xff}, "utf-16be"},
    {[]byte{0xff, 0xfe}, "utf-16le"},
    {[]byte{0xef, 0xbb, 0xbf}, "utf-8"},
}
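The two exported entry points above, DetermineEncoding and NewReader, are how callers of this vendored package normally consume it. A minimal usage sketch (an illustration only, not code from this commit; it assumes the vendored import path shown above and a placeholder URL):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"

    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/html/charset"
)

func main() {
    // Placeholder URL; any text/html response is handled the same way.
    resp, err := http.Get("http://example.com/")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // NewReader sniffs up to the first 1024 bytes (BOM, Content-Type charset,
    // <meta> prescan, UTF-8 heuristic) and wraps the body in the matching decoder.
    r, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
    if err != nil {
        panic(err)
    }
    body, err := ioutil.ReadAll(r)
    if err != nil {
        panic(err)
    }
    fmt.Printf("read %d bytes of UTF-8\n", len(body))
}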
236
Godeps/_workspace/src/golang.org/x/net/html/charset/charset_test.go
generated
vendored
@@ -1,236 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package charset

import (
    "bytes"
    "encoding/xml"
    "io/ioutil"
    "runtime"
    "strings"
    "testing"

    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/transform"
)

func transformString(t transform.Transformer, s string) (string, error) {
    r := transform.NewReader(strings.NewReader(s), t)
    b, err := ioutil.ReadAll(r)
    return string(b), err
}

var testCases = []struct {
    utf8, other, otherEncoding string
}{
    {"Résumé", "Résumé", "utf8"},
    {"Résumé", "R\xe9sum\xe9", "latin1"},
    {"これは漢字です。", "S0\x8c0o0\"oW[g0Y0\x020", "UTF-16LE"},
    {"これは漢字です。", "0S0\x8c0oo\"[W0g0Y0\x02", "UTF-16BE"},
    {"Hello, world", "Hello, world", "ASCII"},
    {"Gdańsk", "Gda\xf1sk", "ISO-8859-2"},
    {"Ââ Čč Đđ Ŋŋ Õõ Šš Žž Åå Ää", "\xc2\xe2 \xc8\xe8 \xa9\xb9 \xaf\xbf \xd5\xf5 \xaa\xba \xac\xbc \xc5\xe5 \xc4\xe4", "ISO-8859-10"},
    {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "ISO-8859-11"},
    {"latviešu", "latvie\xf0u", "ISO-8859-13"},
    {"Seònaid", "Se\xf2naid", "ISO-8859-14"},
    {"€1 is cheap", "\xa41 is cheap", "ISO-8859-15"},
    {"românește", "rom\xe2ne\xbate", "ISO-8859-16"},
    {"nutraĵo", "nutra\xbco", "ISO-8859-3"},
    {"Kalâdlit", "Kal\xe2dlit", "ISO-8859-4"},
    {"русский", "\xe0\xe3\xe1\xe1\xda\xd8\xd9", "ISO-8859-5"},
    {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "ISO-8859-7"},
    {"Kağan", "Ka\xf0an", "ISO-8859-9"},
    {"Résumé", "R\x8esum\x8e", "macintosh"},
    {"Gdańsk", "Gda\xf1sk", "windows-1250"},
    {"русский", "\xf0\xf3\xf1\xf1\xea\xe8\xe9", "windows-1251"},
    {"Résumé", "R\xe9sum\xe9", "windows-1252"},
    {"ελληνικά", "\xe5\xeb\xeb\xe7\xed\xe9\xea\xdc", "windows-1253"},
    {"Kağan", "Ka\xf0an", "windows-1254"},
    {"עִבְרִית", "\xf2\xc4\xe1\xc0\xf8\xc4\xe9\xfa", "windows-1255"},
    {"العربية", "\xc7\xe1\xda\xd1\xc8\xed\xc9", "windows-1256"},
    {"latviešu", "latvie\xf0u", "windows-1257"},
    {"Việt", "Vi\xea\xf2t", "windows-1258"},
    {"สำหรับ", "\xca\xd3\xcb\xc3\u047a", "windows-874"},
    {"русский", "\xd2\xd5\xd3\xd3\xcb\xc9\xca", "KOI8-R"},
    {"українська", "\xd5\xcb\xd2\xc1\xa7\xce\xd3\xd8\xcb\xc1", "KOI8-U"},
    {"Hello 常用國字標準字體表", "Hello \xb1`\xa5\u03b0\xea\xa6r\xbc\u0437\u01e6r\xc5\xe9\xaa\xed", "big5"},
    {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gbk"},
    {"Hello 常用國字標準字體表", "Hello \xb3\xa3\xd3\xc3\x87\xf8\xd7\xd6\x98\xcb\x9c\xca\xd7\xd6\xf3\x77\xb1\xed", "gb18030"},
    {"עִבְרִית", "\x81\x30\xfb\x30\x81\x30\xf6\x34\x81\x30\xf9\x33\x81\x30\xf6\x30\x81\x30\xfb\x36\x81\x30\xf6\x34\x81\x30\xfa\x31\x81\x30\xfb\x38", "gb18030"},
    {"㧯", "\x82\x31\x89\x38", "gb18030"},
    {"これは漢字です。", "\x82\xb1\x82\xea\x82\xcd\x8a\xbf\x8e\x9a\x82\xc5\x82\xb7\x81B", "SJIS"},
    {"Hello, 世界!", "Hello, \x90\xa2\x8aE!", "SJIS"},
    {"イウエオカ", "\xb2\xb3\xb4\xb5\xb6", "SJIS"},
    {"これは漢字です。", "\xa4\xb3\xa4\xec\xa4\u03f4\xc1\xbb\xfa\xa4\u01e4\xb9\xa1\xa3", "EUC-JP"},
    {"Hello, 世界!", "Hello, \x1b$B@$3&\x1b(B!", "ISO-2022-JP"},
    {"네이트 | 즐거움의 시작, 슈파스(Spaβ) NATE", "\xb3\xd7\xc0\xcc\xc6\xae | \xc1\xf1\xb0\xc5\xbf\xf2\xc0\xc7 \xbd\xc3\xc0\xdb, \xbd\xb4\xc6\xc4\xbd\xba(Spa\xa5\xe2) NATE", "EUC-KR"},
}

func TestDecode(t *testing.T) {
    for _, tc := range testCases {
        e, _ := Lookup(tc.otherEncoding)
        if e == nil {
            t.Errorf("%s: not found", tc.otherEncoding)
            continue
        }
        s, err := transformString(e.NewDecoder(), tc.other)
        if err != nil {
            t.Errorf("%s: decode %q: %v", tc.otherEncoding, tc.other, err)
            continue
        }
        if s != tc.utf8 {
            t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.utf8)
        }
    }
}

func TestEncode(t *testing.T) {
    for _, tc := range testCases {
        e, _ := Lookup(tc.otherEncoding)
        if e == nil {
            t.Errorf("%s: not found", tc.otherEncoding)
            continue
        }
        s, err := transformString(e.NewEncoder(), tc.utf8)
        if err != nil {
            t.Errorf("%s: encode %q: %s", tc.otherEncoding, tc.utf8, err)
            continue
        }
        if s != tc.other {
            t.Errorf("%s: got %q, want %q", tc.otherEncoding, s, tc.other)
        }
    }
}

// TestNames verifies that you can pass an encoding's name to Lookup and get
// the same encoding back (except for "replacement").
func TestNames(t *testing.T) {
    for _, e := range encodings {
        if e.name == "replacement" {
            continue
        }
        _, got := Lookup(e.name)
        if got != e.name {
            t.Errorf("got %q, want %q", got, e.name)
            continue
        }
    }
}

var sniffTestCases = []struct {
    filename, declared, want string
}{
    {"HTTP-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
    {"UTF-16LE-BOM.html", "", "utf-16le"},
    {"UTF-16BE-BOM.html", "", "utf-16be"},
    {"meta-content-attribute.html", "text/html", "iso-8859-15"},
    {"meta-charset-attribute.html", "text/html", "iso-8859-15"},
    {"No-encoding-declaration.html", "text/html", "utf-8"},
    {"HTTP-vs-UTF-8-BOM.html", "text/html; charset=iso-8859-15", "utf-8"},
    {"HTTP-vs-meta-content.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
    {"HTTP-vs-meta-charset.html", "text/html; charset=iso-8859-15", "iso-8859-15"},
    {"UTF-8-BOM-vs-meta-content.html", "text/html", "utf-8"},
    {"UTF-8-BOM-vs-meta-charset.html", "text/html", "utf-8"},
}

func TestSniff(t *testing.T) {
    switch runtime.GOOS {
    case "nacl": // platforms that don't permit direct file system access
        t.Skipf("not supported on %q", runtime.GOOS)
    }

    for _, tc := range sniffTestCases {
        content, err := ioutil.ReadFile("testdata/" + tc.filename)
        if err != nil {
            t.Errorf("%s: error reading file: %v", tc.filename, err)
            continue
        }

        _, name, _ := DetermineEncoding(content, tc.declared)
        if name != tc.want {
            t.Errorf("%s: got %q, want %q", tc.filename, name, tc.want)
            continue
        }
    }
}

func TestReader(t *testing.T) {
    switch runtime.GOOS {
    case "nacl": // platforms that don't permit direct file system access
        t.Skipf("not supported on %q", runtime.GOOS)
    }

    for _, tc := range sniffTestCases {
        content, err := ioutil.ReadFile("testdata/" + tc.filename)
        if err != nil {
            t.Errorf("%s: error reading file: %v", tc.filename, err)
            continue
        }

        r, err := NewReader(bytes.NewReader(content), tc.declared)
        if err != nil {
            t.Errorf("%s: error creating reader: %v", tc.filename, err)
            continue
        }

        got, err := ioutil.ReadAll(r)
        if err != nil {
            t.Errorf("%s: error reading from charset.NewReader: %v", tc.filename, err)
            continue
        }

        e, _ := Lookup(tc.want)
        want, err := ioutil.ReadAll(transform.NewReader(bytes.NewReader(content), e.NewDecoder()))
        if err != nil {
            t.Errorf("%s: error decoding with hard-coded charset name: %v", tc.filename, err)
            continue
        }

        if !bytes.Equal(got, want) {
            t.Errorf("%s: got %q, want %q", tc.filename, got, want)
            continue
        }
    }
}

var metaTestCases = []struct {
    meta, want string
}{
    {"", ""},
    {"text/html", ""},
    {"text/html; charset utf-8", ""},
    {"text/html; charset=latin-2", "latin-2"},
    {"text/html; charset; charset = utf-8", "utf-8"},
    {`charset="big5"`, "big5"},
    {"charset='shift_jis'", "shift_jis"},
}

func TestFromMeta(t *testing.T) {
    for _, tc := range metaTestCases {
        got := fromMetaElement(tc.meta)
        if got != tc.want {
            t.Errorf("%q: got %q, want %q", tc.meta, got, tc.want)
        }
    }
}

func TestXML(t *testing.T) {
    const s = "<?xml version=\"1.0\" encoding=\"windows-1252\"?><a><Word>r\xe9sum\xe9</Word></a>"

    d := xml.NewDecoder(strings.NewReader(s))
    d.CharsetReader = NewReaderLabel

    var a struct {
        Word string
    }
    err := d.Decode(&a)
    if err != nil {
        t.Fatalf("Decode: %v", err)
    }

    want := "résumé"
    if a.Word != want {
        t.Errorf("got %q, want %q", a.Word, want)
    }
}
111
Godeps/_workspace/src/golang.org/x/net/html/charset/gen.go
generated
vendored
@@ -1,111 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

// Download https://encoding.spec.whatwg.org/encodings.json and use it to
// generate table.go.

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"
    "strings"
)

type enc struct {
    Name   string
    Labels []string
}

type group struct {
    Encodings []enc
    Heading   string
}

const specURL = "https://encoding.spec.whatwg.org/encodings.json"

func main() {
    resp, err := http.Get(specURL)
    if err != nil {
        log.Fatalf("error fetching %s: %s", specURL, err)
    }
    if resp.StatusCode != 200 {
        log.Fatalf("error fetching %s: HTTP status %s", specURL, resp.Status)
    }
    defer resp.Body.Close()

    var groups []group
    d := json.NewDecoder(resp.Body)
    err = d.Decode(&groups)
    if err != nil {
        log.Fatalf("error reading encodings.json: %s", err)
    }

    fmt.Println("// generated by go run gen.go; DO NOT EDIT")
    fmt.Println()
    fmt.Println("package charset")
    fmt.Println()

    fmt.Println("import (")
    fmt.Println(`"golang.org/x/text/encoding"`)
    for _, pkg := range []string{"charmap", "japanese", "korean", "simplifiedchinese", "traditionalchinese", "unicode"} {
        fmt.Printf("\"golang.org/x/text/encoding/%s\"\n", pkg)
    }
    fmt.Println(")")
    fmt.Println()

    fmt.Println("var encodings = map[string]struct{e encoding.Encoding; name string} {")
    for _, g := range groups {
        for _, e := range g.Encodings {
            goName, ok := miscNames[e.Name]
            if !ok {
                for k, v := range prefixes {
                    if strings.HasPrefix(e.Name, k) {
                        goName = v + e.Name[len(k):]
                        break
                    }
                }
                if goName == "" {
                    log.Fatalf("unrecognized encoding name: %s", e.Name)
                }
            }

            for _, label := range e.Labels {
                fmt.Printf("%q: {%s, %q},\n", label, goName, e.Name)
            }
        }
    }
    fmt.Println("}")
}

var prefixes = map[string]string{
    "iso-8859-": "charmap.ISO8859_",
    "windows-":  "charmap.Windows",
}

var miscNames = map[string]string{
    "utf-8":          "encoding.Nop",
    "ibm866":         "charmap.CodePage866",
    "iso-8859-8-i":   "charmap.ISO8859_8",
    "koi8-r":         "charmap.KOI8R",
    "koi8-u":         "charmap.KOI8U",
    "macintosh":      "charmap.Macintosh",
    "x-mac-cyrillic": "charmap.MacintoshCyrillic",
    "gbk":            "simplifiedchinese.GBK",
    "gb18030":        "simplifiedchinese.GB18030",
    "hz-gb-2312":     "simplifiedchinese.HZGB2312",
    "big5":           "traditionalchinese.Big5",
    "euc-jp":         "japanese.EUCJP",
    "iso-2022-jp":    "japanese.ISO2022JP",
    "shift_jis":      "japanese.ShiftJIS",
    "euc-kr":         "korean.EUCKR",
    "replacement":    "encoding.Replacement",
    "utf-16be":       "unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM)",
    "utf-16le":       "unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)",
    "x-user-defined": "charmap.XUserDefined",
}
235
Godeps/_workspace/src/golang.org/x/net/html/charset/table.go
generated
vendored
@@ -1,235 +0,0 @@
|
||||
// generated by go run gen.go; DO NOT EDIT
|
||||
|
||||
package charset
|
||||
|
||||
import (
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/charmap"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/japanese"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/korean"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/simplifiedchinese"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/traditionalchinese"
|
||||
"github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/text/encoding/unicode"
|
||||
)
|
||||
|
||||
var encodings = map[string]struct {
|
||||
e encoding.Encoding
|
||||
name string
|
||||
}{
|
||||
"unicode-1-1-utf-8": {encoding.Nop, "utf-8"},
|
||||
"utf-8": {encoding.Nop, "utf-8"},
|
||||
"utf8": {encoding.Nop, "utf-8"},
|
||||
"866": {charmap.CodePage866, "ibm866"},
|
||||
"cp866": {charmap.CodePage866, "ibm866"},
|
||||
"csibm866": {charmap.CodePage866, "ibm866"},
|
||||
"ibm866": {charmap.CodePage866, "ibm866"},
|
||||
"csisolatin2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso-8859-2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso-ir-101": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso8859-2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso88592": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso_8859-2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"iso_8859-2:1987": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"l2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"latin2": {charmap.ISO8859_2, "iso-8859-2"},
|
||||
"csisolatin3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso-8859-3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso-ir-109": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso8859-3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso88593": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso_8859-3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"iso_8859-3:1988": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"l3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"latin3": {charmap.ISO8859_3, "iso-8859-3"},
|
||||
"csisolatin4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso-8859-4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso-ir-110": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso8859-4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso88594": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso_8859-4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"iso_8859-4:1988": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"l4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"latin4": {charmap.ISO8859_4, "iso-8859-4"},
|
||||
"csisolatincyrillic": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"cyrillic": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso-8859-5": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso-ir-144": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso8859-5": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso88595": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso_8859-5": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"iso_8859-5:1988": {charmap.ISO8859_5, "iso-8859-5"},
|
||||
"arabic": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"asmo-708": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"csiso88596e": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"csiso88596i": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"csisolatinarabic": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"ecma-114": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso-8859-6": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso-8859-6-e": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso-8859-6-i": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso-ir-127": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso8859-6": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso88596": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso_8859-6": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"iso_8859-6:1987": {charmap.ISO8859_6, "iso-8859-6"},
|
||||
"csisolatingreek": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"ecma-118": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"elot_928": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"greek": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"greek8": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso-8859-7": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso-ir-126": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso8859-7": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso88597": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso_8859-7": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"iso_8859-7:1987": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"sun_eu_greek": {charmap.ISO8859_7, "iso-8859-7"},
|
||||
"csiso88598e": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"csisolatinhebrew": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"hebrew": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso-8859-8": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso-8859-8-e": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso-ir-138": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso8859-8": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso88598": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso_8859-8": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"iso_8859-8:1988": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"visual": {charmap.ISO8859_8, "iso-8859-8"},
|
||||
"csiso88598i": {charmap.ISO8859_8, "iso-8859-8-i"},
|
||||
"iso-8859-8-i": {charmap.ISO8859_8, "iso-8859-8-i"},
|
||||
"logical": {charmap.ISO8859_8, "iso-8859-8-i"},
|
||||
"csisolatin6": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"iso-8859-10": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"iso-ir-157": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"iso8859-10": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"iso885910": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"l6": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"latin6": {charmap.ISO8859_10, "iso-8859-10"},
|
||||
"iso-8859-13": {charmap.ISO8859_13, "iso-8859-13"},
|
||||
"iso8859-13": {charmap.ISO8859_13, "iso-8859-13"},
|
||||
"iso885913": {charmap.ISO8859_13, "iso-8859-13"},
|
||||
"iso-8859-14": {charmap.ISO8859_14, "iso-8859-14"},
|
||||
"iso8859-14": {charmap.ISO8859_14, "iso-8859-14"},
|
||||
"iso885914": {charmap.ISO8859_14, "iso-8859-14"},
|
||||
"csisolatin9": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"iso-8859-15": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"iso8859-15": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"iso885915": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"iso_8859-15": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"l9": {charmap.ISO8859_15, "iso-8859-15"},
|
||||
"iso-8859-16": {charmap.ISO8859_16, "iso-8859-16"},
|
||||
"cskoi8r": {charmap.KOI8R, "koi8-r"},
|
||||
"koi": {charmap.KOI8R, "koi8-r"},
|
||||
"koi8": {charmap.KOI8R, "koi8-r"},
|
||||
"koi8-r": {charmap.KOI8R, "koi8-r"},
|
||||
"koi8_r": {charmap.KOI8R, "koi8-r"},
|
||||
"koi8-u": {charmap.KOI8U, "koi8-u"},
|
||||
"csmacintosh": {charmap.Macintosh, "macintosh"},
|
||||
"mac": {charmap.Macintosh, "macintosh"},
|
||||
"macintosh": {charmap.Macintosh, "macintosh"},
|
||||
"x-mac-roman": {charmap.Macintosh, "macintosh"},
|
||||
"dos-874": {charmap.Windows874, "windows-874"},
|
||||
"iso-8859-11": {charmap.Windows874, "windows-874"},
|
||||
"iso8859-11": {charmap.Windows874, "windows-874"},
|
||||
"iso885911": {charmap.Windows874, "windows-874"},
|
||||
"tis-620": {charmap.Windows874, "windows-874"},
|
||||
"windows-874": {charmap.Windows874, "windows-874"},
|
||||
"cp1250": {charmap.Windows1250, "windows-1250"},
|
||||
"windows-1250": {charmap.Windows1250, "windows-1250"},
|
||||
"x-cp1250": {charmap.Windows1250, "windows-1250"},
|
||||
"cp1251": {charmap.Windows1251, "windows-1251"},
|
||||
"windows-1251": {charmap.Windows1251, "windows-1251"},
|
||||
"x-cp1251": {charmap.Windows1251, "windows-1251"},
|
||||
"ansi_x3.4-1968": {charmap.Windows1252, "windows-1252"},
|
||||
"ascii": {charmap.Windows1252, "windows-1252"},
|
||||
"cp1252": {charmap.Windows1252, "windows-1252"},
|
||||
"cp819": {charmap.Windows1252, "windows-1252"},
|
||||
"csisolatin1": {charmap.Windows1252, "windows-1252"},
|
||||
"ibm819": {charmap.Windows1252, "windows-1252"},
|
||||
"iso-8859-1": {charmap.Windows1252, "windows-1252"},
|
||||
"iso-ir-100": {charmap.Windows1252, "windows-1252"},
|
||||
"iso8859-1": {charmap.Windows1252, "windows-1252"},
|
||||
"iso88591": {charmap.Windows1252, "windows-1252"},
|
||||
"iso_8859-1": {charmap.Windows1252, "windows-1252"},
|
||||
"iso_8859-1:1987": {charmap.Windows1252, "windows-1252"},
|
||||
"l1": {charmap.Windows1252, "windows-1252"},
|
||||
"latin1": {charmap.Windows1252, "windows-1252"},
|
||||
"us-ascii": {charmap.Windows1252, "windows-1252"},
|
||||
"windows-1252": {charmap.Windows1252, "windows-1252"},
|
||||
"x-cp1252": {charmap.Windows1252, "windows-1252"},
|
||||
"cp1253": {charmap.Windows1253, "windows-1253"},
|
||||
"windows-1253": {charmap.Windows1253, "windows-1253"},
|
||||
"x-cp1253": {charmap.Windows1253, "windows-1253"},
|
||||
"cp1254": {charmap.Windows1254, "windows-1254"},
|
||||
"csisolatin5": {charmap.Windows1254, "windows-1254"},
|
||||
"iso-8859-9": {charmap.Windows1254, "windows-1254"},
|
||||
"iso-ir-148": {charmap.Windows1254, "windows-1254"},
|
||||
"iso8859-9": {charmap.Windows1254, "windows-1254"},
|
||||
"iso88599": {charmap.Windows1254, "windows-1254"},
|
||||
"iso_8859-9": {charmap.Windows1254, "windows-1254"},
|
||||
"iso_8859-9:1989": {charmap.Windows1254, "windows-1254"},
|
||||
"l5": {charmap.Windows1254, "windows-1254"},
|
||||
"latin5": {charmap.Windows1254, "windows-1254"},
|
||||
"windows-1254": {charmap.Windows1254, "windows-1254"},
|
||||
"x-cp1254": {charmap.Windows1254, "windows-1254"},
|
||||
"cp1255": {charmap.Windows1255, "windows-1255"},
|
||||
"windows-1255": {charmap.Windows1255, "windows-1255"},
|
||||
"x-cp1255": {charmap.Windows1255, "windows-1255"},
|
||||
"cp1256": {charmap.Windows1256, "windows-1256"},
|
||||
"windows-1256": {charmap.Windows1256, "windows-1256"},
|
||||
"x-cp1256": {charmap.Windows1256, "windows-1256"},
|
||||
"cp1257": {charmap.Windows1257, "windows-1257"},
|
||||
"windows-1257": {charmap.Windows1257, "windows-1257"},
|
||||
"x-cp1257": {charmap.Windows1257, "windows-1257"},
|
||||
"cp1258": {charmap.Windows1258, "windows-1258"},
|
||||
"windows-1258": {charmap.Windows1258, "windows-1258"},
|
||||
"x-cp1258": {charmap.Windows1258, "windows-1258"},
|
||||
"x-mac-cyrillic": {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
|
||||
"x-mac-ukrainian": {charmap.MacintoshCyrillic, "x-mac-cyrillic"},
|
||||
"chinese": {simplifiedchinese.GBK, "gbk"},
|
||||
"csgb2312": {simplifiedchinese.GBK, "gbk"},
|
||||
"csiso58gb231280": {simplifiedchinese.GBK, "gbk"},
|
||||
"gb2312": {simplifiedchinese.GBK, "gbk"},
|
||||
"gb_2312": {simplifiedchinese.GBK, "gbk"},
|
||||
"gb_2312-80": {simplifiedchinese.GBK, "gbk"},
|
||||
"gbk": {simplifiedchinese.GBK, "gbk"},
|
||||
"iso-ir-58": {simplifiedchinese.GBK, "gbk"},
|
||||
"x-gbk": {simplifiedchinese.GBK, "gbk"},
|
||||
"gb18030": {simplifiedchinese.GB18030, "gb18030"},
|
||||
"hz-gb-2312": {simplifiedchinese.HZGB2312, "hz-gb-2312"},
|
||||
"big5": {traditionalchinese.Big5, "big5"},
|
||||
"big5-hkscs": {traditionalchinese.Big5, "big5"},
|
||||
"cn-big5": {traditionalchinese.Big5, "big5"},
|
||||
"csbig5": {traditionalchinese.Big5, "big5"},
|
||||
"x-x-big5": {traditionalchinese.Big5, "big5"},
|
||||
"cseucpkdfmtjapanese": {japanese.EUCJP, "euc-jp"},
|
||||
"euc-jp": {japanese.EUCJP, "euc-jp"},
|
||||
"x-euc-jp": {japanese.EUCJP, "euc-jp"},
|
||||
"csiso2022jp": {japanese.ISO2022JP, "iso-2022-jp"},
|
||||
"iso-2022-jp": {japanese.ISO2022JP, "iso-2022-jp"},
|
||||
"csshiftjis": {japanese.ShiftJIS, "shift_jis"},
|
||||
"ms_kanji": {japanese.ShiftJIS, "shift_jis"},
|
||||
"shift-jis": {japanese.ShiftJIS, "shift_jis"},
|
||||
"shift_jis": {japanese.ShiftJIS, "shift_jis"},
|
||||
"sjis": {japanese.ShiftJIS, "shift_jis"},
|
||||
"windows-31j": {japanese.ShiftJIS, "shift_jis"},
|
||||
"x-sjis": {japanese.ShiftJIS, "shift_jis"},
|
||||
"cseuckr": {korean.EUCKR, "euc-kr"},
|
||||
"csksc56011987": {korean.EUCKR, "euc-kr"},
|
||||
"euc-kr": {korean.EUCKR, "euc-kr"},
|
||||
"iso-ir-149": {korean.EUCKR, "euc-kr"},
|
||||
"korean": {korean.EUCKR, "euc-kr"},
|
||||
"ks_c_5601-1987": {korean.EUCKR, "euc-kr"},
|
||||
"ks_c_5601-1989": {korean.EUCKR, "euc-kr"},
|
||||
"ksc5601": {korean.EUCKR, "euc-kr"},
|
||||
"ksc_5601": {korean.EUCKR, "euc-kr"},
|
||||
"windows-949": {korean.EUCKR, "euc-kr"},
|
||||
"csiso2022kr": {encoding.Replacement, "replacement"},
|
||||
"iso-2022-kr": {encoding.Replacement, "replacement"},
|
||||
"iso-2022-cn": {encoding.Replacement, "replacement"},
|
||||
"iso-2022-cn-ext": {encoding.Replacement, "replacement"},
|
||||
"utf-16be": {unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM), "utf-16be"},
|
||||
"utf-16": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
|
||||
"utf-16le": {unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM), "utf-16le"},
|
||||
"x-user-defined": {charmap.XUserDefined, "x-user-defined"},
|
||||
}
|
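The generated encodings map above is what Lookup consults when resolving a label. A small sketch of label resolution (an illustration only, assuming the vendored import path; not code from this commit):

package main

import (
    "fmt"

    "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/html/charset"
)

func main() {
    // Per the table, "latin1" and "ascii" are aliases of windows-1252 and
    // "sjis" resolves to shift_jis; unknown labels yield a nil Encoding.
    for _, label := range []string{"latin1", "ascii", "sjis", "no-such-label"} {
        e, name := charset.Lookup(label)
        if e == nil {
            fmt.Printf("%-14s -> not a recognized encoding label\n", label)
            continue
        }
        fmt.Printf("%-14s -> %s\n", label, name)
    }
}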
48
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-charset.html
generated
vendored
@@ -1,48 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<title>HTTP charset</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="The character encoding of a page can be set using the HTTP header charset declaration.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>HTTP charset</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">The character encoding of a page can be set using the HTTP header charset declaration.</p>
|
||||
<div class="notes"><p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ÜÀÚ</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p><p>The only character encoding declaration for this HTML file is in the HTTP header, which sets the encoding to ISO 8859-15.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-003">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-001<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-001" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
|
||||
<li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
48
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-UTF-8-BOM.html
generated
vendored
@@ -1,48 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<title>HTTP vs UTF-8 BOM</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>HTTP vs UTF-8 BOM</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">A character encoding set in the HTTP header has lower precedence than the UTF-8 signature.</p>
|
||||
<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ýäè</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p><p>If the test is unsuccessful, the characters  should appear at the top of the page. These represent the bytes that make up the UTF-8 signature when encountered in the ISO 8859-15 encoding.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-022">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-034<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-034" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
|
||||
<li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
49
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-charset.html
generated
vendored
@@ -1,49 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<meta charset="iso-8859-1" > <title>HTTP vs meta charset</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }.test div { width: 90px; }
|
||||
</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>HTTP vs meta charset</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta charset attribute.</p>
|
||||
<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ÜÀÚ</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-037">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-018<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-018" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
|
||||
<li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
49
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/HTTP-vs-meta-content.html
generated
vendored
@@ -1,49 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<meta http-equiv="content-type" content="text/html;charset=iso-8859-1" > <title>HTTP vs meta content</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }.test div { width: 90px; }
|
||||
</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>HTTP vs meta content</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">The HTTP header has a higher precedence than an encoding declaration in a meta content attribute.</p>
|
||||
<div class="notes"><p><p>The HTTP header attempts to set the character encoding to ISO 8859-15. The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-1.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ÜÀÚ</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-018">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-016<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-016" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
|
||||
<li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
@ -1,47 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<title>No encoding declaration</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>No encoding declaration</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">A page with no encoding information in HTTP, BOM, XML declaration or meta element will be treated as UTF-8.</p>
|
||||
<div class="notes"><p><p>The test on this page contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ýäè</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-034">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-015<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-015" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
9
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/README
generated
vendored
@@ -1,9 +0,0 @@
These test cases come from
http://www.w3.org/International/tests/repository/html5/the-input-byte-stream/results-basics

Distributed under both the W3C Test Suite License
(http://www.w3.org/Consortium/Legal/2008/04-testsuite-license)
and the W3C 3-clause BSD License
(http://www.w3.org/Consortium/Legal/2008/03-bsd-license).
To contribute to a W3C Test Suite, see the policies and contribution
forms (http://www.w3.org/2004/10/27-testcases).
BIN
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16BE-BOM.html
generated
vendored
Binary file not shown.
BIN
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/UTF-16LE-BOM.html
generated
vendored
Binary file not shown.
@ -1,49 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" >
|
||||
<head>
|
||||
<meta charset="iso-8859-15"> <title>UTF-8 BOM vs meta charset</title>
|
||||
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
|
||||
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
|
||||
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
|
||||
<script src="http://w3c-test.org/resources/testharness.js"></script>
|
||||
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
|
||||
<meta name='flags' content='http'>
|
||||
<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.">
|
||||
<style type='text/css'>
|
||||
.test div { width: 50px; }.test div { width: 90px; }
|
||||
</style>
|
||||
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
|
||||
</head>
|
||||
<body>
|
||||
<p class='title'>UTF-8 BOM vs meta charset</p>
|
||||
|
||||
|
||||
<div id='log'></div>
|
||||
|
||||
|
||||
<div class='test'><div id='box' class='ýäè'> </div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class='description'>
|
||||
<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta charset attribute declares a different encoding.</p>
|
||||
<div class="notes"><p><p>The page contains an encoding declaration in a meta charset attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ýäè</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-024">Next test</a></div><div class="doctype">HTML5</div>
|
||||
<p class="jump">the-input-byte-stream-038<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-038" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
|
||||
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
|
||||
<li>The test is read from a server that supports HTTP.</li></ul></div>
|
||||
</div>
|
||||
<script>
|
||||
test(function() {
|
||||
assert_equals(document.getElementById('box').offsetWidth, 100);
|
||||
}, " ");
|
||||
</script>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
|
@ -1,48 +0,0 @@
<!DOCTYPE html>
<html lang="en" >
<head>
<meta http-equiv="content-type" content="text/html; charset=iso-8859-15"> <title>UTF-8 BOM vs meta content</title>
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
<script src="http://w3c-test.org/resources/testharness.js"></script>
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
<meta name='flags' content='http'>
<meta name="assert" content="A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.">
<style type='text/css'>
.test div { width: 50px; }</style>
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-utf8.css">
</head>
<body>
<p class='title'>UTF-8 BOM vs meta content</p>

<div id='log'></div>

<div class='test'><div id='box' class='ýäè'> </div></div>

<div class='description'>
<p class="assertion" title="Assertion">A page with a UTF-8 BOM will be recognized as UTF-8 even if the meta content attribute declares a different encoding.</p>
<div class="notes"><p><p>The page contains an encoding declaration in a meta content attribute that attempts to set the character encoding to ISO 8859-15, but the file starts with a UTF-8 signature.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ýäè</code>. This matches the sequence of bytes above when they are interpreted as UTF-8. If the class name matches the selector then the test will pass.</p></p>
</div>
</div>
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-038">Next test</a></div><div class="doctype">HTML5</div>
<p class="jump">the-input-byte-stream-037<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#precedence" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-037" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
<li>The test is read from a server that supports HTTP.</li></ul></div>
</div>
<script>
test(function() {
assert_equals(document.getElementById('box').offsetWidth, 100);
}, " ");
</script>

</body>
</html>
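The file above ("UTF-8 BOM vs meta content") exercises the same precedence through an http-equiv content-type declaration. A sketch of decoding such a stream, again an illustration outside this revert, assuming the upstream charset.NewReader API; only the BOM, the declaration and the class-name bytes come from the test, everything else is made up for the example:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"golang.org/x/net/html/charset"
)

func main() {
	// A UTF-8 BOM followed by a meta content attribute that claims
	// ISO 8859-15, plus the class-name bytes 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8.
	body := append([]byte{0xEF, 0xBB, 0xBF}, []byte(
		"<html><head><meta http-equiv=\"content-type\" content=\"text/html; charset=iso-8859-15\"></head>"+
			"<body><div class=\"\xc3\xbd\xc3\xa4\xc3\xa8\"></div></body></html>")...)

	// NewReader runs the same sniffing as DetermineEncoding and returns
	// a UTF-8 stream; with the BOM taking precedence, the class-name
	// bytes come through unchanged as the UTF-8 string "ýäè".
	r, err := charset.NewReader(bytes.NewReader(body), "text/html")
	if err != nil {
		panic(err)
	}
	decoded, _ := ioutil.ReadAll(r)
	fmt.Println(bytes.Contains(decoded, []byte("ýäè"))) // expected: true
}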
48
Godeps/_workspace/src/golang.org/x/net/html/charset/testdata/meta-charset-attribute.html
generated
vendored
@ -1,48 +0,0 @@
<!DOCTYPE html>
<html lang="en" >
<head>
<meta charset="iso-8859-15"> <title>meta charset attribute</title>
<link rel='author' title='Richard Ishida' href='mailto:ishida@w3.org'>
<link rel='help' href='http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream'>
<link rel="stylesheet" type="text/css" href="./generatedtests.css">
<script src="http://w3c-test.org/resources/testharness.js"></script>
<script src="http://w3c-test.org/resources/testharnessreport.js"></script>
<meta name='flags' content='http'>
<meta name="assert" content="The character encoding of the page can be set by a meta element with charset attribute.">
<style type='text/css'>
.test div { width: 50px; }</style>
<link rel="stylesheet" type="text/css" href="the-input-byte-stream/support/encodingtests-15.css">
</head>
<body>
<p class='title'>meta charset attribute</p>

<div id='log'></div>

<div class='test'><div id='box' class='ýäè'> </div></div>

<div class='description'>
<p class="assertion" title="Assertion">The character encoding of the page can be set by a meta element with charset attribute.</p>
<div class="notes"><p><p>The only character encoding declaration for this HTML file is in the charset attribute of the meta element, which declares the encoding to be ISO 8859-15.</p><p>The test contains a div with a class name that contains the following sequence of bytes: 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8. These represent different sequences of characters in ISO 8859-15, ISO 8859-1 and UTF-8. The external, UTF-8-encoded stylesheet contains a selector <code>.test div.ÜÀÚ</code>. This matches the sequence of bytes above when they are interpreted as ISO 8859-15. If the class name matches the selector then the test will pass.</p></p>
</div>
</div>
<div class="nexttest"><div><a href="generate?test=the-input-byte-stream-015">Next test</a></div><div class="doctype">HTML5</div>
<p class="jump">the-input-byte-stream-009<br /><a href="/International/tests/html5/the-input-byte-stream/results-basics#basics" target="_blank">Result summary & related tests</a><br /><a href="http://w3c-test.org/framework/details/i18n-html5/the-input-byte-stream-009" target="_blank">Detailed results for this test</a><br/> <a href="http://www.w3.org/TR/html5/syntax.html#the-input-byte-stream" target="_blank">Link to spec</a></p>
<div class='prereq'>Assumptions: <ul><li>The default encoding for the browser you are testing is not set to ISO 8859-15.</li>
<li>The test is read from a server that supports HTTP.</li></ul></div>
</div>
<script>
test(function() {
assert_equals(document.getElementById('box').offsetWidth, 100);
}, " ");
</script>

</body>
</html>
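The notes in the file above point out that the byte sequence 0xC3 0xBD 0xC3 0xA4 0xC3 0xA8 stands for different characters in ISO 8859-15, ISO 8859-1 and UTF-8, which is what lets a single class attribute tell the encodings apart. A small standalone sketch of that claim, assuming golang.org/x/text/encoding/charmap (a dependency of the upstream charset package) rather than anything touched by this revert:

package main

import (
	"fmt"

	"golang.org/x/text/encoding/charmap"
)

func main() {
	// The class-name bytes used throughout these tests.
	raw := []byte{0xC3, 0xBD, 0xC3, 0xA4, 0xC3, 0xA8}

	// Interpreted as UTF-8 the six bytes form three characters ("ýäè");
	// under the single-byte Latin charmaps every byte becomes its own
	// character, so the class name matches a different CSS selector.
	fmt.Println("utf-8:", string(raw))

	for _, cs := range []struct {
		name string
		enc  *charmap.Charmap
	}{
		{"iso-8859-15", charmap.ISO8859_15},
		{"iso-8859-1", charmap.ISO8859_1},
	} {
		decoded, err := cs.enc.NewDecoder().Bytes(raw)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s: %s\n", cs.name, decoded)
	}
}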
Some files were not shown because too many files have changed in this diff