vendor inconshreveable/go-update and its deps
Godeps/Godeps.json (generated, 13 additions)
@@ -9,6 +9,11 @@
 		{
 			"ImportPath": "bazil.org/fuse",
 			"Rev": "a04507d54fc3610d38ee951402d8c4acab56c7b1"
 		},
+		{
+			"ImportPath": "bitbucket.org/kardianos/osext",
+			"Comment": "null-15",
+			"Rev": "44140c5fc69ecf1102c5ef451d73cd98ef59b178"
+		},
 		{
 			"ImportPath": "code.google.com/p/go-uuid/uuid",
 			"Comment": "null-12",
@@ -62,6 +67,10 @@
 			"ImportPath": "github.com/gorilla/mux",
 			"Rev": "4b8fbc56f3b2400a7c7ea3dba9b3539787c486b6"
 		},
+		{
+			"ImportPath": "github.com/inconshreveable/go-update",
+			"Rev": "221d034a558b4c21b0624b2a450c076913854a57"
+		},
 		{
 			"ImportPath": "github.com/jbenet/commander",
 			"Rev": "e0cf317891f0ab6f1ac64dfcb754b4fb5e69f7df"
@@ -92,6 +101,10 @@
 			"Comment": "0.1.0-5-g1976046",
 			"Rev": "1976046c2b0db0b668791b3e541d76a38b7c1af7"
 		},
+		{
+			"ImportPath": "github.com/kr/binarydist",
+			"Rev": "9955b0ab8708602d411341e55fffd7e0700f86bd"
+		},
 		{
 			"ImportPath": "github.com/mitchellh/go-homedir",
 			"Rev": "7d2d8c8a4e078ce3c58736ab521a40b37a504c52"
Godeps/_workspace/src/bitbucket.org/kardianos/osext/LICENSE (generated, vendored, new file)
@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Extensions to the standard "os" package.
package osext

import "path/filepath"

// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
	p, err := executable()
	return filepath.Clean(p), err
}

// Returns same path as Executable, returns just the folder
// path. Excludes the executable name.
func ExecutableFolder() (string, error) {
	p, err := Executable()
	if err != nil {
		return "", err
	}
	folder, _ := filepath.Split(p)
	return folder, nil
}

// Depricated. Same as Executable().
func GetExePath() (exePath string, err error) {
	return Executable()
}
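The two exported helpers above are the whole surface of the package. A minimal sketch of how they are called through the vendored import path added by this commit; the program itself is illustrative and not part of the diff:

    package main

    import (
        "fmt"
        "log"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bitbucket.org/kardianos/osext"
    )

    func main() {
        // Absolute path of the currently running binary.
        exe, err := osext.Executable()
        if err != nil {
            log.Fatal(err)
        }

        // Directory containing the binary (trailing separator included).
        dir, err := osext.ExecutableFolder()
        if err != nil {
            log.Fatal(err)
        }

        fmt.Println("executable:", exe)
        fmt.Println("folder:    ", dir)
    }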
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_plan9.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"os"
	"strconv"
	"syscall"
)

func executable() (string, error) {
	f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
	if err != nil {
		return "", err
	}
	defer f.Close()
	return syscall.Fd2path(int(f.Fd()))
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_procfs.go (generated, vendored, new file)
@@ -0,0 +1,28 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux netbsd openbsd solaris

package osext

import (
	"errors"
	"fmt"
	"os"
	"runtime"
)

func executable() (string, error) {
	switch runtime.GOOS {
	case "linux":
		return os.Readlink("/proc/self/exe")
	case "netbsd":
		return os.Readlink("/proc/curproc/exe")
	case "openbsd":
		return os.Readlink("/proc/curproc/file")
	case "solaris":
		return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
	}
	return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_sysctl.go (generated, vendored, new file)
@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin freebsd

package osext

import (
	"os"
	"path/filepath"
	"runtime"
	"syscall"
	"unsafe"
)

var initCwd, initCwdErr = os.Getwd()

func executable() (string, error) {
	var mib [4]int32
	switch runtime.GOOS {
	case "freebsd":
		mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
	case "darwin":
		mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
	}

	n := uintptr(0)
	// Get length.
	_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	buf := make([]byte, n)
	_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
	if errNum != 0 {
		return "", errNum
	}
	if n == 0 { // This shouldn't happen.
		return "", nil
	}
	for i, v := range buf {
		if v == 0 {
			buf = buf[:i]
			break
		}
	}
	var err error
	execPath := string(buf)
	// execPath will not be empty due to above checks.
	// Try to get the absolute path if the execPath is not rooted.
	if execPath[0] != '/' {
		execPath, err = getAbs(execPath)
		if err != nil {
			return execPath, err
		}
	}
	// For darwin KERN_PROCARGS may return the path to a symlink rather than the
	// actual executable.
	if runtime.GOOS == "darwin" {
		if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
			return execPath, err
		}
	}
	return execPath, nil
}

func getAbs(execPath string) (string, error) {
	if initCwdErr != nil {
		return execPath, initCwdErr
	}
	// The execPath may begin with a "../" or a "./" so clean it first.
	// Join the two paths, trailing and starting slashes undetermined, so use
	// the generic Join function.
	return filepath.Join(initCwd, filepath.Clean(execPath)), nil
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_test.go (generated, vendored, new file)
@@ -0,0 +1,79 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin linux freebsd netbsd windows

package osext

import (
	"fmt"
	"os"
	oexec "os/exec"
	"path/filepath"
	"runtime"
	"testing"
)

const execPath_EnvVar = "OSTEST_OUTPUT_EXECPATH"

func TestExecPath(t *testing.T) {
	ep, err := Executable()
	if err != nil {
		t.Fatalf("ExecPath failed: %v", err)
	}
	// we want fn to be of the form "dir/prog"
	dir := filepath.Dir(filepath.Dir(ep))
	fn, err := filepath.Rel(dir, ep)
	if err != nil {
		t.Fatalf("filepath.Rel: %v", err)
	}
	cmd := &oexec.Cmd{}
	// make child start with a relative program path
	cmd.Dir = dir
	cmd.Path = fn
	// forge argv[0] for child, so that we can verify we could correctly
	// get real path of the executable without influenced by argv[0].
	cmd.Args = []string{"-", "-test.run=XXXX"}
	cmd.Env = []string{fmt.Sprintf("%s=1", execPath_EnvVar)}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("exec(self) failed: %v", err)
	}
	outs := string(out)
	if !filepath.IsAbs(outs) {
		t.Fatalf("Child returned %q, want an absolute path", out)
	}
	if !sameFile(outs, ep) {
		t.Fatalf("Child returned %q, not the same file as %q", out, ep)
	}
}

func sameFile(fn1, fn2 string) bool {
	fi1, err := os.Stat(fn1)
	if err != nil {
		return false
	}
	fi2, err := os.Stat(fn2)
	if err != nil {
		return false
	}
	return os.SameFile(fi1, fi2)
}

func init() {
	if e := os.Getenv(execPath_EnvVar); e != "" {
		// first chdir to another path
		dir := "/"
		if runtime.GOOS == "windows" {
			dir = filepath.VolumeName(".")
		}
		os.Chdir(dir)
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
		os.Exit(0)
	}
}
Godeps/_workspace/src/bitbucket.org/kardianos/osext/osext_windows.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package osext

import (
	"syscall"
	"unicode/utf16"
	"unsafe"
)

var (
	kernel                = syscall.MustLoadDLL("kernel32.dll")
	getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
)

// GetModuleFileName() with hModule = NULL
func executable() (exePath string, err error) {
	return getModuleFileName()
}

func getModuleFileName() (string, error) {
	var n uint32
	b := make([]uint16, syscall.MAX_PATH)
	size := uint32(len(b))

	r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
	n = uint32(r0)
	if n == 0 {
		return "", e1
	}
	return string(utf16.Decode(b[0:n])), nil
}
Godeps/_workspace/src/github.com/inconshreveable/go-update/LICENSE (generated, vendored, new file)
@@ -0,0 +1,13 @@
Copyright 2014 Alan Shreve

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Godeps/_workspace/src/github.com/inconshreveable/go-update/README.md (generated, vendored, new file)
@@ -0,0 +1,37 @@
# go-update: Automatically update Go programs from the internet

go-update allows a program to update itself by replacing its executable file
with a new version. It provides the flexibility to implement different updating user experiences
like auto-updating, or manual user-initiated updates. It also boasts
advanced features like binary patching and code signing verification.

Updating your program to a new version is as easy as:

    err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
    if err != nil {
        fmt.Printf("Update failed: %v\n", err)
    }

## Documentation and API Reference

Comprehensive API documentation and code examples are available in the code documentation available on godoc.org:

[](https://godoc.org/github.com/inconshreveable/go-update)

## Features

- Cross platform support (Windows too!)
- Binary patch application
- Checksum verification
- Code signing verification
- Support for updating arbitrary files

## [equinox.io](https://equinox.io)
go-update provides the primitives for building self-updating applications, but there a number of other challenges
involved in a complete updating solution such as hosting, code signing, update channels, gradual rollout,
dynamically computing binary patches, tracking update metrics like versions and failures, plus more.

I provide this service, a complete solution, free for open source projects, at [equinox.io](https://equinox.io).

## License
Apache
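The README's snippet shows the simplest call. Combining it with the checksum verification the feature list mentions gives a sketch like the one below, written against the vendored import path added by this commit; the release URL and checksum value are placeholders, not code from this diff:

    package main

    import (
        "fmt"

        update "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update"
    )

    func main() {
        // sha256 of the new binary, obtained out of band (placeholder value).
        checksum := []byte{ /* 32 bytes */ }

        // Verify the downloaded contents before swapping in the new binary.
        up := update.New().VerifyChecksum(checksum)
        err, errRecover := up.FromUrl("http://release.example.com/2.0/myprogram")
        if err != nil {
            fmt.Printf("Update failed: %v\n", err)
            if errRecover != nil {
                fmt.Printf("Failed to recover bad update: %v\n", errRecover)
            }
        }
    }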
Godeps/_workspace/src/github.com/inconshreveable/go-update/check/check.go (generated, vendored, new file)
@@ -0,0 +1,209 @@
package check

import (
	"bytes"
	_ "crypto/sha512" // for tls cipher support
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"runtime"

	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bitbucket.org/kardianos/osext"
	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update"
)

type Initiative string

const (
	INITIATIVE_NEVER  Initiative = "never"
	INITIATIVE_AUTO              = "auto"
	INITIATIVE_MANUAL            = "manual"
)

var NoUpdateAvailable error = fmt.Errorf("No update available")

type Params struct {
	// protocol version
	Version int `json:"version"`
	// identifier of the application to update
	AppId string `json:"app_id"`
	// version of the application updating itself
	AppVersion string `json:"app_version"`
	// operating system of target platform
	OS string `json:"-"`
	// hardware architecture of target platform
	Arch string `json:"-"`
	// application-level user identifier
	UserId string `json:"user_id"`
	// checksum of the binary to replace (used for returning diff patches)
	Checksum string `json:"checksum"`
	// release channel (empty string means 'stable')
	Channel string `json:"-"`
	// tags for custom update channels
	Tags map[string]string `json:"tags"`
}

type Result struct {
	up *update.Update

	// should the update be applied automatically/manually
	Initiative Initiative `json:"initiative"`
	// url where to download the updated application
	Url string `json:"url"`
	// a URL to a patch to apply
	PatchUrl string `json:"patch_url"`
	// the patch format (only bsdiff supported at the moment)
	PatchType update.PatchType `json:"patch_type"`
	// version of the new application
	Version string `json:"version"`
	// expected checksum of the new application
	Checksum string `json:"checksum"`
	// signature for verifying update authenticity
	Signature string `json:"signature"`
}

// CheckForUpdate makes an HTTP post to a URL with the JSON serialized
// representation of Params. It returns the deserialized result object
// returned by the remote endpoint or an error. If you do not set
// OS/Arch, CheckForUpdate will populate them for you. Similarly, if
// Version is 0, it will be set to 1. Lastly, if Checksum is the empty
// string, it will be automatically be computed for the running program's
// executable file.
func (p *Params) CheckForUpdate(url string, up *update.Update) (*Result, error) {
	if p.Tags == nil {
		p.Tags = make(map[string]string)
	}

	if p.Channel == "" {
		p.Channel = "stable"
	}

	if p.OS == "" {
		p.OS = runtime.GOOS
	}

	if p.Arch == "" {
		p.Arch = runtime.GOARCH
	}

	if p.Version == 0 {
		p.Version = 1
	}

	// ignore errors auto-populating the checksum
	// if it fails, you just won't be able to patch
	if up.TargetPath == "" {
		p.Checksum = defaultChecksum()
	} else {
		checksum, err := update.ChecksumForFile(up.TargetPath)
		if err != nil {
			return nil, err
		}
		p.Checksum = hex.EncodeToString(checksum)
	}

	p.Tags["os"] = p.OS
	p.Tags["arch"] = p.Arch
	p.Tags["channel"] = p.Channel

	body, err := json.Marshal(p)
	if err != nil {
		return nil, err
	}

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}

	// no content means no available update
	if resp.StatusCode == 204 {
		return nil, NoUpdateAvailable
	}

	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}

	result := &Result{up: up}
	if err := json.Unmarshal(respBytes, result); err != nil {
		return nil, err
	}

	return result, nil
}

func (p *Params) CheckAndApplyUpdate(url string, up *update.Update) (result *Result, err error, errRecover error) {
	// check for an update
	result, err = p.CheckForUpdate(url, up)
	if err != nil {
		return
	}

	// run the available update
	err, errRecover = result.Update()
	return
}

func (r *Result) Update() (err error, errRecover error) {
	if r.Checksum != "" {
		r.up.Checksum, err = hex.DecodeString(r.Checksum)
		if err != nil {
			return
		}
	}

	if r.Signature != "" {
		r.up.Signature, err = hex.DecodeString(r.Signature)
		if err != nil {
			return
		}
	}

	if r.PatchType != "" {
		r.up.PatchType = r.PatchType
	}

	if r.Url == "" && r.PatchUrl == "" {
		err = fmt.Errorf("Result does not contain an update url or patch update url")
		return
	}

	if r.PatchUrl != "" {
		err, errRecover = r.up.FromUrl(r.PatchUrl)
		if err == nil {
			// success!
			return
		} else {
			// failed to update from patch URL, try with the whole thing
			if r.Url == "" || errRecover != nil {
				// we can't try updating from a URL with the full contents
				// in these cases, so fail
				return
			} else {
				r.up.PatchType = update.PATCHTYPE_NONE
			}
		}
	}

	// try updating from a URL with the full contents
	return r.up.FromUrl(r.Url)
}

func defaultChecksum() string {
	path, err := osext.Executable()
	if err != nil {
		return ""
	}

	checksum, err := update.ChecksumForFile(path)
	if err != nil {
		return ""
	}

	return hex.EncodeToString(checksum)
}
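A sketch of how a client drives the check protocol implemented above: describe the running program in Params, ask the update server whether a newer version exists, and apply the returned Result. The endpoint URL and application identifier are placeholders, and the /check import path simply follows the workspace layout added in this commit:

    package main

    import (
        "fmt"
        "log"

        update "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update"
        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update/check"
    )

    func main() {
        params := check.Params{
            AppId:      "my-app-id", // placeholder identifier
            AppVersion: "0.1.0",     // version of the running program
            Channel:    "stable",
        }

        // CheckForUpdate fills in OS, Arch and Checksum when they are empty.
        up := update.New()
        result, err := params.CheckForUpdate("https://updates.example.com/check", up)
        if err == check.NoUpdateAvailable {
            fmt.Println("already up to date")
            return
        }
        if err != nil {
            log.Fatal(err)
        }

        // Apply the update described by the server's response.
        if err, errRecover := result.Update(); err != nil {
            log.Printf("update failed: %v (recovery error: %v)", err, errRecover)
            return
        }
        fmt.Println("updated to", result.Version)
    }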
Godeps/_workspace/src/github.com/inconshreveable/go-update/download/download.go (generated, vendored, new file)
@@ -0,0 +1,230 @@
package download

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"os"
	"runtime"
)

type roundTripper struct {
	RoundTripFn func(*http.Request) (*http.Response, error)
}

func (rt *roundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
	return rt.RoundTripFn(r)
}

// Download encapsulates the state and parameters to download content
// from a URL which:
//
// - Publishes the percentage of the download completed to a channel.
// - May resume a previous download that was partially completed.
//
// Create an instance with the New() factory function.
type Download struct {
	// net/http.Client to use when downloading the update.
	// If nil, a default http.Client is used
	HttpClient *http.Client

	// As bytes are downloaded, they are written to Target.
	// Download also uses the Target's Seek method to determine
	// the size of partial-downloads so that it may properly
	// request the remaining bytes to resume the download.
	Target Target

	// Progress returns the percentage of the download
	// completed as an integer between 0 and 100
	Progress chan (int)

	// HTTP Method to use in the download request. Default is "GET"
	Method string

	// HTTP URL to issue the download request to
	Url string
}

// New initializes a new Download object which will download
// the content from url into target.
func New(url string, target Target) *Download {
	return &Download{
		HttpClient: new(http.Client),
		Progress:   make(chan int),
		Method:     "GET",
		Url:        url,
		Target:     target,
	}
}

// Get() downloads the content of a url to a target destination.
//
// Only HTTP/1.1 servers that implement the Range header support resuming a
// partially completed download.
//
// On success, the server must return 200 and the content, or 206 when resuming a partial download.
// If the HTTP server returns a 3XX redirect, it will be followed according to d.HttpClient's redirect policy.
//
func (d *Download) Get() (err error) {
	// Close the progress channel whenever this function completes
	defer close(d.Progress)

	// determine the size of the download target to determine if we're resuming a partial download
	offset, err := d.Target.Size()
	if err != nil {
		return
	}

	// create the download request
	req, err := http.NewRequest(d.Method, d.Url, nil)
	if err != nil {
		return
	}

	// we have to add headers like this so they get used across redirects
	trans := d.HttpClient.Transport
	if trans == nil {
		trans = http.DefaultTransport
	}

	d.HttpClient.Transport = &roundTripper{
		RoundTripFn: func(r *http.Request) (*http.Response, error) {
			// add header for download continuation
			if offset > 0 {
				r.Header.Add("Range", fmt.Sprintf("%d-", offset))
			}

			// ask for gzipped content so that net/http won't unzip it for us
			// and destroy the content length header we need for progress calculations
			r.Header.Add("Accept-Encoding", "gzip")

			return trans.RoundTrip(r)
		},
	}

	// issue the download request
	resp, err := d.HttpClient.Do(req)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	// ok
	case 200, 206:

	// server error
	default:
		err = fmt.Errorf("Non 2XX response when downloading update: %s", resp.Status)
		return
	}

	// Determine how much we have to download
	// net/http sets this to -1 when it is unknown
	clength := resp.ContentLength

	// Read the content from the response body
	rd := resp.Body

	// meter the rate at which we download content for
	// progress reporting if we know how much to expect
	if clength > 0 {
		rd = &meteredReader{rd: rd, totalSize: clength, progress: d.Progress}
	}

	// Decompress the content if necessary
	if resp.Header.Get("Content-Encoding") == "gzip" {
		rd, err = gzip.NewReader(rd)
		if err != nil {
			return
		}
	}

	// Download the update
	_, err = io.Copy(d.Target, rd)
	if err != nil {
		return
	}

	return
}

// meteredReader wraps a ReadCloser. Calls to a meteredReader's Read() method
// publish updates to a progress channel with the percentage read so far.
type meteredReader struct {
	rd        io.ReadCloser
	totalSize int64
	progress  chan int
	totalRead int64
	ticks     int64
}

func (m *meteredReader) Close() error {
	return m.rd.Close()
}

func (m *meteredReader) Read(b []byte) (n int, err error) {
	chunkSize := (m.totalSize / 100) + 1
	lenB := int64(len(b))

	var nChunk int
	for start := int64(0); start < lenB; start += int64(nChunk) {
		end := start + chunkSize
		if end > lenB {
			end = lenB
		}

		nChunk, err = m.rd.Read(b[start:end])

		n += nChunk
		m.totalRead += int64(nChunk)

		if m.totalRead > (m.ticks * chunkSize) {
			m.ticks += 1
			// try to send on channel, but don't block if it's full
			select {
			case m.progress <- int(m.ticks + 1):
			default:
			}

			// give the progress channel consumer a chance to run
			runtime.Gosched()
		}

		if err != nil {
			return
		}
	}

	return
}

// A Target is what you can supply to Download,
// it's just an io.Writer with a Size() method so that
// the a Download can "resume" an interrupted download
type Target interface {
	io.Writer
	Size() (int, error)
}

type FileTarget struct {
	*os.File
}

func (t *FileTarget) Size() (int, error) {
	if fi, err := t.File.Stat(); err != nil {
		return 0, err
	} else {
		return int(fi.Size()), nil
	}
}

type MemoryTarget struct {
	bytes.Buffer
}

func (t *MemoryTarget) Size() (int, error) {
	return t.Buffer.Len(), nil
}
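A sketch of using the Download type above on its own: stream a release into a FileTarget while draining the Progress channel. The URL and destination filename are placeholders, and the /download import path follows the workspace layout added here:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update/download"
    )

    func main() {
        // Placeholder destination; reopening an existing partial file lets Get() resume it.
        f, err := os.OpenFile("myprogram.new", os.O_CREATE|os.O_WRONLY, 0755)
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        d := download.New("http://release.example.com/2.0/myprogram", &download.FileTarget{File: f})

        // Drain the progress channel; Get() closes it when the download finishes.
        go func() {
            for pct := range d.Progress {
                fmt.Printf("\rdownloading: %d%%", pct)
            }
        }()

        if err := d.Get(); err != nil {
            log.Fatal(err)
        }
    }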
Godeps/_workspace/src/github.com/inconshreveable/go-update/hide_noop.go (generated, vendored, new file)
@@ -0,0 +1,7 @@
// +build !windows

package update

func hideFile(path string) error {
	return nil
}
Godeps/_workspace/src/github.com/inconshreveable/go-update/hide_windows.go (generated, vendored, new file)
@@ -0,0 +1,19 @@
package update

import (
	"syscall"
	"unsafe"
)

func hideFile(path string) error {
	kernel32 := syscall.NewLazyDLL("kernel32.dll")
	setFileAttributes := kernel32.NewProc("SetFileAttributesW")

	r1, _, err := setFileAttributes.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), 2)

	if r1 == 0 {
		return err
	} else {
		return nil
	}
}
Godeps/_workspace/src/github.com/inconshreveable/go-update/update.go (generated, vendored, new file)
@ -0,0 +1,486 @@
|
|||||||
|
/*
|
||||||
|
go-update allows a program to update itself by replacing its executable file
|
||||||
|
with a new version. It provides the flexibility to implement different updating user experiences
|
||||||
|
like auto-updating, or manual user-initiated updates. It also boasts
|
||||||
|
advanced features like binary patching and code signing verification.
|
||||||
|
|
||||||
|
Updating your program to a new version is as easy as:
|
||||||
|
|
||||||
|
err, errRecover := update.New().FromUrl("http://release.example.com/2.0/myprogram")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Update failed: %v\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
You may also choose to update from other data sources such as a file or an io.Reader:
|
||||||
|
|
||||||
|
err, errRecover := update.New().FromFile("/path/to/update")
|
||||||
|
|
||||||
|
Binary Diff Patching
|
||||||
|
|
||||||
|
Binary diff updates are supported and easy to use:
|
||||||
|
|
||||||
|
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF)
|
||||||
|
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
|
||||||
|
|
||||||
|
Checksum Verification
|
||||||
|
|
||||||
|
You should also verify the checksum of new updates as well as verify
|
||||||
|
the digital signature of an update. Note that even when you choose to apply
|
||||||
|
a patch, the checksum is verified against the complete update after that patch
|
||||||
|
has been applied.
|
||||||
|
|
||||||
|
up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
|
||||||
|
err, errRecover := up.FromUrl("http://release.example.com/2.0/mypatch")
|
||||||
|
|
||||||
|
Updating other files
|
||||||
|
|
||||||
|
Updating arbitrary files is also supported. You may update files which are
|
||||||
|
not the currently running program:
|
||||||
|
|
||||||
|
up := update.New().Target("/usr/local/bin/some-program")
|
||||||
|
err, errRecover := up.FromUrl("http://release.example.com/2.0/some-program")
|
||||||
|
|
||||||
|
Code Signing
|
||||||
|
|
||||||
|
Truly secure updates use code signing to verify that the update was issued by a trusted party.
|
||||||
|
To do this, you'll need to generate a public/private key pair. You can do this with openssl,
|
||||||
|
or the equinox.io client (https://equinox.io/client) can easily generate one for you:
|
||||||
|
|
||||||
|
# with equinox client
|
||||||
|
equinox genkey --private-key=private.pem --public-key=public.pem
|
||||||
|
|
||||||
|
# with openssl
|
||||||
|
openssl genrsa -out private.pem 2048
|
||||||
|
openssl rsa -in private.pem -out public.pem -pubout
|
||||||
|
|
||||||
|
Once you have your key pair, you can instruct your program to validate its updates
|
||||||
|
with the public key:
|
||||||
|
|
||||||
|
const publicKey = `-----BEGIN PUBLIC KEY-----
|
||||||
|
...
|
||||||
|
-----END PUBLIC KEY-----`
|
||||||
|
|
||||||
|
up, err := update.New().VerifySignatureWithPEM(publicKey)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Bad public key: '%v': %v", publicKey, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
Once you've configured your program this way, it will disallow all updates unless they
|
||||||
|
are properly signed. You must now pass in the signature to verify with:
|
||||||
|
|
||||||
|
up.VerifySignature(signature).FromUrl("http://dl.example.com/update")
|
||||||
|
|
||||||
|
Error Handling and Recovery
|
||||||
|
|
||||||
|
To perform an update, the process must be able to read its executable file and to write
|
||||||
|
to the directory that contains its executable file. It can be useful to check whether the process
|
||||||
|
has the necessary permissions to perform an update before trying to apply one. Use the
|
||||||
|
CanUpdate call to provide a useful message to the user if the update can't proceed without
|
||||||
|
elevated permissions:
|
||||||
|
|
||||||
|
up := update.New().Target("/etc/hosts")
|
||||||
|
err := up.CanUpdate()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Can't update because: '%v'. Try as root or Administrator\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
err, errRecover := up.FromUrl("https://example.com/new/hosts")
|
||||||
|
|
||||||
|
Although exceedingly unlikely, the update operation itself is not atomic and can fail
|
||||||
|
in such a way that a user's computer is left in an inconsistent state. If that happens,
|
||||||
|
go-update attempts to recover to leave the system in a good state. If the recovery step
|
||||||
|
fails (even more unlikely), a second error, referred to as "errRecover" will be non-nil
|
||||||
|
so that you may inform your users of the bad news. You should handle this case as shown
|
||||||
|
here:
|
||||||
|
|
||||||
|
err, errRecover := up.FromUrl("https://example.com/update")
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Update failed: %v\n", err)
|
||||||
|
if errRecover != nil {
|
||||||
|
fmt.Printf("Failed to recover bad update: %v!\n", errRecover)
|
||||||
|
fmt.Printf("Program exectuable may be missing!\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Subpackages
|
||||||
|
|
||||||
|
Sub-package check contains the client functionality for a simple protocol for negotiating
|
||||||
|
whether a new update is available, where it is, and the metadata needed for verifying it.
|
||||||
|
|
||||||
|
Sub-package download contains functionality for downloading from an HTTP endpoint
|
||||||
|
while outputting a progress meter and supports resuming partial downloads.
|
||||||
|
*/
|
||||||
|
package update
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/bitbucket.org/kardianos/osext"
|
||||||
|
"bytes"
|
||||||
|
"crypto"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/sha256"
|
||||||
|
_ "crypto/sha512" // for tls cipher support
|
||||||
|
"crypto/x509"
|
||||||
|
"encoding/pem"
|
||||||
|
"fmt"
|
||||||
|
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update/download"
|
||||||
|
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/kr/binarydist"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The type of a binary patch, if any. Only bsdiff is supported
|
||||||
|
type PatchType string
|
||||||
|
|
||||||
|
const (
|
||||||
|
PATCHTYPE_BSDIFF PatchType = "bsdiff"
|
||||||
|
PATCHTYPE_NONE = ""
|
||||||
|
)
|
||||||
|
|
||||||
|
type Update struct {
|
||||||
|
// empty string means "path of the current executable"
|
||||||
|
TargetPath string
|
||||||
|
|
||||||
|
// type of patch to apply. PATCHTYPE_NONE means "not a patch"
|
||||||
|
PatchType
|
||||||
|
|
||||||
|
// sha256 checksum of the new binary to verify against
|
||||||
|
Checksum []byte
|
||||||
|
|
||||||
|
// public key to use for signature verification
|
||||||
|
PublicKey *rsa.PublicKey
|
||||||
|
|
||||||
|
// signature to use for signature verification
|
||||||
|
Signature []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *Update) getPath() (string, error) {
|
||||||
|
if u.TargetPath == "" {
|
||||||
|
return osext.Executable()
|
||||||
|
} else {
|
||||||
|
return u.TargetPath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Update object.
|
||||||
|
// A default update object assumes the complete binary
|
||||||
|
// content will be used for update (not a patch) and that
|
||||||
|
// the intended target is the running executable.
|
||||||
|
//
|
||||||
|
// Use this as the start of a chain of calls on the Update
|
||||||
|
// object to build up your configuration. Example:
|
||||||
|
//
|
||||||
|
// up := update.New().ApplyPatch(update.PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
|
||||||
|
//
|
||||||
|
func New() *Update {
|
||||||
|
return &Update{
|
||||||
|
TargetPath: "",
|
||||||
|
PatchType: PATCHTYPE_NONE,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Target configures the update to update the file at the given path.
|
||||||
|
// The emptry string means 'the executable file of the running program'.
|
||||||
|
func (u *Update) Target(path string) *Update {
|
||||||
|
u.TargetPath = path
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyPatch configures the update to treat the contents of the update
|
||||||
|
// as a patch to apply to the existing to target. You must specify the
|
||||||
|
// format of the patch. Only PATCHTYPE_BSDIFF is supported at the moment.
|
||||||
|
func (u *Update) ApplyPatch(patchType PatchType) *Update {
|
||||||
|
u.PatchType = patchType
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifyChecksum configures the update to verify that the
|
||||||
|
// the update has the given sha256 checksum.
|
||||||
|
func (u *Update) VerifyChecksum(checksum []byte) *Update {
|
||||||
|
u.Checksum = checksum
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignature configures the update to verify the given
|
||||||
|
// signature of the update. You must also call one of the
|
||||||
|
// VerifySignatureWith* functions to specify a public key
|
||||||
|
// to use for verification.
|
||||||
|
func (u *Update) VerifySignature(signature []byte) *Update {
|
||||||
|
u.Signature = signature
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignatureWith configures the update to use the given RSA
|
||||||
|
// public key to verify the update's signature. You must also call
|
||||||
|
// VerifySignature() with a signature to check.
|
||||||
|
//
|
||||||
|
// You'll probably want to use VerifySignatureWithPEM instead of
|
||||||
|
// parsing the public key yourself.
|
||||||
|
func (u *Update) VerifySignatureWith(publicKey *rsa.PublicKey) *Update {
|
||||||
|
u.PublicKey = publicKey
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// VerifySignatureWithPEM configures the update to use the given PEM-formatted
|
||||||
|
// RSA public key to verify the update's signature. You must also call
|
||||||
|
// VerifySignature() with a signature to check.
|
||||||
|
//
|
||||||
|
// A PEM formatted public key typically begins with
|
||||||
|
// -----BEGIN PUBLIC KEY-----
|
||||||
|
func (u *Update) VerifySignatureWithPEM(publicKeyPEM []byte) (*Update, error) {
|
||||||
|
block, _ := pem.Decode(publicKeyPEM)
|
||||||
|
if block == nil {
|
||||||
|
return u, fmt.Errorf("Couldn't parse PEM data")
|
||||||
|
}
|
||||||
|
|
||||||
|
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
return u, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ok bool
|
||||||
|
u.PublicKey, ok = pub.(*rsa.PublicKey)
|
||||||
|
if !ok {
|
||||||
|
return u, fmt.Errorf("Public key isn't an RSA public key")
|
||||||
|
}
|
||||||
|
|
||||||
|
return u, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromUrl updates the target with the contents of the given URL.
|
||||||
|
func (u *Update) FromUrl(url string) (err error, errRecover error) {
|
||||||
|
target := new(download.MemoryTarget)
|
||||||
|
err = download.New(url, target).Get()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
return u.FromStream(target)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromFile updates the target the contents of the given file.
|
||||||
|
func (u *Update) FromFile(path string) (err error, errRecover error) {
|
||||||
|
// open the new updated contents
|
||||||
|
fp, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fp.Close()
|
||||||
|
|
||||||
|
// do the update
|
||||||
|
return u.FromStream(fp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FromStream updates the target file with the contents of the supplied io.Reader.
|
||||||
|
//
|
||||||
|
// FromStream performs the following actions to ensure a safe cross-platform update:
|
||||||
|
//
|
||||||
|
// 1. If configured, applies the contents of the io.Reader as a binary patch.
|
||||||
|
//
|
||||||
|
// 2. If configured, computes the sha256 checksum and verifies it matches.
|
||||||
|
//
|
||||||
|
// 3. If configured, verifies the RSA signature with a public key.
|
||||||
|
//
|
||||||
|
// 4. Creates a new file, /path/to/.target.new with mode 0755 with the contents of the updated file
|
||||||
|
//
|
||||||
|
// 5. Renames /path/to/target to /path/to/.target.old
|
||||||
|
//
|
||||||
|
// 6. Renames /path/to/.target.new to /path/to/target
|
||||||
|
//
|
||||||
|
// 7. If the rename is successful, deletes /path/to/.target.old, returns no error
|
||||||
|
//
|
||||||
|
// 8. If the rename fails, attempts to rename /path/to/.target.old back to /path/to/target
|
||||||
|
// If this operation fails, it is reported in the errRecover return value so as not to
|
||||||
|
// mask the original error that caused the recovery attempt.
|
||||||
|
//
|
||||||
|
// On Windows, the removal of /path/to/.target.old always fails, so instead,
|
||||||
|
// we just make the old file hidden instead.
|
||||||
|
func (u *Update) FromStream(updateWith io.Reader) (err error, errRecover error) {
|
||||||
|
updatePath, err := u.getPath()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var newBytes []byte
|
||||||
|
// apply a patch if requested
|
||||||
|
switch u.PatchType {
|
||||||
|
case PATCHTYPE_BSDIFF:
|
||||||
|
newBytes, err = applyPatch(updateWith, updatePath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case PATCHTYPE_NONE:
|
||||||
|
// no patch to apply, go on through
|
||||||
|
newBytes, err = ioutil.ReadAll(updateWith)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("Unrecognized patch type: %s", u.PatchType)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify checksum if requested
|
||||||
|
if u.Checksum != nil {
|
||||||
|
if err = verifyChecksum(newBytes, u.Checksum); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// verify signature if requested
|
||||||
|
if u.Signature != nil || u.PublicKey != nil {
|
||||||
|
if u.Signature == nil {
|
||||||
|
err = fmt.Errorf("No public key specified to verify signature")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if u.PublicKey == nil {
|
||||||
|
err = fmt.Errorf("No signature to verify!")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = verifySignature(newBytes, u.Signature, u.PublicKey); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// get the directory the executable exists in
|
||||||
|
updateDir := filepath.Dir(updatePath)
|
||||||
|
filename := filepath.Base(updatePath)
|
||||||
|
|
||||||
|
// Copy the contents of of newbinary to a the new executable file
|
||||||
|
newPath := filepath.Join(updateDir, fmt.Sprintf(".%s.new", filename))
|
||||||
|
fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer fp.Close()
|
||||||
|
_, err = io.Copy(fp, bytes.NewReader(newBytes))
|
||||||
|
|
||||||
|
// if we don't call fp.Close(), windows won't let us move the new executable
|
||||||
|
// because the file will still be "in use"
|
||||||
|
fp.Close()
|
||||||
|
|
||||||
|
// this is where we'll move the executable to so that we can swap in the updated replacement
|
||||||
|
oldPath := filepath.Join(updateDir, fmt.Sprintf(".%s.old", filename))
|
||||||
|
|
||||||
|
// delete any existing old exec file - this is necessary on Windows for two reasons:
|
||||||
|
// 1. after a successful update, Windows can't remove the .old file because the process is still running
|
||||||
|
// 2. windows rename operations fail if the destination file already exists
|
||||||
|
_ = os.Remove(oldPath)
|
||||||
|
|
||||||
|
// move the existing executable to a new file in the same directory
|
||||||
|
err = os.Rename(updatePath, oldPath)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// move the new exectuable in to become the new program
|
||||||
|
err = os.Rename(newPath, updatePath)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// copy unsuccessful
|
||||||
|
errRecover = os.Rename(oldPath, updatePath)
|
||||||
|
} else {
|
||||||
|
// copy successful, remove the old binary
|
||||||
|
errRemove := os.Remove(oldPath)
|
||||||
|
|
||||||
|
// windows has trouble with removing old binaries, so hide it instead
|
||||||
|
if errRemove != nil {
|
||||||
|
_ = hideFile(oldPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CanUpdate() determines whether the process has the correct permissions to
|
||||||
|
// perform the requested update. If the update can proceed, it returns nil, otherwise
|
||||||
|
// it returns the error that would occur if an update were attempted.
|
||||||
|
func (u *Update) CanUpdate() (err error) {
|
||||||
|
// get the directory the file exists in
|
||||||
|
path, err := u.getPath()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fileDir := filepath.Dir(path)
|
||||||
|
fileName := filepath.Base(path)
|
||||||
|
|
||||||
|
// attempt to open a file in the file's directory
|
||||||
|
newPath := filepath.Join(fileDir, fmt.Sprintf(".%s.new", fileName))
|
||||||
|
fp, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0755)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
fp.Close()
|
||||||
|
|
||||||
|
_ = os.Remove(newPath)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func applyPatch(patch io.Reader, updatePath string) ([]byte, error) {
|
||||||
|
// open the file to update
|
||||||
|
old, err := os.Open(updatePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer old.Close()
|
||||||
|
|
||||||
|
// apply the patch
|
||||||
|
applied := new(bytes.Buffer)
|
||||||
|
if err = binarydist.Patch(old, applied, patch); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return applied.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyChecksum(updated []byte, expectedChecksum []byte) error {
|
||||||
|
checksum, err := ChecksumForBytes(updated)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(expectedChecksum, checksum) {
|
||||||
|
return fmt.Errorf("Updated file has wrong checksum. Expected: %x, got: %x", expectedChecksum, checksum)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChecksumForFile returns the sha256 checksum for the given file
|
||||||
|
func ChecksumForFile(path string) ([]byte, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
return ChecksumForReader(f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChecksumForReader returns the sha256 checksum for the entire
|
||||||
|
// contents of the given reader.
|
||||||
|
func ChecksumForReader(rd io.Reader) ([]byte, error) {
|
||||||
|
h := sha256.New()
|
||||||
|
if _, err := io.Copy(h, rd); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return h.Sum(nil), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ChecksumForBytes returns the sha256 checksum for the given bytes
|
||||||
|
func ChecksumForBytes(source []byte) ([]byte, error) {
|
||||||
|
return ChecksumForReader(bytes.NewReader(source))
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifySignature(source, signature []byte, publicKey *rsa.PublicKey) error {
|
||||||
|
checksum, err := ChecksumForBytes(source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return rsa.VerifyPKCS1v15(publicKey, crypto.SHA256, checksum, signature)
|
||||||
|
}
|
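Following the package documentation in update.go above, a sketch of applying an update from a local file, with the CanUpdate permission check performed first; the target and source paths are placeholders, not values from this commit:

    package main

    import (
        "fmt"
        "log"

        update "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/inconshreveable/go-update"
    )

    func main() {
        // Placeholder paths for this sketch.
        up := update.New().Target("/usr/local/bin/some-program")

        // Fail early with a clear message if we lack permission to swap the binary.
        if err := up.CanUpdate(); err != nil {
            fmt.Printf("can't update: %v (try again as root or Administrator)\n", err)
            return
        }

        err, errRecover := up.FromFile("/tmp/some-program.new")
        if err != nil {
            log.Printf("update failed: %v", err)
            if errRecover != nil {
                log.Printf("failed to restore the original binary: %v", errRecover)
            }
        }
    }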
Godeps/_workspace/src/github.com/inconshreveable/go-update/update_test.go (generated, vendored, new file)
@@ -0,0 +1,380 @@
package update

import (
    "bytes"
    "crypto"
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "encoding/pem"
    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/kr/binarydist"
    "io/ioutil"
    "net"
    "net/http"
    "os"
    "testing"
)

var (
    oldFile = []byte{0xDE, 0xAD, 0xBE, 0xEF}
    newFile = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
)

func cleanup(path string) {
    os.Remove(path)
}

// we write with a separate name for each test so that we can run them in parallel
func writeOldFile(path string, t *testing.T) {
    if err := ioutil.WriteFile(path, oldFile, 0777); err != nil {
        t.Fatalf("Failed to write file for testing preparation: %v", err)
    }
}

func validateUpdate(path string, err error, t *testing.T) {
    if err != nil {
        t.Fatalf("Failed to update: %v", err)
    }

    buf, err := ioutil.ReadFile(path)
    if err != nil {
        t.Fatalf("Failed to read file post-update: %v", err)
    }

    if !bytes.Equal(buf, newFile) {
        t.Fatalf("File was not updated! Bytes read: %v, Bytes expected: %v", buf, newFile)
    }
}

func TestFromStream(t *testing.T) {
    t.Parallel()

    fName := "TestFromStream"
    defer cleanup(fName)
    writeOldFile(fName, t)

    err, _ := New().Target(fName).FromStream(bytes.NewReader(newFile))
    validateUpdate(fName, err, t)
}

func TestFromFile(t *testing.T) {
    t.Parallel()

    fName := "TestFromFile"
    newFName := "NewTestFromFile"
    defer cleanup(fName)
    defer cleanup(newFName)
    writeOldFile(fName, t)

    if err := ioutil.WriteFile(newFName, newFile, 0777); err != nil {
        t.Fatalf("Failed to write file to update from: %v", err)
    }

    err, _ := New().Target(fName).FromFile(newFName)
    validateUpdate(fName, err, t)
}

func TestFromUrl(t *testing.T) {
    t.Parallel()

    fName := "TestFromUrl"
    defer cleanup(fName)
    writeOldFile(fName, t)

    l, err := net.Listen("tcp", ":0")
    if err != nil {
        t.Fatalf("Couldn't bind listener: %v", err)
    }
    addr := l.Addr().String()

    go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write(newFile)
    }))

    err, _ = New().Target(fName).FromUrl("http://" + addr)
    validateUpdate(fName, err, t)
}

func TestVerifyChecksum(t *testing.T) {
    t.Parallel()

    fName := "TestVerifyChecksum"
    defer cleanup(fName)
    writeOldFile(fName, t)

    checksum, err := ChecksumForBytes(newFile)
    if err != nil {
        t.Fatalf("Failed to compute checksum: %v", err)
    }

    err, _ = New().Target(fName).VerifyChecksum(checksum).FromStream(bytes.NewReader(newFile))
    validateUpdate(fName, err, t)
}

func TestVerifyChecksumNegative(t *testing.T) {
    t.Parallel()

    fName := "TestVerifyChecksumNegative"
    defer cleanup(fName)
    writeOldFile(fName, t)

    badChecksum := []byte{0x0A, 0x0B, 0x0C, 0xFF}
    err, _ := New().Target(fName).VerifyChecksum(badChecksum).FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Failed to detect bad checksum!")
    }
}

func TestApplyPatch(t *testing.T) {
    t.Parallel()

    fName := "TestApplyPatch"
    defer cleanup(fName)
    writeOldFile(fName, t)

    patch := new(bytes.Buffer)
    err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
    if err != nil {
        t.Fatalf("Failed to create patch: %v", err)
    }

    up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF)
    err, _ = up.FromStream(bytes.NewReader(patch.Bytes()))
    validateUpdate(fName, err, t)
}

func TestCorruptPatch(t *testing.T) {
    t.Parallel()

    fName := "TestCorruptPatch"
    defer cleanup(fName)
    writeOldFile(fName, t)

    badPatch := []byte{0x44, 0x38, 0x86, 0x3c, 0x4f, 0x8d, 0x26, 0x54, 0xb, 0x11, 0xce, 0xfe, 0xc1, 0xc0, 0xf8, 0x31, 0x38, 0xa0, 0x12, 0x1a, 0xa2, 0x57, 0x2a, 0xe1, 0x3a, 0x48, 0x62, 0x40, 0x2b, 0x81, 0x12, 0xb1, 0x21, 0xa5, 0x16, 0xed, 0x73, 0xd6, 0x54, 0x84, 0x29, 0xa6, 0xd6, 0xb2, 0x1b, 0xfb, 0xe6, 0xbe, 0x7b, 0x70}
    up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF)
    err, _ := up.FromStream(bytes.NewReader(badPatch))
    if err == nil {
        t.Fatalf("Failed to detect corrupt patch!")
    }
}

func TestVerifyChecksumPatchNegative(t *testing.T) {
    t.Parallel()

    fName := "TestVerifyChecksumPatchNegative"
    defer cleanup(fName)
    writeOldFile(fName, t)

    checksum, err := ChecksumForBytes(newFile)
    if err != nil {
        t.Fatalf("Failed to compute checksum: %v", err)
    }

    patch := new(bytes.Buffer)
    anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
    err = binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
    if err != nil {
        t.Fatalf("Failed to create patch: %v", err)
    }

    up := New().Target(fName).ApplyPatch(PATCHTYPE_BSDIFF).VerifyChecksum(checksum)
    err, _ = up.FromStream(bytes.NewReader(patch.Bytes()))
    if err == nil {
        t.Fatalf("Failed to detect patch to wrong file!")
    }
}

const publicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxSWmu7trWKAwDFjiCN2D
Tk2jj2sgcr/CMlI4cSSiIOHrXCFxP1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKab
b9ead+kD0kxk7i2bFYvKX43oq66IW0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4
y20C59dPr9Dpcz8DZkdLsBV6YKF6Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjT
x4xRnjgTRRRlZvRtALHMUkIChgxDOhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv5
5fhJ08Rz7mmZmtH5JxTK5XTquo59sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7Nrf
fQIDAQAB
-----END PUBLIC KEY-----`

const privateKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAxSWmu7trWKAwDFjiCN2DTk2jj2sgcr/CMlI4cSSiIOHrXCFx
P1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKabb9ead+kD0kxk7i2bFYvKX43oq66I
W0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4y20C59dPr9Dpcz8DZkdLsBV6YKF6
Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjTx4xRnjgTRRRlZvRtALHMUkIChgxD
OhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv55fhJ08Rz7mmZmtH5JxTK5XTquo59
sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7NrffQIDAQABAoIBAAkN+6RvrTR61voa
Mvd5RQiZpEN4Bht/Fyo8gH8h0Zh1B9xJZOwlmMZLS5fdtHlfLEhR8qSrGDBL61vq
I8KkhEsUufF78EL+YzxVN+Q7cWYGHIOWFokqza7hzpSxUQO6lPOMQ1eIZaNueJTB
Zu07/47ISPPg/bXzgGVcpYlTCPTjUwKjtfyMqvX9AD7fIyYRm6zfE7EHj1J2sBFt
Yz1OGELg6HfJwXfpnPfBvftD0hWGzJ78Bp71fPJe6n5gnqmSqRvrcXNWFnH/yqkN
d6vPIxD6Z3LjvyZpkA7JillLva2L/zcIFhg4HZvQnWd8/PpDnUDonu36hcj4SC5j
W4aVPLkCgYEA4XzNKWxqYcajzFGZeSxlRHupSAl2MT7Cc5085MmE7dd31wK2T8O4
n7N4bkm/rjTbX85NsfWdKtWb6mpp8W3VlLP0rp4a/12OicVOkg4pv9LZDmY0sRlE
YuDJk1FeCZ50UrwTZI3rZ9IhZHhkgVA6uWAs7tYndONkxNHG0pjqs4sCgYEA39MZ
JwMqo3qsPntpgP940cCLflEsjS9hYNO3+Sv8Dq3P0HLVhBYajJnotf8VuU0fsQZG
grmtVn1yThFbMq7X1oY4F0XBA+paSiU18c4YyUnwax2u4sw9U/Q9tmQUZad5+ueT
qriMBwGv+ewO+nQxqvAsMUmemrVzrfwA5Oct+hcCgYAfiyXoNZJsOy2O15twqBVC
j0oPGcO+/9iT89sg5lACNbI+EdMPNYIOVTzzsL1v0VUfAe08h++Enn1BPcG0VHkc
ZFBGXTfJoXzfKQrkw7ZzbzuOGB4m6DH44xlP0oIlNlVvfX/5ASF9VJf3RiBJNsAA
TsP6ZVr/rw/ZuL7nlxy+IQKBgDhL/HOXlE3yOQiuOec8WsNHTs7C1BXe6PtVxVxi
988pYK/pclL6zEq5G5NLSceF4obAMVQIJ9UtUGbabrncyGUo9UrFPLsjYvprSZo8
YHegpVwL50UcYgCP2kXZ/ldjPIcjYDz8lhvdDMor2cidGTEJn9P11HLNWP9V91Ob
4jCZAoGAPNRSC5cC8iP/9j+s2/kdkfWJiNaolPYAUrmrkL6H39PYYZM5tnhaIYJV
Oh9AgABamU0eb3p3vXTISClVgV7ifq1HyZ7BSUhMfaY2Jk/s3sUHCWFxPZe9sgEG
KinIY/373KIkIV/5g4h2v1w330IWcfptxKcY/Er3DJr38f695GE=
-----END RSA PRIVATE KEY-----`

func sign(privatePEM string, source []byte, t *testing.T) []byte {
    block, _ := pem.Decode([]byte(privatePEM))
    if block == nil {
        t.Fatalf("Failed to parse private key PEM")
    }

    priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
    if err != nil {
        t.Fatalf("Failed to parse private key DER")
    }

    checksum, err := ChecksumForBytes(source)
    if err != nil {
        t.Fatalf("Failed to make checksum")
    }

    sig, err := rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA256, checksum)
    if err != nil {
        t.Fatalf("Failed to sign: %v", err)
    }

    return sig
}

func TestVerifySignature(t *testing.T) {
    t.Parallel()

    fName := "TestVerifySignature"
    defer cleanup(fName)
    writeOldFile(fName, t)

    up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
    if err != nil {
        t.Fatalf("Could not parse public key: %v", err)
    }

    signature := sign(privateKey, newFile, t)
    err, _ = up.VerifySignature(signature).FromStream(bytes.NewReader(newFile))
    validateUpdate(fName, err, t)
}

func TestVerifyFailBadSignature(t *testing.T) {
    t.Parallel()

    fName := "TestVerifyFailBadSignature"
    defer cleanup(fName)
    writeOldFile(fName, t)

    up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
    if err != nil {
        t.Fatalf("Could not parse public key: %v", err)
    }

    badSig := []byte{0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA}
    err, _ = up.VerifySignature(badSig).FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Did not fail with bad signature")
    }
}

func TestVerifyFailNoSignature(t *testing.T) {
    t.Parallel()

    fName := "TestVerifySignatureWithPEM"
    defer cleanup(fName)
    writeOldFile(fName, t)

    up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
    if err != nil {
        t.Fatalf("Could not parse public key: %v", err)
    }

    err, _ = up.VerifySignature([]byte{}).FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Did not fail with empty signature")
    }
}

const wrongKey = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEArKqjT+xOFJILe0CX7lKfQy52YwWLF9devYtLeUHTbPOueGLy
6CjrXJBrWIxNBxRd53y4dtgiMqCX6Gmmvuy8HnfbBuJjR2mcdEYo8UDy+aSVBQ6T
/ND7Fd7KSzOruEFFzl2QFnZ/SrW/nsXdGyuF8l+YIwjZJRyV6StZkZ4ydOzOqUk9
FXTeIkhX/Q7/jTETw7L3wxMyLgJAlV3lxDsPkMjxymngtbAIjwEjLsVeU+Prcz2e
Ww34SZQ8qwzAdXieuDPryMwEsCcgc5NAKJFNL8TppYGDXOHI7CRXqHfNiJq2R+kQ
LdxRvmfx8/iu4xM2hBzk4uSDS6TTn2AnWBm+cQIDAQABAoIBAFp//aUwaCRj/9yU
GI3zhEJEIgz4pNTUL3YNgnuFwvlCJ9o1kreYavRTRdBdiSoCxM1GE7FGy3XZsoVA
iwNbNaaKj6RmGD8f3b8b3u3EaxXp66mA4JQMPO5TnZgY9xJWM+5cH9+GMGXKKStg
7ekFwOkuraD/TEElYHWcIRAv6KZbc/YOIa6YDKi+1Gc7u0MeIvwqN7nwaBAoJKUE
ZrJIfYKIViD/ZrCpgWN47C9x8w3ne7iiDrYoYct+0reC9LFlqwVBtDnyVx/q3upW
zzczbNQagu3w0QgprDGhy0ZhDNxuylV3XBWTB+xBrFQgz6rD3LzUPywlbt0N7ZmD
936MVSECgYEA1IElCahF/+hC/OxFgy98DubAUDGmrvxWeZF3bvTseWZQp/gzxVS+
SYumYyd2Ysx5+UjXQlVgR6BbDG13+DpSpZm6+MeWHBAR+KA2qCg009SDFv7l26/d
xMT7lvIWz7ckQDb/+jvhF9HL2llyTN1Zex+n3XBeAMKNrPaubdEBFsUCgYEA0AIO
tZMtzOpioAR1lGbwIguq04msDdrJNaY2TKrLeviJuQUw94fgL+3ULAPsiyxaU/Gv
vln11R7aIp1SJ09T2UoFRbty+6SGRC56+Wh0pn5VnAi7aT6qdkYWhEjhqRHuXosf
PYboXBuMwA0FBUTxWQL/lux2PZgvBkniYh5jI70CgYEAk9KmhhpFX2gdOT3OeRxO
CzufaemwDqfAK97yGwBLg4OV9dJliQ6TNCvt+amY489jxfJSs3UafZjh3TpFKyq/
FS1kb+y+0hSnu7EPdFhLr1N0QUndcb3b4iY48V7EWYgHspfP5y1CPsSVLvXr2eZc
eZaiuhqReavczAXpfsDWJhUCgYEAwmUp2gfyhc+G3IVOXaLWSPseaxP+9/PAl6L+
nCgCgqpEC+YOHUee/SwHXhtMtcR9pnX5CKyKUuLCehcM8C/y7N+AjerhSsw3rwDB
bNVyLydiWrDOdU1bga1+3aI/QwK/AxyB1b5+6ZXVtKZ2SrZj2Aw1UZcr6eSQDhB+
wbQkcwECgYBF13FMA6OOon992t9H3I+4KDgmz6G6mz3bVXSoFWfO1p/yXP04BzJl
jtLFvFVTZdMs2o/wTd4SL6gYjx9mlOWwM8FblmjfiNSUVIyye33fRntEAr1n+FYI
Xhv6aVnNdaGehGIqQxXFoGyiJxG3RYNkSwaTOamxY1V+ceLuO26n2Q==
-----END RSA PRIVATE KEY-----`

func TestVerifyFailWrongSignature(t *testing.T) {
    t.Parallel()

    fName := "TestVerifyFailWrongSignature"
    defer cleanup(fName)
    writeOldFile(fName, t)

    up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
    if err != nil {
        t.Fatalf("Could not parse public key: %v", err)
    }

    signature := sign(wrongKey, newFile, t)
    err, _ = up.VerifySignature(signature).FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Verified an update that was signed by an untrusted key!")
    }
}

func TestSignatureButNoPublicKey(t *testing.T) {
    t.Parallel()

    fName := "TestSignatureButNoPublicKey"
    defer cleanup(fName)
    writeOldFile(fName, t)

    sig := sign(privateKey, newFile, t)
    err, _ := New().Target(fName).VerifySignature(sig).FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Allowed an update with a signature verification when no public key was specified!")
    }
}

func TestPublicKeyButNoSignature(t *testing.T) {
    t.Parallel()

    fName := "TestPublicKeyButNoSignature"
    defer cleanup(fName)
    writeOldFile(fName, t)

    up, err := New().Target(fName).VerifySignatureWithPEM([]byte(publicKey))
    if err != nil {
        t.Fatalf("Could not parse public key: %v", err)
    }

    err, _ = up.FromStream(bytes.NewReader(newFile))
    if err == nil {
        t.Fatalf("Allowed an update with no signature when a public key was specified!")
    }
}
21
Godeps/_workspace/src/github.com/jbenet/go-multihash/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Juan Batiz-Benet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
1
Godeps/_workspace/src/github.com/kr/binarydist/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
test.*
22
Godeps/_workspace/src/github.com/kr/binarydist/License
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Copyright 2012 Keith Rarick

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
7
Godeps/_workspace/src/github.com/kr/binarydist/Readme.md
generated
vendored
Normal file
@@ -0,0 +1,7 @@
# binarydist

Package binarydist implements binary diff and patch as described on
<http://www.daemonology.net/bsdiff/>. It reads and writes files
compatible with the tools there.

Documentation at <http://go.pkgdoc.org/github.com/kr/binarydist>.
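The exported API vendored below is small: Diff(old, new io.Reader, patch io.Writer) error and Patch(old io.Reader, new io.Writer, patch io.Reader) error. A minimal round-trip sketch follows, illustration only: it imports the upstream path rather than the Godeps workspace path, and Diff shells out to an external bzip2 binary (see bzip2.go below), so that tool must be on PATH:

package main

import (
    "bytes"
    "log"
    "os"

    "github.com/kr/binarydist"
)

func main() {
    oldData := []byte("version 1 of some binary")
    newData := []byte("version 2 of some binary payload")

    // Produce a bsdiff-format patch that transforms oldData into newData.
    patch := new(bytes.Buffer)
    if err := binarydist.Diff(bytes.NewReader(oldData), bytes.NewReader(newData), patch); err != nil {
        log.Fatal(err)
    }

    // Apply the patch to the old bytes to reconstruct the new bytes.
    rebuilt := new(bytes.Buffer)
    if err := binarydist.Patch(bytes.NewReader(oldData), rebuilt, patch); err != nil {
        log.Fatal(err)
    }

    if !bytes.Equal(rebuilt.Bytes(), newData) {
        log.Fatal("patched output does not match the new file")
    }
    os.Stdout.WriteString("round trip ok\n")
}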
40
Godeps/_workspace/src/github.com/kr/binarydist/bzip2.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
package binarydist

import (
    "io"
    "os/exec"
)

type bzip2Writer struct {
    c *exec.Cmd
    w io.WriteCloser
}

func (w bzip2Writer) Write(b []byte) (int, error) {
    return w.w.Write(b)
}

func (w bzip2Writer) Close() error {
    if err := w.w.Close(); err != nil {
        return err
    }
    return w.c.Wait()
}

// Package compress/bzip2 implements only decompression,
// so we'll fake it by running bzip2 in another process.
func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) {
    var bw bzip2Writer
    bw.c = exec.Command("bzip2", "-c")
    bw.c.Stdout = w

    if bw.w, err = bw.c.StdinPipe(); err != nil {
        return nil, err
    }

    if err = bw.c.Start(); err != nil {
        return nil, err
    }

    return bw, nil
}
93
Godeps/_workspace/src/github.com/kr/binarydist/common_test.go
generated
vendored
Normal file
@@ -0,0 +1,93 @@
package binarydist

import (
    "crypto/rand"
    "io"
    "io/ioutil"
    "os"
)

func mustOpen(path string) *os.File {
    f, err := os.Open(path)
    if err != nil {
        panic(err)
    }

    return f
}

func mustReadAll(r io.Reader) []byte {
    b, err := ioutil.ReadAll(r)
    if err != nil {
        panic(err)
    }
    return b
}

func fileCmp(a, b *os.File) int64 {
    sa, err := a.Seek(0, 2)
    if err != nil {
        panic(err)
    }

    sb, err := b.Seek(0, 2)
    if err != nil {
        panic(err)
    }

    if sa != sb {
        return sa
    }

    _, err = a.Seek(0, 0)
    if err != nil {
        panic(err)
    }

    _, err = b.Seek(0, 0)
    if err != nil {
        panic(err)
    }

    pa, err := ioutil.ReadAll(a)
    if err != nil {
        panic(err)
    }

    pb, err := ioutil.ReadAll(b)
    if err != nil {
        panic(err)
    }

    for i := range pa {
        if pa[i] != pb[i] {
            return int64(i)
        }
    }
    return -1
}

func mustWriteRandFile(path string, size int) *os.File {
    p := make([]byte, size)
    _, err := rand.Read(p)
    if err != nil {
        panic(err)
    }

    f, err := os.Create(path)
    if err != nil {
        panic(err)
    }

    _, err = f.Write(p)
    if err != nil {
        panic(err)
    }

    _, err = f.Seek(0, 0)
    if err != nil {
        panic(err)
    }

    return f
}
408
Godeps/_workspace/src/github.com/kr/binarydist/diff.go
generated
vendored
Normal file
@@ -0,0 +1,408 @@
package binarydist

import (
    "bytes"
    "encoding/binary"
    "io"
    "io/ioutil"
)

func swap(a []int, i, j int) { a[i], a[j] = a[j], a[i] }

func split(I, V []int, start, length, h int) {
    var i, j, k, x, jj, kk int

    if length < 16 {
        for k = start; k < start+length; k += j {
            j = 1
            x = V[I[k]+h]
            for i = 1; k+i < start+length; i++ {
                if V[I[k+i]+h] < x {
                    x = V[I[k+i]+h]
                    j = 0
                }
                if V[I[k+i]+h] == x {
                    swap(I, k+i, k+j)
                    j++
                }
            }
            for i = 0; i < j; i++ {
                V[I[k+i]] = k + j - 1
            }
            if j == 1 {
                I[k] = -1
            }
        }
        return
    }

    x = V[I[start+length/2]+h]
    jj = 0
    kk = 0
    for i = start; i < start+length; i++ {
        if V[I[i]+h] < x {
            jj++
        }
        if V[I[i]+h] == x {
            kk++
        }
    }
    jj += start
    kk += jj

    i = start
    j = 0
    k = 0
    for i < jj {
        if V[I[i]+h] < x {
            i++
        } else if V[I[i]+h] == x {
            swap(I, i, jj+j)
            j++
        } else {
            swap(I, i, kk+k)
            k++
        }
    }

    for jj+j < kk {
        if V[I[jj+j]+h] == x {
            j++
        } else {
            swap(I, jj+j, kk+k)
            k++
        }
    }

    if jj > start {
        split(I, V, start, jj-start, h)
    }

    for i = 0; i < kk-jj; i++ {
        V[I[jj+i]] = kk - 1
    }
    if jj == kk-1 {
        I[jj] = -1
    }

    if start+length > kk {
        split(I, V, kk, start+length-kk, h)
    }
}

func qsufsort(obuf []byte) []int {
    var buckets [256]int
    var i, h int
    I := make([]int, len(obuf)+1)
    V := make([]int, len(obuf)+1)

    for _, c := range obuf {
        buckets[c]++
    }
    for i = 1; i < 256; i++ {
        buckets[i] += buckets[i-1]
    }
    copy(buckets[1:], buckets[:])
    buckets[0] = 0

    for i, c := range obuf {
        buckets[c]++
        I[buckets[c]] = i
    }

    I[0] = len(obuf)
    for i, c := range obuf {
        V[i] = buckets[c]
    }

    V[len(obuf)] = 0
    for i = 1; i < 256; i++ {
        if buckets[i] == buckets[i-1]+1 {
            I[buckets[i]] = -1
        }
    }
    I[0] = -1

    for h = 1; I[0] != -(len(obuf) + 1); h += h {
        var n int
        for i = 0; i < len(obuf)+1; {
            if I[i] < 0 {
                n -= I[i]
                i -= I[i]
            } else {
                if n != 0 {
                    I[i-n] = -n
                }
                n = V[I[i]] + 1 - i
                split(I, V, i, n, h)
                i += n
                n = 0
            }
        }
        if n != 0 {
            I[i-n] = -n
        }
    }

    for i = 0; i < len(obuf)+1; i++ {
        I[V[i]] = i
    }
    return I
}

func matchlen(a, b []byte) (i int) {
    for i < len(a) && i < len(b) && a[i] == b[i] {
        i++
    }
    return i
}

func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) {
    if en-st < 2 {
        x := matchlen(obuf[I[st]:], nbuf)
        y := matchlen(obuf[I[en]:], nbuf)

        if x > y {
            return I[st], x
        } else {
            return I[en], y
        }
    }

    x := st + (en-st)/2
    if bytes.Compare(obuf[I[x]:], nbuf) < 0 {
        return search(I, obuf, nbuf, x, en)
    } else {
        return search(I, obuf, nbuf, st, x)
    }
    panic("unreached")
}

// Diff computes the difference between old and new, according to the bsdiff
// algorithm, and writes the result to patch.
func Diff(old, new io.Reader, patch io.Writer) error {
    obuf, err := ioutil.ReadAll(old)
    if err != nil {
        return err
    }

    nbuf, err := ioutil.ReadAll(new)
    if err != nil {
        return err
    }

    pbuf, err := diffBytes(obuf, nbuf)
    if err != nil {
        return err
    }

    _, err = patch.Write(pbuf)
    return err
}

func diffBytes(obuf, nbuf []byte) ([]byte, error) {
    var patch seekBuffer
    err := diff(obuf, nbuf, &patch)
    if err != nil {
        return nil, err
    }
    return patch.buf, nil
}

func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
    var lenf int
    I := qsufsort(obuf)
    db := make([]byte, len(nbuf))
    eb := make([]byte, len(nbuf))
    var dblen, eblen int

    var hdr header
    hdr.Magic = magic
    hdr.NewSize = int64(len(nbuf))
    err := binary.Write(patch, signMagLittleEndian{}, &hdr)
    if err != nil {
        return err
    }

    // Compute the differences, writing ctrl as we go
    pfbz2, err := newBzip2Writer(patch)
    if err != nil {
        return err
    }
    var scan, pos, length int
    var lastscan, lastpos, lastoffset int
    for scan < len(nbuf) {
        var oldscore int
        scan += length
        for scsc := scan; scan < len(nbuf); scan++ {
            pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))

            for ; scsc < scan+length; scsc++ {
                if scsc+lastoffset < len(obuf) &&
                    obuf[scsc+lastoffset] == nbuf[scsc] {
                    oldscore++
                }
            }

            if (length == oldscore && length != 0) || length > oldscore+8 {
                break
            }

            if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
                oldscore--
            }
        }

        if length != oldscore || scan == len(nbuf) {
            var s, Sf int
            lenf = 0
            for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
                if obuf[lastpos+i] == nbuf[lastscan+i] {
                    s++
                }
                i++
                if s*2-i > Sf*2-lenf {
                    Sf = s
                    lenf = i
                }
            }

            lenb := 0
            if scan < len(nbuf) {
                var s, Sb int
                for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
                    if obuf[pos-i] == nbuf[scan-i] {
                        s++
                    }
                    if s*2-i > Sb*2-lenb {
                        Sb = s
                        lenb = i
                    }
                }
            }

            if lastscan+lenf > scan-lenb {
                overlap := (lastscan + lenf) - (scan - lenb)
                s := 0
                Ss := 0
                lens := 0
                for i := 0; i < overlap; i++ {
                    if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
                        s++
                    }
                    if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
                        s--
                    }
                    if s > Ss {
                        Ss = s
                        lens = i + 1
                    }
                }

                lenf += lens - overlap
                lenb -= lens
            }

            for i := 0; i < lenf; i++ {
                db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
            }
            for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
                eb[eblen+i] = nbuf[lastscan+lenf+i]
            }

            dblen += lenf
            eblen += (scan - lenb) - (lastscan + lenf)

            err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
            if err != nil {
                pfbz2.Close()
                return err
            }

            val := (scan - lenb) - (lastscan + lenf)
            err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
            if err != nil {
                pfbz2.Close()
                return err
            }

            val = (pos - lenb) - (lastpos + lenf)
            err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
            if err != nil {
                pfbz2.Close()
                return err
            }

            lastscan = scan - lenb
            lastpos = pos - lenb
            lastoffset = pos - scan
        }
    }
    err = pfbz2.Close()
    if err != nil {
        return err
    }

    // Compute size of compressed ctrl data
    l64, err := patch.Seek(0, 1)
    if err != nil {
        return err
    }
    hdr.CtrlLen = int64(l64 - 32)

    // Write compressed diff data
    pfbz2, err = newBzip2Writer(patch)
    if err != nil {
        return err
    }
    n, err := pfbz2.Write(db[:dblen])
    if err != nil {
        pfbz2.Close()
        return err
    }
    if n != dblen {
        pfbz2.Close()
        return io.ErrShortWrite
    }
    err = pfbz2.Close()
    if err != nil {
        return err
    }

    // Compute size of compressed diff data
    n64, err := patch.Seek(0, 1)
    if err != nil {
        return err
    }
    hdr.DiffLen = n64 - l64

    // Write compressed extra data
    pfbz2, err = newBzip2Writer(patch)
    if err != nil {
        return err
    }
    n, err = pfbz2.Write(eb[:eblen])
    if err != nil {
        pfbz2.Close()
        return err
    }
    if n != eblen {
        pfbz2.Close()
        return io.ErrShortWrite
    }
    err = pfbz2.Close()
    if err != nil {
        return err
    }

    // Seek to the beginning, write the header, and close the file
    _, err = patch.Seek(0, 0)
    if err != nil {
        return err
    }
    err = binary.Write(patch, signMagLittleEndian{}, &hdr)
    if err != nil {
        return err
    }
    return nil
}
67
Godeps/_workspace/src/github.com/kr/binarydist/diff_test.go
generated
vendored
Normal file
@@ -0,0 +1,67 @@
package binarydist

import (
    "bytes"
    "io/ioutil"
    "os"
    "os/exec"
    "testing"
)

var diffT = []struct {
    old *os.File
    new *os.File
}{
    {
        old: mustWriteRandFile("test.old", 1e3),
        new: mustWriteRandFile("test.new", 1e3),
    },
    {
        old: mustOpen("testdata/sample.old"),
        new: mustOpen("testdata/sample.new"),
    },
}

func TestDiff(t *testing.T) {
    for _, s := range diffT {
        got, err := ioutil.TempFile("/tmp", "bspatch.")
        if err != nil {
            panic(err)
        }
        os.Remove(got.Name())

        exp, err := ioutil.TempFile("/tmp", "bspatch.")
        if err != nil {
            panic(err)
        }

        cmd := exec.Command("bsdiff", s.old.Name(), s.new.Name(), exp.Name())
        cmd.Stdout = os.Stdout
        err = cmd.Run()
        os.Remove(exp.Name())
        if err != nil {
            panic(err)
        }

        err = Diff(s.old, s.new, got)
        if err != nil {
            t.Fatal("err", err)
        }

        _, err = got.Seek(0, 0)
        if err != nil {
            panic(err)
        }
        gotBuf := mustReadAll(got)
        expBuf := mustReadAll(exp)

        if !bytes.Equal(gotBuf, expBuf) {
            t.Fail()
            t.Logf("diff %s %s", s.old.Name(), s.new.Name())
            t.Logf("%s: len(got) = %d", got.Name(), len(gotBuf))
            t.Logf("%s: len(exp) = %d", exp.Name(), len(expBuf))
            i := matchlen(gotBuf, expBuf)
            t.Logf("produced different output at pos %d; %d != %d", i, gotBuf[i], expBuf[i])
        }
    }
}
24
Godeps/_workspace/src/github.com/kr/binarydist/doc.go
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// Package binarydist implements binary diff and patch as described on
// http://www.daemonology.net/bsdiff/. It reads and writes files
// compatible with the tools there.
package binarydist

var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'}

// File format:
//   0        8    "BSDIFF40"
//   8        8    X
//   16       8    Y
//   24       8    sizeof(newfile)
//   32       X    bzip2(control block)
//   32+X     Y    bzip2(diff block)
//   32+X+Y   ???  bzip2(extra block)
// with control block a set of triples (x,y,z) meaning "add x bytes
// from oldfile to x bytes from the diff block; copy y bytes from the
// extra block; seek forwards in oldfile by z bytes".
type header struct {
    Magic   [8]byte
    CtrlLen int64
    DiffLen int64
    NewSize int64
}
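The format comment above fixes a 32-byte header at the front of every patch. As an illustration only (this helper is not part of the vendored package), a standalone sketch that reads those four fields from a hypothetical patch file named sample.patch; for well-formed, non-negative length fields the sign-magnitude little-endian encoding coincides with plain little-endian, which is what the sketch relies on:

package main

import (
    "encoding/binary"
    "fmt"
    "log"
    "os"
)

// bsdiffHeader mirrors the 32-byte prefix described in doc.go:
// magic, ctrl length, diff length, new-file size.
type bsdiffHeader struct {
    Magic   [8]byte
    CtrlLen int64
    DiffLen int64
    NewSize int64
}

func main() {
    f, err := os.Open("sample.patch") // hypothetical patch file
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    var hdr bsdiffHeader
    // For non-negative values the sign-magnitude little-endian encoding
    // used by bsdiff matches binary.LittleEndian, so this is safe for
    // well-formed headers.
    if err := binary.Read(f, binary.LittleEndian, &hdr); err != nil {
        log.Fatal(err)
    }

    fmt.Printf("magic=%q ctrl=%d diff=%d newsize=%d\n",
        hdr.Magic[:], hdr.CtrlLen, hdr.DiffLen, hdr.NewSize)
}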
53
Godeps/_workspace/src/github.com/kr/binarydist/encoding.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
package binarydist

// signMagLittleEndian is the numeric encoding used by the bsdiff tools.
// It implements binary.ByteOrder using a sign-magnitude format
// and little-endian byte order. Only the Uint64, PutUint64, and String
// methods are implemented; the rest panic.
type signMagLittleEndian struct{}

func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") }

func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") }

func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") }

func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") }

func (signMagLittleEndian) Uint64(b []byte) uint64 {
    y := int64(b[0]) |
        int64(b[1])<<8 |
        int64(b[2])<<16 |
        int64(b[3])<<24 |
        int64(b[4])<<32 |
        int64(b[5])<<40 |
        int64(b[6])<<48 |
        int64(b[7]&0x7f)<<56

    if b[7]&0x80 != 0 {
        y = -y
    }
    return uint64(y)
}

func (signMagLittleEndian) PutUint64(b []byte, v uint64) {
    x := int64(v)
    neg := x < 0
    if neg {
        x = -x
    }

    b[0] = byte(x)
    b[1] = byte(x >> 8)
    b[2] = byte(x >> 16)
    b[3] = byte(x >> 24)
    b[4] = byte(x >> 32)
    b[5] = byte(x >> 40)
    b[6] = byte(x >> 48)
    b[7] = byte(x >> 56)
    if neg {
        b[7] |= 0x80
    }
}

func (signMagLittleEndian) String() string { return "signMagLittleEndian" }
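To make the sign-magnitude layout concrete, here is a small self-contained sketch (the helper names are assumptions, not part of the vendored file) that encodes and decodes an int64 the same way: magnitude in the low 63 bits, little-endian, sign in the top bit of the last byte:

package main

import "fmt"

// putSignMag writes x into b using bsdiff's sign-magnitude little-endian layout.
func putSignMag(b []byte, x int64) {
    neg := x < 0
    if neg {
        x = -x
    }
    for i := 0; i < 8; i++ {
        b[i] = byte(x >> (8 * uint(i)))
    }
    if neg {
        b[7] |= 0x80
    }
}

// getSignMag is the inverse of putSignMag.
func getSignMag(b []byte) int64 {
    var x int64
    for i := 0; i < 7; i++ {
        x |= int64(b[i]) << (8 * uint(i))
    }
    x |= int64(b[7]&0x7f) << 56
    if b[7]&0x80 != 0 {
        x = -x
    }
    return x
}

func main() {
    var buf [8]byte
    putSignMag(buf[:], -5)
    // -5 encodes as 05 00 00 00 00 00 00 80: magnitude 5, sign bit set.
    fmt.Printf("% x -> %d\n", buf[:], getSignMag(buf[:]))
}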
109
Godeps/_workspace/src/github.com/kr/binarydist/patch.go
generated
vendored
Normal file
@@ -0,0 +1,109 @@
package binarydist

import (
    "bytes"
    "compress/bzip2"
    "encoding/binary"
    "errors"
    "io"
    "io/ioutil"
)

var ErrCorrupt = errors.New("corrupt patch")

// Patch applies patch to old, according to the bspatch algorithm,
// and writes the result to new.
func Patch(old io.Reader, new io.Writer, patch io.Reader) error {
    var hdr header
    err := binary.Read(patch, signMagLittleEndian{}, &hdr)
    if err != nil {
        return err
    }
    if hdr.Magic != magic {
        return ErrCorrupt
    }
    if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 {
        return ErrCorrupt
    }

    ctrlbuf := make([]byte, hdr.CtrlLen)
    _, err = io.ReadFull(patch, ctrlbuf)
    if err != nil {
        return err
    }
    cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf))

    diffbuf := make([]byte, hdr.DiffLen)
    _, err = io.ReadFull(patch, diffbuf)
    if err != nil {
        return err
    }
    dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf))

    // The entire rest of the file is the extra block.
    epfbz2 := bzip2.NewReader(patch)

    obuf, err := ioutil.ReadAll(old)
    if err != nil {
        return err
    }

    nbuf := make([]byte, hdr.NewSize)

    var oldpos, newpos int64
    for newpos < hdr.NewSize {
        var ctrl struct{ Add, Copy, Seek int64 }
        err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl)
        if err != nil {
            return err
        }

        // Sanity-check
        if newpos+ctrl.Add > hdr.NewSize {
            return ErrCorrupt
        }

        // Read diff string
        _, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add])
        if err != nil {
            return ErrCorrupt
        }

        // Add old data to diff string
        for i := int64(0); i < ctrl.Add; i++ {
            if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) {
                nbuf[newpos+i] += obuf[oldpos+i]
            }
        }

        // Adjust pointers
        newpos += ctrl.Add
        oldpos += ctrl.Add

        // Sanity-check
        if newpos+ctrl.Copy > hdr.NewSize {
            return ErrCorrupt
        }

        // Read extra string
        _, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy])
        if err != nil {
            return ErrCorrupt
        }

        // Adjust pointers
        newpos += ctrl.Copy
        oldpos += ctrl.Seek
    }

    // Write the new file
    for len(nbuf) > 0 {
        n, err := new.Write(nbuf)
        if err != nil {
            return err
        }
        nbuf = nbuf[n:]
    }

    return nil
}
62
Godeps/_workspace/src/github.com/kr/binarydist/patch_test.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
package binarydist

import (
    "io/ioutil"
    "os"
    "os/exec"
    "testing"
)

func TestPatch(t *testing.T) {
    mustWriteRandFile("test.old", 1e3)
    mustWriteRandFile("test.new", 1e3)

    got, err := ioutil.TempFile("/tmp", "bspatch.")
    if err != nil {
        panic(err)
    }
    os.Remove(got.Name())

    err = exec.Command("bsdiff", "test.old", "test.new", "test.patch").Run()
    if err != nil {
        panic(err)
    }

    err = Patch(mustOpen("test.old"), got, mustOpen("test.patch"))
    if err != nil {
        t.Fatal("err", err)
    }

    ref, err := got.Seek(0, 2)
    if err != nil {
        panic(err)
    }

    t.Logf("got %d bytes", ref)
    if n := fileCmp(got, mustOpen("test.new")); n > -1 {
        t.Fatalf("produced different output at pos %d", n)
    }
}

func TestPatchHk(t *testing.T) {
    got, err := ioutil.TempFile("/tmp", "bspatch.")
    if err != nil {
        panic(err)
    }
    os.Remove(got.Name())

    err = Patch(mustOpen("testdata/sample.old"), got, mustOpen("testdata/sample.patch"))
    if err != nil {
        t.Fatal("err", err)
    }

    ref, err := got.Seek(0, 2)
    if err != nil {
        panic(err)
    }

    t.Logf("got %d bytes", ref)
    if n := fileCmp(got, mustOpen("testdata/sample.new")); n > -1 {
        t.Fatalf("produced different output at pos %d", n)
    }
}
43
Godeps/_workspace/src/github.com/kr/binarydist/seek.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
package binarydist

import (
    "errors"
)

type seekBuffer struct {
    buf []byte
    pos int
}

func (b *seekBuffer) Write(p []byte) (n int, err error) {
    n = copy(b.buf[b.pos:], p)
    if n == len(p) {
        b.pos += n
        return n, nil
    }
    b.buf = append(b.buf, p[n:]...)
    b.pos += len(p)
    return len(p), nil
}

func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) {
    var abs int64
    switch whence {
    case 0:
        abs = offset
    case 1:
        abs = int64(b.pos) + offset
    case 2:
        abs = int64(len(b.buf)) + offset
    default:
        return 0, errors.New("binarydist: invalid whence")
    }
    if abs < 0 {
        return 0, errors.New("binarydist: negative position")
    }
    if abs >= 1<<31 {
        return 0, errors.New("binarydist: position out of range")
    }
    b.pos = int(abs)
    return abs, nil
}
33
Godeps/_workspace/src/github.com/kr/binarydist/sort_test.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
package binarydist

import (
    "bytes"
    "crypto/rand"
    "testing"
)

var sortT = [][]byte{
    mustRandBytes(1000),
    mustReadAll(mustOpen("test.old")),
    []byte("abcdefabcdef"),
}

func TestQsufsort(t *testing.T) {
    for _, s := range sortT {
        I := qsufsort(s)
        for i := 1; i < len(I); i++ {
            if bytes.Compare(s[I[i-1]:], s[I[i]:]) > 0 {
                t.Fatalf("unsorted at %d", i)
            }
        }
    }
}

func mustRandBytes(n int) []byte {
    b := make([]byte, n)
    _, err := rand.Read(b)
    if err != nil {
        panic(err)
    }
    return b
}
BIN
Godeps/_workspace/src/github.com/kr/binarydist/testdata/sample.new
generated
vendored
Normal file
Binary file not shown.
BIN
Godeps/_workspace/src/github.com/kr/binarydist/testdata/sample.old
generated
vendored
Normal file
Binary file not shown.
BIN
Godeps/_workspace/src/github.com/kr/binarydist/testdata/sample.patch
generated
vendored
Normal file
Binary file not shown.