1
0
mirror of https://github.com/ipfs/kubo.git synced 2025-05-17 06:57:40 +08:00

Fix typos and cleanup

This commit is contained in:
Dimitris Apostolou
2020-04-18 17:45:01 +03:00
parent ed4812b57a
commit 1e437c7e97
64 changed files with 105 additions and 105 deletions

View File

@ -238,7 +238,7 @@ release).
However,
1. Badger is complicated while flatfs pushes all the complexity down into the
filesystem itself. That means that flatfs is only likely to loose your data
filesystem itself. That means that flatfs is only likely to lose your data
if your underlying filesystem gets corrupted while there are more
opportunities for badger itself to get corrupted.
2. Badger can use a lot of memory. In this release, we've tuned badger to use

View File

@ -87,7 +87,7 @@ RUN mkdir /ipfs /ipns \
# Expose the fs-repo as a volume.
# start_ipfs initializes an fs-repo if none is mounted.
# Important this happens after the USER directive so permission are correct.
# Important this happens after the USER directive so permissions are correct.
VOLUME $IPFS_PATH
# The default logging level

View File

@ -108,7 +108,7 @@ For Linux and MacOSX you can use the purely functional package manager [Nix](htt
$ nix-env -i ipfs
```
You can also install the Package by using it's attribute name, which is also `ipfs`.
You can also install the Package by using its attribute name, which is also `ipfs`.
#### Guix
@ -127,7 +127,7 @@ In solus, go-ipfs is available in the main repository as
$ sudo eopkg install go-ipfs
```
You can also install it trough the Solus software center.
You can also install it through the Solus software center.
#### Snap

View File

@ -29,7 +29,7 @@ PREFIX=$(expr "$0" : "\(.*\/\)") || PREFIX='./'
# Include the 'check_at_least_version' function
. ${PREFIX}check_version
# Check that the go binary exist and is in the path
# Check that the go binary exists and is in the path
GOCC=${GOCC="go"}

View File

@ -15,7 +15,7 @@ import (
// If a block was removed successfully than the Error string will be
// empty. If a block could not be removed than Error will contain the
// reason the block could not be removed. If the removal was aborted
// due to a fatal error Hash will be be empty, Error will contain the
// due to a fatal error Hash will be empty, Error will contain the
// reason, and no more results will be sent.
type RemovedBlock struct {
Hash string `json:",omitempty"`

View File

@ -122,7 +122,7 @@ You can setup CORS headers the same way:
Shutdown
To shutdown the daemon, send a SIGINT signal to it (e.g. by pressing 'Ctrl-C')
To shut down the daemon, send a SIGINT signal to it (e.g. by pressing 'Ctrl-C')
or send a SIGTERM signal to it (e.g. with 'kill'). It may take a while for the
daemon to shutdown gracefully, but it can be killed forcibly by sending a
second signal.
@ -434,7 +434,7 @@ func daemonFunc(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment
}()
// collect long-running errors and block for shutdown
// TODO(cryptix): our fuse currently doesnt follow this pattern for graceful shutdown
// TODO(cryptix): our fuse currently doesn't follow this pattern for graceful shutdown
var errs error
for err := range merge(apiErrc, gwErrc, gcErrc) {
if err != nil {
@ -489,7 +489,7 @@ func serveHTTPApi(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, error
}
for _, listener := range listeners {
// we might have listened to /tcp/0 - lets see what we are listing on
// we might have listened to /tcp/0 - let's see what we are listening on
fmt.Printf("API server listening on %s\n", listener.Multiaddr())
// Browsers require TCP.
switch listener.Addr().Network() {
@ -625,7 +625,7 @@ func serveHTTPGateway(req *cmds.Request, cctx *oldcmds.Context) (<-chan error, e
listeners = append(listeners, gwLis)
}
// we might have listened to /tcp/0 - lets see what we are listing on
// we might have listened to /tcp/0 - let's see what we are listening on
gwType := "readonly"
if writable {
gwType = "writable"

View File

@ -66,8 +66,8 @@ func ManageFdLimit() (changed bool, newLimit uint64, err error) {
// the soft limit is the value that the kernel enforces for the
// corresponding resource
// the hard limit acts as a ceiling for the soft limit
// an unprivileged process may only set it's soft limit to a
// alue in the range from 0 up to the hard limit
// an unprivileged process may only set its soft limit to a
// value in the range from 0 up to the hard limit
err = setLimit(targetLimit, targetLimit)
switch err {
case nil:
@ -82,7 +82,7 @@ func ManageFdLimit() (changed bool, newLimit uint64, err error) {
// set the soft value
err = setLimit(targetLimit, hard)
if err != nil {
err = fmt.Errorf("error setting ulimit wihout hard limit: %s", err)
err = fmt.Errorf("error setting ulimit without hard limit: %s", err)
break
}
newLimit = targetLimit

View File

@ -115,7 +115,7 @@ The optional format string is a printf style format string:
type CidFormatRes struct {
CidStr string // Original Cid String passed in
Formatted string // Formated Result
Formatted string // Formatted Result
ErrorMsg string // Error
}
@ -255,7 +255,7 @@ var basesCmd = &cmds.Command{
Tagline: "List available multibase encodings.",
},
Options: []cmds.Option{
cmds.BoolOption(prefixOptionName, "also include the single leter prefixes in addition to the code"),
cmds.BoolOption(prefixOptionName, "also include the single letter prefixes in addition to the code"),
cmds.BoolOption(numericOptionName, "also include numeric codes"),
},
Run: func(req *cmds.Request, resp cmds.ResponseEmitter, env cmds.Environment) error {

View File

@ -26,7 +26,7 @@ func (e *commandEncoder) Encode(v interface{}) error {
)
if cmd, ok = v.(*Command); !ok {
return fmt.Errorf(`core/commands: uenxpected type %T, expected *"core/commands".Command`, v)
return fmt.Errorf(`core/commands: unexpected type %T, expected *"core/commands".Command`, v)
}
for _, s := range cmdPathStrings(cmd, cmd.showOpts) {

View File

@ -217,7 +217,7 @@ var DagResolveCmd = &cmds.Command{
Helptext: cmds.HelpText{
Tagline: "Resolve ipld block",
ShortDescription: `
'ipfs dag resolve' fetches a dag node from ipfs, prints it's address and remaining path.
'ipfs dag resolve' fetches a dag node from ipfs, prints its address and remaining path.
`,
},
Arguments: []cmds.Argument{
@ -366,7 +366,7 @@ Maximum supported CAR version: 1
// to the Pinning interface, sigh...
//
// If we didn't have the problem of inability to take multiple pinlocks,
// we could use the Api directly like so (though internally it does the same):
// we could use the api directly like so (though internally it does the same):
//
// // not ideal, but the pinning api takes only paths :(
// rp := path.NewResolvedPath(
@ -462,7 +462,7 @@ func importWorker(req *cmds.Request, re cmds.ResponseEmitter, api iface.CoreAPI,
//
// every single file in it() is already open before we start
// just close here sooner rather than later for neatness
// and to surface potential erorrs writing on closed fifos
// and to surface potential errors writing on closed fifos
// this won't/can't help with not running out of handles
err := func() error {
defer file.Close()

View File

@ -712,7 +712,7 @@ Newly created leaves will be in the legacy format (Protobuf) if the
CID version is 0, or raw if the CID version is non-zero. Use of the
'--raw-leaves' option will override this behavior.
If the '--flush' option is set to false, changes will not be propogated to the
If the '--flush' option is set to false, changes will not be propagated to the
merkledag root. This can make operations much faster when doing a large number
of writes to a deeper directory structure.
@ -1166,7 +1166,7 @@ func getFileHandle(r *mfs.Root, path string, create bool, builder cid.Builder) (
return nil, err
}
// if create is specified and the file doesnt exist, we create the file
// if create is specified and the file doesn't exist, we create the file
dirname, fname := gopath.Split(path)
pdir, err := getParentDir(r, dirname)
if err != nil {
@ -1191,7 +1191,7 @@ func getFileHandle(r *mfs.Root, path string, create bool, builder cid.Builder) (
fi, ok := fsn.(*mfs.File)
if !ok {
return nil, errors.New("expected *mfs.File, didnt get it. This is likely a race condition")
return nil, errors.New("expected *mfs.File, didn't get it. This is likely a race condition")
}
return fi, nil
@ -1224,7 +1224,7 @@ func getParentDir(root *mfs.Root, dir string) (*mfs.Directory, error) {
pdir, ok := parent.(*mfs.Directory)
if !ok {
return nil, errors.New("expected *mfs.Directory, didnt get it. This is likely a race condition")
return nil, errors.New("expected *mfs.Directory, didn't get it. This is likely a race condition")
}
return pdir, nil
}

View File

@ -155,7 +155,7 @@ func makeProgressBar(out io.Writer, l int64) *pb.ProgressBar {
bar.Output = out
// the progress bar lib doesn't give us a way to get the width of the output,
// so as a hack we just use a callback to measure the output, then git rid of it
// so as a hack we just use a callback to measure the output, then get rid of it
bar.Callback = func(line string) {
terminalWidth := len(line)
bar.Callback = nil

View File

@ -522,8 +522,8 @@ var updatePinCmd = &cmds.Command{
Efficiently pins a new object based on differences from an existing one and,
by default, removes the old pin.
This commands is useful when the new pin contains many similarities or is a
derivative of an existing one, particuarly for large objects. This allows a more
This command is useful when the new pin contains many similarities or is a
derivative of an existing one, particularly for large objects. This allows a more
efficient DAG-traversal which fully skips already-pinned branches from the old
object. As a requirement, the old object needs to be an existing recursive
pin.

View File

@ -73,7 +73,7 @@ func ListenAndServe(n *core.IpfsNode, listeningMultiAddr string, options ...Serv
return err
}
// we might have listened to /tcp/0 - lets see what we are listing on
// we might have listened to /tcp/0 - let's see what we are listening on
addr = list.Multiaddr()
fmt.Printf("API server listening on %s\n", addr)

View File

@ -228,7 +228,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
w.Header().Set("Etag", etag)
// set these headers _after_ the error, for we may just not have it
// and dont want the client to cache a 500 response...
// and don't want the client to cache a 500 response...
// and only if it's /ipfs!
// TODO: break this out when we split /ipfs /ipns routes.
modtime := time.Now()
@ -321,7 +321,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
// keep backlink
case len(pathSplit) == 4 && pathSplit[3] == "": // url: /ipfs/$hash/
// add the correct link depending on wether the path ends with a slash
// add the correct link depending on whether the path ends with a slash
default:
if strings.HasSuffix(backLink, "/") {
backLink += "./.."

View File

@ -87,7 +87,7 @@ func TestAddMultipleGCLive(t *testing.T) {
// GC shouldn't get the lock until after the file is completely added
select {
case <-gc1started:
t.Fatal("gc shouldnt have started yet")
t.Fatal("gc shouldn't have started yet")
default:
}
@ -118,7 +118,7 @@ func TestAddMultipleGCLive(t *testing.T) {
select {
case <-gc2started:
t.Fatal("gc shouldnt have started yet")
t.Fatal("gc shouldn't have started yet")
default:
}
@ -192,7 +192,7 @@ func TestAddGCLive(t *testing.T) {
case o := <-out:
addedHashes[o.(*coreiface.AddEvent).Path.Cid().String()] = struct{}{}
case <-addDone:
t.Fatal("add shouldnt complete yet")
t.Fatal("add shouldn't complete yet")
}
var gcout <-chan gc.Result
@ -202,14 +202,14 @@ func TestAddGCLive(t *testing.T) {
gcout = gc.GC(context.Background(), node.Blockstore, node.Repo.Datastore(), node.Pinning, nil)
}()
// gc shouldnt start until we let the add finish its current file.
// gc shouldn't start until we let the add finish its current file.
if _, err := pipew.Write([]byte("some data for file b")); err != nil {
t.Fatal(err)
}
select {
case <-gcstarted:
t.Fatal("gc shouldnt have started yet")
t.Fatal("gc shouldn't have started yet")
default:
}

View File

@ -40,7 +40,7 @@ $(d)/ipfs: $(d)/main
CLEAN += $(d)/ipfs
ifneq ($(filter coverage%,$(MAKECMDGOALS)),)
# this is quite hacky but it is best way I could fiture out
# this is quite hacky but it is best way I could figure out
DEPS_test/sharness += cmd/ipfs/ipfs-test-cover $(d)/coverage_deps $(d)/ipfs
endif

View File

@ -620,7 +620,7 @@ Below is a list of the most common public gateway setups.
### `Identity.PeerID`
The unique PKI identity label for this configs peer. Set on init and never read,
its merely here for convenience. Ipfs will always generate the peerID from its
it's merely here for convenience. Ipfs will always generate the peerID from its
keypair at runtime.
### `Identity.PrivKey`
@ -694,7 +694,7 @@ An array of addresses (multiaddr netmasks) to not dial. By default, IPFS nodes
advertise _all_ addresses, even internal ones. This makes it easier for nodes on
the same network to reach each other. Unfortunately, this means that an IPFS
node will try to connect to one or more private IP addresses whenever dialing
another node, even if this other node is on a different network. This may may
another node, even if this other node is on a different network. This may
trigger netscan alerts on some hosting providers or cause strain in some setups.
The `server` configuration profile fills up this list with sensible defaults,

View File

@ -34,7 +34,7 @@ If you feel intrepid, you can dump this information and investigate it yourself:
### Analyzing the stack dump
The first thing to look for is hung goroutines -- any goroutine thats been stuck
The first thing to look for is hung goroutines -- any goroutine that's been stuck
for over a minute will note that in the trace. It looks something like:
```

View File

@ -84,7 +84,7 @@ Default: https://ipfs.io/ipfs/$something (depends on the IPFS version)
## `IPFS_NS_MAP`
Adds static namesys records for deteministic tests and debugging.
Adds static namesys records for deterministic tests and debugging.
Useful for testing things like DNSLink without real DNS lookup.
Example:

View File

@ -271,7 +271,7 @@ func TestFileSizeReporting(t *testing.T) {
}
}
// Test to make sure you cant create multiple entries with the same name
// Test to make sure you can't create multiple entries with the same name
func TestDoubleEntryFailure(t *testing.T) {
if testing.Short() {
t.SkipNow()
@ -458,7 +458,7 @@ func TestFSThrash(t *testing.T) {
}
if !bytes.Equal(data, out) {
t.Errorf("Data didnt match in %s: expected %v, got %v", name, data, out)
t.Errorf("Data didn't match in %s: expected %v, got %v", name, data, out)
}
}
}

View File

@ -42,7 +42,7 @@ For more help, see:
`
// errStrNoFuseHeaders is included in the output of `go get <fuseVersionPkg>` if there
// are no fuse headers. this means they dont have OSXFUSE installed.
// are no fuse headers. this means they don't have OSXFUSE installed.
var errStrNoFuseHeaders = "no such file or directory: '/usr/local/lib/libosxfuse.dylib'"
var errStrUpgradeFuse = `OSXFUSE version %s not supported.
@ -208,10 +208,10 @@ func ensureFuseVersionIsInstalled() error {
cmd.Stdout = cmdout
cmd.Stderr = cmdout
if err := cmd.Run(); err != nil {
// Ok, install fuse-version failed. is it they dont have fuse?
// Ok, install fuse-version failed. is it they don't have fuse?
cmdoutstr := cmdout.String()
if strings.Contains(cmdoutstr, errStrNoFuseHeaders) {
// yes! it is! they dont have fuse!
// yes! it is! they don't have fuse!
return fmt.Errorf(errStrFuseRequired)
}
@ -233,7 +233,7 @@ func ensureFuseVersionIsInstalled() error {
func userAskedToSkipFuseCheck(node *core.IpfsNode) (skip bool, err error) {
val, err := node.Repo.GetConfigKey(dontCheckOSXFUSEConfigKey)
if err != nil {
return false, nil // failed to get config value. dont skip check.
return false, nil // failed to get config value. don't skip check.
}
switch val := val.(type) {
@ -242,7 +242,7 @@ func userAskedToSkipFuseCheck(node *core.IpfsNode) (skip bool, err error) {
case bool:
return val, nil
default:
// got config value, but it's invalid... dont skip check, ask the user to fix it...
// got config value, but it's invalid... don't skip check, ask the user to fix it...
return false, fmt.Errorf(errStrFixConfig, dontCheckOSXFUSEConfigKey, val,
dontCheckOSXFUSEConfigKey)
}

View File

@ -224,7 +224,7 @@ func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
}
func (s *Node) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
// TODO: is nil the right response for 'bug off, we aint got none' ?
// TODO: is nil the right response for 'bug off, we ain't got none' ?
resp.Xattr = nil
return nil
}

View File

@ -250,7 +250,7 @@ func ColoredSet(ctx context.Context, pn pin.Pinner, ng ipld.NodeGetter, bestEffo
}
// ErrCannotFetchAllLinks is returned as the last Result in the GC output
// channel when there was a error creating the marked set because of a
// channel when there was an error creating the marked set because of a
// problem when finding descendants.
var ErrCannotFetchAllLinks = errors.New("garbage collection aborted: could not retrieve some links")

View File

@ -19,7 +19,7 @@ var codec = base32.StdEncoding.WithPadding(base32.NoPadding)
// Keystore provides a key management interface
type Keystore interface {
// Has returns whether or not a key exist in the Keystore
// Has returns whether or not a key exists in the Keystore
Has(string) (bool, error)
// Put stores a key in the Keystore, if a key with the same name already exists, returns ErrKeyExists
Put(string, ci.PrivKey) error
@ -57,7 +57,7 @@ func NewFSKeystore(dir string) (*FSKeystore, error) {
return &FSKeystore{dir}, nil
}
// Has returns whether or not a key exist in the Keystore
// Has returns whether or not a key exists in the Keystore
func (ks *FSKeystore) Has(name string) (bool, error) {
name, err := encode(name)
if err != nil {

View File

@ -137,7 +137,7 @@ func TestKeystoreBasics(t *testing.T) {
}
if err := ks.Put("", k1); err == nil {
t.Fatal("shouldnt be able to put a key with no name")
t.Fatal("shouldn't be able to put a key with no name")
}
if err := ks.Put(".foo", k1); err != nil {
@ -238,7 +238,7 @@ func assertGetKey(ks Keystore, name string, exp ci.PrivKey) error {
}
if !outK.Equals(exp) {
return fmt.Errorf("key we got out didnt match expectation")
return fmt.Errorf("key we got out didn't match expectation")
}
return nil

View File

@ -17,7 +17,7 @@ func NewMemKeystore() *MemKeystore {
return &MemKeystore{make(map[string]ci.PrivKey)}
}
// Has return whether or not a key exist in the Keystore
// Has return whether or not a key exists in the Keystore
func (mk *MemKeystore) Has(name string) (bool, error) {
_, ok := mk.keys[name]
return ok, nil

View File

@ -90,7 +90,7 @@ func TestMemKeyStoreBasics(t *testing.T) {
}
if err := ks.Put("", k1); err == nil {
t.Fatal("shouldnt be able to put a key with no name")
t.Fatal("shouldn't be able to put a key with no name")
}
if err := ks.Put(".foo", k1); err != nil {

View File

@ -172,9 +172,9 @@ _ipfs_dag_get()
_ipfs_dag_put()
{
if [[ ${prev} == "--format" ]] ; then
_ipfs_comp "cbor placeholder1" # TODO: a) Which format more then cbor is valid? b) Solve autocomplete bug for "="
_ipfs_comp "cbor placeholder1" # TODO: a) Which format more than cbor is valid? b) Solve autocomplete bug for "="
elif [[ ${prev} == "--input-enc" ]] ; then
_ipfs_comp "json placeholder1" # TODO: a) Which format more then json is valid? b) Solve autocomplete bug for "="
_ipfs_comp "json placeholder1" # TODO: a) Which format more than json is valid? b) Solve autocomplete bug for "="
elif [[ ${word} == -* ]] ; then
_ipfs_comp "--format= --input-enc= --help"
else
@ -227,7 +227,7 @@ _ipfs_diag_cmds()
if [[ ${prev} == "clear" ]] ; then
return 0
elif [[ ${prev} =~ ^-?[0-9]+$ ]] ; then
_ipfs_comp "ns us µs ms s m h" # TODO: Trigger with out space, eg. "ipfs diag set-time 10ns" not "... set-time 10 ns"
_ipfs_comp "ns us µs ms s m h" # TODO: Trigger without space, eg. "ipfs diag set-time 10ns" not "... set-time 10 ns"
elif [[ ${prev} == "set-time" ]] ; then
_ipfs_help_only
elif [[ ${word} == -* ]] ; then

View File

@ -45,7 +45,7 @@ func NewNameSystem(r routing.ValueStore, ds ds.Datastore, cachesize int) NameSys
cache, _ = lru.New(cachesize)
}
// Prewarm namesys cache with static records for deteministic tests and debugging.
// Prewarm namesys cache with static records for deterministic tests and debugging.
// Useful for testing things like DNSLink without real DNS lookup.
// Example:
// IPFS_NS_MAP="dnslink-test.example.com:/ipfs/bafkreicysg23kiwv34eg2d7qweipxwosdo2py4ldv42nbauguluen5v6am"

View File

@ -92,7 +92,7 @@ func TestRepublish(t *testing.T) {
// The republishers that are contained within the nodes have their timeout set
// to 12 hours. Instead of trying to tweak those, we're just going to pretend
// they dont exist and make our own.
// they don't exist and make our own.
repub := NewRepublisher(rp, publisher.Repo.Datastore(), publisher.PrivateKey, publisher.Repo.Keystore())
repub.Interval = time.Second
repub.RecordLifetime = time.Second * 5

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash
dir=${1:?first paramater with dir to work in is required}
dir=${1:?first parameter with dir to work in is required}
pkg=${2:?second parameter with full name of the package is required}
main_pkg="$dir/main"

View File

@ -116,7 +116,7 @@ func verifyMigrationSupportsVersion(fsrbin string, vn int) error {
return nil
}
return fmt.Errorf("migrations binary doesnt support version %d: %s", vn, fsrbin)
return fmt.Errorf("migrations binary doesn't support version %d: %s", vn, fsrbin)
}
func migrationsVersion(bin string) (int, error) {
@ -163,7 +163,7 @@ func GetLatestVersion(ipfspath, dist string) (string, error) {
}
}
if latest == "" {
return "", fmt.Errorf("couldnt find a non dev version in the list")
return "", fmt.Errorf("couldn't find a non dev version in the list")
}
return vs[len(vs)-1], nil
}

View File

@ -12,7 +12,7 @@ type OnlyOne struct {
}
// Open a Repo identified by key. If Repo is not already open, the
// open function is called, and the result is remember for further
// open function is called, and the result is remembered for further
// use.
//
// Key must be comparable, or Open will panic. Make sure to pick keys

View File

@ -9,7 +9,7 @@ fi
# {data, server, client, bootstrap}
tag=zaqwsx_ipfs-test-img
# could use set -v, but i dont want to see the comments...
# could use set -v, but i don't want to see the comments...
img=$(docker images | grep $1 | awk '{print $3}')
echo "using docker image: $img ($1)"

View File

@ -9,7 +9,7 @@ shellquote() {
_space=''
for _arg
do
# On Mac OS, sed adds a newline character.
# On macOS, sed adds a newline character.
# With a printf wrapper the extra newline is removed.
printf "$_space'%s'" "$(printf "%s" "$_arg" | sed -e "s/'/'\\\\''/g;")"
_space=' '

View File

@ -107,7 +107,7 @@ test_expect_success "'ipfs add --help' succeeds" '
# other tests here...
# dont forget to kill the daemon!!
# don't forget to kill the daemon!!
test_kill_ipfs_daemon
```
@ -126,6 +126,6 @@ test_mount_ipfs
# tests mounted here
# dont forget to kill the daemon!!
# don't forget to kill the daemon!!
test_kill_ipfs_daemon
```

View File

@ -44,7 +44,7 @@ startup_cluster() {
'
fi
test_expect_success "connect nodes to eachother" '
test_expect_success "connect nodes to each other" '
iptb connect [1-$bound] 0
'

View File

@ -67,9 +67,9 @@ test_expect_success "All sub-commands accept help" '
while read -r cmd
do
${cmd:0:4} help ${cmd:5} >/dev/null ||
{ echo "$cmd doesnt accept --help"; echo 1 > fail; }
{ echo "$cmd does not accept --help"; echo 1 > fail; }
echo stuff | $cmd --help >/dev/null ||
{ echo "$cmd doesnt accept --help when using stdin"; echo 1 > fail; }
{ echo "$cmd does not accept --help when using stdin"; echo 1 > fail; }
done <commands.txt
if [ $(cat fail) = 1 ]; then
@ -82,9 +82,9 @@ test_expect_success "All commands accept --help" '
while read -r cmd
do
$cmd --help >/dev/null ||
{ echo "$cmd doesnt accept --help"; echo 1 > fail; }
{ echo "$cmd does not accept --help"; echo 1 > fail; }
echo stuff | $cmd --help >/dev/null ||
{ echo "$cmd doesnt accept --help when using stdin"; echo 1 > fail; }
{ echo "$cmd does not accept --help when using stdin"; echo 1 > fail; }
done <commands.txt
if [ $(cat fail) = 1 ]; then

View File

@ -8,7 +8,7 @@ test_description="Test init command"
. lib/test-lib.sh
# test that ipfs fails to init if IPFS_PATH isnt writeable
# test that ipfs fails to init if IPFS_PATH isn't writeable
test_expect_success "create dir and change perms succeeds" '
export IPFS_PATH="$(pwd)/.badipfs" &&
mkdir "$IPFS_PATH" &&
@ -114,7 +114,7 @@ test_expect_success "'ipfs init --empty-repo' output looks good" '
test_cmp expected actual_init
'
test_expect_success "Welcome readme doesn't exists" '
test_expect_success "Welcome readme doesn't exist" '
test_must_fail ipfs cat /ipfs/$HASH_WELCOME_DOCS/readme
'

View File

@ -8,7 +8,7 @@ test_description="Test mount command"
. lib/test-lib.sh
# if in travis CI, dont test mount (no fuse)
# if in travis CI, don't test mount (no fuse)
if ! test_have_prereq FUSE; then
skip_all='skipping mount tests, fuse not available'

View File

@ -5,7 +5,7 @@ test_description="Test mount command in conjunction with publishing"
# imports
. lib/test-lib.sh
# if in travis CI, dont test mount (no fuse)
# if in travis CI, don't test mount (no fuse)
if ! test_have_prereq FUSE; then
skip_all='skipping mount tests, fuse not available'

View File

@ -13,7 +13,7 @@ test_add_cat_file() {
ipfs add --help 2> add_help_err1 > /dev/null
'
test_expect_success "stdin reading message doesnt show up" '
test_expect_success "stdin reading message doesn't show up" '
test_expect_code 1 grep "ipfs: Reading from" add_help_err1 &&
test_expect_code 1 grep "send Ctrl-d to stop." add_help_err1
'
@ -22,7 +22,7 @@ test_add_cat_file() {
ipfs help add 2> add_help_err2 > /dev/null
'
test_expect_success "stdin reading message doesnt show up" '
test_expect_success "stdin reading message doesn't show up" '
test_expect_code 1 grep "ipfs: Reading from" add_help_err2 &&
test_expect_code 1 grep "send Ctrl-d to stop." add_help_err2
'
@ -83,7 +83,7 @@ test_add_cat_file() {
test_cmp expected actual
'
test_expect_success "ipfs cat from negitive offset should fail" '
test_expect_success "ipfs cat from negative offset should fail" '
test_expect_code 1 ipfs cat --offset -102 "$HASH" > actual
'
@ -132,7 +132,7 @@ test_add_cat_file() {
test_cmp expected actual
'
test_expect_success "ipfs cat with negitive length should fail" '
test_expect_success "ipfs cat with negative length should fail" '
test_expect_code 1 ipfs cat --length -102 "$HASH" > actual
'

View File

@ -228,7 +228,7 @@ test_expect_success "can read block with different hash" '
# Misc tests
#
test_expect_success "'ipfs block stat' with nothing from stdin doesnt crash" '
test_expect_success "'ipfs block stat' with nothing from stdin doesn't crash" '
test_expect_code 1 ipfs block stat < /dev/null 2> stat_out
'

View File

@ -206,7 +206,7 @@ test_object_cmd() {
test_cmp expected actual
'
test_expect_success "after gc, objects still acessible" '
test_expect_success "after gc, objects still accessible" '
ipfs repo gc > /dev/null &&
ipfs refs -r --timeout=2s $HASH > /dev/null
'

View File

@ -97,7 +97,7 @@ test_dag_cmd() {
ipfs pin add $EXPHASH
'
test_expect_success "after gc, objects still acessible" '
test_expect_success "after gc, objects still accessible" '
ipfs repo gc > /dev/null &&
ipfs refs -r --timeout=2s $EXPHASH > /dev/null
'
@ -110,7 +110,7 @@ test_dag_cmd() {
grep "{\"/\":\"" ipld_obj_out > /dev/null
'
test_expect_success "retreived object hashes back correctly" '
test_expect_success "retrieved object hashes back correctly" '
IPLDHASH2=$(cat ipld_obj_out | ipfs dag put) &&
test "$IPLDHASH" = "$IPLDHASH2"
'
@ -153,7 +153,7 @@ test_dag_cmd() {
PINHASH=$(printf {\"foo\":\"bar\"} | ipfs dag put --pin=true)
'
test_expect_success "after gc, objects still acessible" '
test_expect_success "after gc, objects still accessible" '
ipfs repo gc > /dev/null &&
ipfs refs -r --timeout=2s $PINHASH > /dev/null
'

View File

@ -184,7 +184,7 @@ test_expect_success "daemon actually can handle 2048 file descriptors" '
hang-fds -hold=2s 2000 '$API_MADDR' > /dev/null
'
test_expect_success "daemon didnt throw any errors" '
test_expect_success "daemon didn't throw any errors" '
test_expect_code 1 grep "too many open files" daemon_err
'

View File

@ -66,7 +66,7 @@ test_client_suite() {
# first, test things without daemon, without /api file
# with no daemon, everything should fail
# (using unreachable because API_MADDR doesnt get set until daemon start)
# (using unreachable because API_MADDR doesn't get set until daemon start)
test_client_suite "(daemon off, no --api, no /api file)" false false "$api_unreachable" "$api_other"

View File

@ -33,7 +33,7 @@ test_expect_success "'ipfs repo gc' looks good (patch root)" '
grep -v "removed $HASH" gc_out_actual
'
test_expect_success "'ipfs repo gc' doesnt remove file" '
test_expect_success "'ipfs repo gc' doesn't remove file" '
ipfs cat "$HASH" >out &&
test_cmp out afile
'

View File

@ -35,7 +35,7 @@ test_gc_robust_part1() {
test_must_fail ipfs cat $HASH1
'
test_expect_success "'ipfs repo gc' should still be be fine" '
test_expect_success "'ipfs repo gc' should still be fine" '
ipfs repo gc
'

View File

@ -156,7 +156,7 @@ test_get_cmd() {
}
test_get_fail() {
test_expect_success "create an object that has unresolveable links" '
test_expect_success "create an object that has unresolvable links" '
cat <<-\EOF >bad_object &&
{ "Links": [ { "Name": "foo", "Hash": "QmZzaC6ydNXiR65W8VjGA73ET9MZ6VFAqUT1ngYMXcpihn", "Size": 1897 }, { "Name": "bar", "Hash": "Qmd4mG6pDFDmDTn6p3hX1srP8qTbkyXKj5yjpEsiHDX3u8", "Size": 56 }, { "Name": "baz", "Hash": "QmUTjwRnG28dSrFFVTYgbr6LiDLsBmRr2SaUSTGheK2YqG", "Size": 24266 } ], "Data": "\b\u0001" }
EOF

View File

@ -64,7 +64,7 @@ test_expect_success "GET IPFS directory file output looks good" '
test_cmp dir/test actual
'
test_expect_success "GET IPFS non existent file returns code expected (404)" '
test_expect_success "GET IPFS nonexistent file returns code expected (404)" '
test_curl_resp_http_code "http://127.0.0.1:$port/ipfs/$HASH2/pleaseDontAddMe" "HTTP/1.1 404 Not Found"
'
@ -250,7 +250,7 @@ test_expect_success "try fetching not present ipns key from node 0" '
test_expect_code 22 curl -f "http://127.0.0.1:$GWPORT/ipns/$PEERID_1"
'
test_expect_success "try fetching present key from from node 0" '
test_expect_success "try fetching present key from node 0" '
BAR=$(echo "bar" | ipfsi 0 add -Q) &&
curl -f "http://127.0.0.1:$GWPORT/ipfs/$BAR"
'

View File

@ -67,7 +67,7 @@ test_localhost_gateway_response_should_contain() {
"
}
# Helper that checks gateway resonse for specific hostname in Host header
# Helper that checks gateway response for specific hostname in Host header
test_hostname_gateway_response_should_contain() {
local label="$1"
local hostname="$2"

View File

@ -60,7 +60,7 @@ test_expect_success "add a file on a node in client mode" '
FILE_HASH=$(ipfsi 8 add -q filea)
'
test_expect_success "retrieve that file on a client mode node" '
test_expect_success "retrieve that file on a node in client mode" '
check_file_fetch 9 $FILE_HASH filea
'

View File

@ -40,7 +40,7 @@ test_expect_success "output looks good" '
addr="/ip4/127.0.0.1/tcp/9898/p2p/QmUWKoHbjsqsSMesRC2Zoscs8edyFz6F77auBB1YBBhgpX"
test_expect_success "cant trigger a dial backoff with swarm connect" '
test_expect_success "can't trigger a dial backoff with swarm connect" '
test_expect_code 1 ipfs swarm connect $addr 2> connect_out
test_expect_code 1 ipfs swarm connect $addr 2>> connect_out
test_expect_code 1 ipfs swarm connect $addr 2>> connect_out

View File

@ -41,7 +41,7 @@ test_expect_success 'check subscriptions' '
test_cmp expected subs2
'
test_expect_success 'add an obect on publisher node' '
test_expect_success 'add an object on publisher node' '
echo "ipns is super fun" > file &&
HASH_FILE=$(ipfsi 0 add -q file)
'

View File

@ -445,7 +445,7 @@ test_files_api() {
test_cmp filehash_expected filehash
'
test_expect_success "cant write to negative offset $EXTRA" '
test_expect_success "can't write to negative offset $EXTRA" '
test_expect_code 1 ipfs files write $ARGS $RAW_LEAVES --offset -1 /cats/ipfs < output
'

View File

@ -48,7 +48,7 @@ test_filestore_adds() {
assert_repo_size_less_than 1000000
test_expect_success "normal add with fscache doesnt duplicate data" '
test_expect_success "normal add with fscache doesn't duplicate data" '
ipfs add --raw-leaves --fscache -r -q somedir > /dev/null
'

View File

@ -125,7 +125,7 @@ EOF
test_cmp verify_expect_2 verify_actual_2
'
test_expect_success "files can not be retrieved via the urlstore" '
test_expect_success "files cannot be retrieved via the urlstore" '
test_must_fail ipfs cat $HASH1 > /dev/null &&
test_must_fail ipfs cat $HASH2 > /dev/null
'
@ -164,7 +164,7 @@ EOF
test_kill_ipfs_daemon
test_expect_success "files can not be retrieved via the urlstore" '
test_expect_success "files cannot be retrieved via the urlstore" '
test_must_fail ipfs cat $HASH1 > /dev/null &&
test_must_fail ipfs cat $HASH2 > /dev/null &&
test_must_fail ipfs cat $HASH3 > /dev/null

View File

@ -10,7 +10,7 @@ test_description="Test API add command"
test_init_ipfs
# Verify that that API add command returns size
# Verify that the API add command returns size
test_launch_ipfs_daemon
test_expect_success "API Add response includes size field" '

View File

@ -19,7 +19,7 @@ test_expect_success "ipfs refs local over HTTP API returns NDJOSN not flat - #28
curl -X POST "http://$API_ADDR/api/v0/refs/local" | grep "Ref" | grep "Err"
'
test_expect_success "args expecting stdin dont crash when not given" '
test_expect_success "args expecting stdin don't crash when not given" '
curl -X POST "$API_ADDR/api/v0/bootstrap/add" > result
'

View File

@ -170,7 +170,7 @@ do
done
# The following will allow us to check that
# we are properly excuding enough stuff using:
# we are properly excluding enough stuff using:
# diff -u ipfs_cmd_result.txt cmd_found.txt
log "Get all the line commands that matched"
CMD_FOUND="$TMPDIR/cmd_found.txt"

View File

@ -1,5 +1,5 @@
thirdparty consists of Golang packages that contain no go-ipfs dependencies and
may be vendored ipfs/go-ipfs at a later date.
packages in under this directory _must not_ import packages under
packages under this directory _must not_ import packages under
`ipfs/go-ipfs` that are not also under `thirdparty`.

View File

@ -1,5 +1,5 @@
// Package notifier provides a simple notification dispatcher
// meant to be embedded in larger structres who wish to allow
// meant to be embedded in larger structures who wish to allow
// clients to sign up for event notifications.
package notifier