mirror of https://github.com/ipfs/kubo.git synced 2025-06-28 08:47:42 +08:00

Merge pull request #844 from jbenet/tests-fix

fix the tests
Juan Batiz-Benet
2015-03-04 08:39:26 -08:00
17 changed files with 186 additions and 41 deletions

Godeps/Godeps.json (generated)

@@ -151,6 +151,10 @@
   {
     "ImportPath": "github.com/jbenet/go-datastore",
     "Rev": "35738aceb35505bd3c77c2a618fb1947ca3f72da"
   },
+  {
+    "ImportPath": "github.com/jbenet/go-detect-race",
+    "Rev": "3463798d9574bd0b7eca275dccc530804ff5216f"
+  },
   {
     "ImportPath": "github.com/jbenet/go-fuse-version",
     "Rev": "b733dfc0597e1f6780510ee7afad8b6e3c7af3eb"


@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Juan Batiz-Benet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -0,0 +1,33 @@
# go-detect-race

Check if the race detector is running.

I didn't find a variable to check quickly enough, so I made this.

## Usage

```go
import (
  detectrace "github.com/jbenet/go-detect-race"
)

func main() {
  if detectrace.WithRace() {
    // running with -race
  } else {
    // running without -race
  }
}
```

## Why?

Because the race detector doesn't like massive stress tests. Example:
https://groups.google.com/forum/#!topic/golang-nuts/XDPHUt2LE70

## Why didn't you just use...

Please tell me about a better way of doing this. It wasn't
readily apparent to me, so I made this. But I would much prefer
an env var or some already existing var from the stdlib :)


@@ -0,0 +1,7 @@
package detectrace

// WithRace returns whether the binary was compiled
// with the race flag on.
func WithRace() bool {
    return withRace
}


@@ -0,0 +1,9 @@
package detectrace

import (
    "testing"
)

func TestWithRace(t *testing.T) {
    t.Logf("WithRace() is %v\n", WithRace())
}


@@ -0,0 +1,5 @@
// +build !race

package detectrace

const withRace = false


@@ -0,0 +1,5 @@
// +build race

package detectrace

const withRace = true
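Aside: the `// +build race` / `// +build !race` lines above are the pre-Go-1.17 constraint syntax; on newer toolchains the same file pair would carry the `//go:build` form (the `race` tag is set automatically when building with `-race`). A minimal sketch of the race-tagged variant in that newer syntax, for reference only:

```go
//go:build race

package detectrace

// withRace is true only when this file is selected, i.e. under `go build -race`.
const withRace = true
```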


@@ -215,9 +215,10 @@ func daemonFunc(req cmds.Request, res cmds.Response) {
     if rootRedirect != nil {
         opts = append(opts, rootRedirect)
     }
-    fmt.Printf("Gateway server listening on %s\n", gatewayMaddr)
     if writable {
-        fmt.Printf("Gateway server is writable\n")
+        fmt.Printf("Gateway (writable) server listening on %s\n", gatewayMaddr)
+    } else {
+        fmt.Printf("Gateway (readonly) server listening on %s\n", gatewayMaddr)
     }
     err := corehttp.ListenAndServe(node, gatewayMaddr.String(), opts...)
     if err != nil {


@@ -6,7 +6,9 @@ import (
     "testing"
     "time"

+    detectrace "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-detect-race"
+
     context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
     blocks "github.com/jbenet/go-ipfs/blocks"
     blocksutil "github.com/jbenet/go-ipfs/blocks/blocksutil"
     tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet"
@@ -93,9 +95,15 @@ func TestLargeSwarm(t *testing.T) {
     if testing.Short() {
         t.SkipNow()
     }
-    t.Parallel()
     numInstances := 500
     numBlocks := 2
+    if detectrace.WithRace() {
+        // when running with the race detector, 500 instances launches
+        // well over 8k goroutines. This hits a race detector limit.
+        numInstances = 100
+    } else {
+        t.Parallel()
+    }
     PerformDistributionTest(t, numInstances, numBlocks)
 }
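The pattern in this hunk is easy to reuse in other stress tests: gate the expensive configuration on detectrace.WithRace() and fall back to a smaller, non-parallel run under the race detector. A minimal, self-contained sketch of the same idea (the package name, instance counts, and the per-instance work below are illustrative, not part of this change):

```go
package example

import (
	"sync"
	"testing"

	detectrace "github.com/jbenet/go-detect-race"
)

func TestStress(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	instances := 500 // hypothetical full-size run
	if detectrace.WithRace() {
		// The race detector tracks a limited number of goroutines and is
		// much slower, so shrink the test rather than hitting that limit.
		instances = 100
	} else {
		t.Parallel()
	}

	var wg sync.WaitGroup
	for i := 0; i < instances; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// stand-in for real per-instance work
		}()
	}
	wg.Wait()
}
```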


@@ -354,7 +354,9 @@ func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
     if err != nil {
         return err
     }
-    n, err := io.ReadFull(r, resp.Data[:req.Size])
+
+    buf := resp.Data[:min(req.Size, int(r.Size()))]
+    n, err := io.ReadFull(r, buf)
     resp.Data = resp.Data[:n]
     lm["res_size"] = n
     return err // may be non-nil / not succeeded
@@ -652,3 +654,10 @@ type ipnsNode interface {
 }

 var _ ipnsNode = (*Node)(nil)
+
+func min(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}
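The buffer sizing in this hunk matters because io.ReadFull reports io.ErrUnexpectedEOF whenever it receives fewer bytes than len(buf); clamping the slice to min(requested size, file size) keeps reads near the end of a short file from turning into errors. A small standalone sketch of that behaviour (the 4-byte "file" and 4096-byte request are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	const reqSize = 4096 // what the kernel asked for
	data := "tiny"       // pretend this is a 4-byte file

	// Naive: buffer sized to the request, not the file.
	buf := make([]byte, reqSize)
	_, err := io.ReadFull(strings.NewReader(data), buf)
	fmt.Println(err) // unexpected EOF

	// Clamped: buffer sized to min(request, file size), as in the patch.
	buf = make([]byte, min(reqSize, len(data)))
	n, err := io.ReadFull(strings.NewReader(data), buf)
	fmt.Println(n, err) // 4 <nil>
}
```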


@@ -4,6 +4,7 @@ import (
     "os"

     "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse"
+    "github.com/jbenet/go-ipfs/Godeps/_workspace/src/bazil.org/fuse/fs"
     "github.com/jbenet/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
 )
@@ -18,7 +19,9 @@ func (l *Link) Attr() fuse.Attr {
     }
 }

-func (l *Link) Readlink(req *fuse.ReadlinkRequest, ctx context.Context) (string, error) {
+func (l *Link) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
     log.Debugf("ReadLink: %s", l.Target)
     return l.Target, nil
 }
+
+var _ fs.NodeReadlinker = (*Link)(nil)


@@ -5,7 +5,6 @@
 package readonly

 import (
-    "bytes"
     "io"
     "os"
@@ -169,8 +168,9 @@ func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR
     if err != nil {
         return err
     }
-    buf := bytes.NewBuffer(resp.Data)
-    n, err := io.CopyN(buf, r, int64(req.Size))
+
+    buf := resp.Data[:min(req.Size, int(r.Size()))]
+    n, err := io.ReadFull(r, buf)
     if err != nil && err != io.EOF {
         return err
     }
@@ -196,3 +196,10 @@ type roNode interface {
 }

 var _ roNode = (*Node)(nil)
+
+func min(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}


@@ -185,9 +185,11 @@ func (rt *RoutingTable) NearestPeers(id ID, count int) []peer.ID {
 // Size returns the total number of peers in the routing table
 func (rt *RoutingTable) Size() int {
     var tot int
+    rt.tabLock.RLock()
     for _, buck := range rt.Buckets {
         tot += buck.Len()
     }
+    rt.tabLock.RUnlock()
     return tot
 }
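The RLock/RUnlock pair added above is the standard read-side guard for a structure protected by a sync.RWMutex: readers may run concurrently with each other, but they must not iterate the bucket slice while a writer is mutating it. A minimal sketch of the same pattern, with an illustrative toy table type rather than the real routing table:

```go
package main

import (
	"fmt"
	"sync"
)

// table is a toy stand-in for the routing table: a mutex guarding buckets.
type table struct {
	mu      sync.RWMutex
	buckets [][]string
}

// Size takes the read lock so concurrent writers cannot mutate buckets
// (or replace the slice) while we iterate over it.
func (t *table) Size() int {
	t.mu.RLock()
	defer t.mu.RUnlock()

	var tot int
	for _, b := range t.buckets {
		tot += len(b)
	}
	return tot
}

func main() {
	t := &table{buckets: [][]string{{"a", "b"}, {"c"}}}
	fmt.Println(t.Size()) // 3
}
```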


@@ -125,6 +125,28 @@ test_config_set() {
 test_init_ipfs() {
+
+  # we have a problem where initializing daemons with the same api port
+  # often fails -- it hangs indefinitely. The proper solution is to make
+  # ipfs pick an unused port for the api on startup, and then use that.
+  # Unfortunately, ipfs doesn't yet know how to do this -- the api port
+  # must be specified. Until ipfs learns how to do this, we must use
+  # specific port numbers, which may still fail but less frequently
+  # if we at least use different ones.
+
+  # Using RANDOM like this is clearly wrong -- it samples with replacement
+  # and it doesn't even check that the port is unused. This is a trivial
+  # stopgap until the proper solution is implemented.
+  RANDOM=$$
+  PORT_API=$((RANDOM % 3000 + 5100))
+  ADDR_API="/ip4/127.0.0.1/tcp/$PORT_API"
+
+  PORT_GWAY=$((RANDOM % 3000 + 8100))
+  ADDR_GWAY="/ip4/127.0.0.1/tcp/$PORT_GWAY"
+
+  # we set the Addresses.API config variable.
+  # the cli client knows to use it, so we only need to set it here.
+  # todo: in the future, use env?
   test_expect_success "ipfs init succeeds" '
     export IPFS_PATH="$(pwd)/.go-ipfs" &&
     ipfs init -b=1024 > /dev/null
@@ -134,6 +156,8 @@ test_init_ipfs() {
     mkdir mountdir ipfs ipns &&
     test_config_set Mounts.IPFS "$(pwd)/ipfs" &&
     test_config_set Mounts.IPNS "$(pwd)/ipns" &&
+    test_config_set Addresses.API "$ADDR_API" &&
+    test_config_set Addresses.Gateway "$ADDR_GWAY" &&
     ipfs bootstrap rm --all ||
     test_fsh cat "\"$IPFS_PATH/config\""
   '
@@ -172,7 +196,6 @@ test_launch_ipfs_daemon() {
   '

   # we say the daemon is ready when the API server is ready.
-  ADDR_API="/ip4/127.0.0.1/tcp/5001"
   test_expect_success "'ipfs daemon' is ready" '
     IPFS_PID=$! &&
     pollEndpoint -ep=/version -host=$ADDR_API -v -tout=1s -tries=60 2>poll_apierr > poll_apiout ||


@@ -40,7 +40,7 @@ test_expect_success "ipfs add output looks good" '
 '

 test_expect_success "ipfs cat succeeds" '
-  ipfs cat $HASH >actual
+  ipfs cat "$HASH" >actual
 '

 test_expect_success "ipfs cat output looks good" '
@@ -49,7 +49,7 @@ test_expect_success "ipfs cat output looks good" '
 '

 test_expect_success FUSE "cat ipfs/stuff succeeds" '
-  cat ipfs/$HASH >actual
+  cat "ipfs/$HASH" >actual
 '

 test_expect_success FUSE "cat ipfs/stuff looks good" '
@@ -108,7 +108,7 @@ test_expect_success "'ipfs add bigfile' output looks good" '
   test_cmp expected actual
 '

 test_expect_success "'ipfs cat' succeeds" '
-  ipfs cat $HASH >actual
+  ipfs cat "$HASH" >actual
 '

 test_expect_success "'ipfs cat' output looks good" '
@@ -116,7 +116,7 @@ test_expect_success "'ipfs cat' output looks good" '
 '

 test_expect_success FUSE "cat ipfs/bigfile succeeds" '
-  cat ipfs/$HASH >actual
+  cat "ipfs/$HASH" >actual
 '

 test_expect_success FUSE "cat ipfs/bigfile looks good" '
@@ -144,11 +144,11 @@ test_expect_success EXPENSIVE "ipfs add bigfile output looks good" '
 '

 test_expect_success EXPENSIVE "ipfs cat succeeds" '
-  ipfs cat $HASH | multihash -a=sha1 -e=hex >sha1_actual
+  ipfs cat "$HASH" | multihash -a=sha1 -e=hex >sha1_actual
 '

 test_expect_success EXPENSIVE "ipfs cat output looks good" '
-  ipfs cat $HASH >actual &&
+  ipfs cat "$HASH" >actual &&
   test_cmp mountdir/bigfile actual
 '
@@ -158,7 +158,7 @@ test_expect_success EXPENSIVE "ipfs cat output hashed looks good" '
 '

 test_expect_success FUSE,EXPENSIVE "cat ipfs/bigfile succeeds" '
-  cat ipfs/$HASH | multihash -a=sha1 -e=hex >sha1_actual
+  cat "ipfs/$HASH" | multihash -a=sha1 -e=hex >sha1_actual
 '

 test_expect_success FUSE,EXPENSIVE "cat ipfs/bigfile looks good" '


@@ -9,10 +9,11 @@ test_description="Test HTTP Gateway"
 . lib/test-lib.sh

 test_init_ipfs
-test_config_ipfs_gateway_readonly "/ip4/0.0.0.0/tcp/5002"
+test_config_ipfs_gateway_readonly $ADDR_GWAY
 test_launch_ipfs_daemon

-webui_hash="QmXdu7HWdV6CUaUabd9q2ZeA4iHZLVyDRj3Gi4dsJsWjbr"
+port=$PORT_GWAY
+apiport=$PORT_API

 # TODO check both 5001 and 5002.
 # 5001 should have a readable gateway (part of the API)
@@ -24,7 +25,7 @@ webui_hash="QmXdu7HWdV6CUaUabd9q2ZeA4iHZLVyDRj3Gi4dsJsWjbr"
 test_expect_success "GET IPFS path succeeds" '
   echo "Hello Worlds!" > expected &&
   HASH=`ipfs add -q expected` &&
-  wget "http://127.0.0.1:5002/ipfs/$HASH" -O actual
+  wget "http://127.0.0.1:$port/ipfs/$HASH" -O actual
 '

 test_expect_success "GET IPFS path output looks good" '
@@ -36,11 +37,11 @@ test_expect_success "GET IPFS directory path succeeds" '
   mkdir dir &&
   echo "12345" > dir/test &&
   HASH2=`ipfs add -r -q dir | tail -n 1` &&
-  wget "http://127.0.0.1:5002/ipfs/$HASH2"
+  wget "http://127.0.0.1:$port/ipfs/$HASH2"
 '

 test_expect_success "GET IPFS directory file succeeds" '
-  wget "http://127.0.0.1:5002/ipfs/$HASH2/test" -O actual
+  wget "http://127.0.0.1:$port/ipfs/$HASH2/test" -O actual
 '

 test_expect_success "GET IPFS directory file output looks good" '
@@ -50,7 +51,7 @@ test_expect_success "GET IPFS directory file output looks good" '
 test_expect_failure "GET IPNS path succeeds" '
   ipfs name publish "$HASH" &&
   NAME=`ipfs config Identity.PeerID` &&
-  wget "http://127.0.0.1:5002/ipns/$NAME" -O actual
+  wget "http://127.0.0.1:$port/ipns/$NAME" -O actual
 '

 test_expect_failure "GET IPNS path output looks good" '
@@ -58,24 +59,24 @@ test_expect_failure "GET IPNS path output looks good" '
 '

 test_expect_success "GET invalid IPFS path errors" '
-  test_must_fail wget http://127.0.0.1:5002/ipfs/12345
+  test_must_fail wget http://127.0.0.1:$port/ipfs/12345
 '

 test_expect_success "GET invalid path errors" '
-  test_must_fail wget http://127.0.0.1:5002/12345
+  test_must_fail wget http://127.0.0.1:$port/12345
 '

 test_expect_success "GET /webui returns code expected" '
   echo "HTTP/1.1 302 Found" | head -c 18 > expected &&
   echo "HTTP/1.1 301 Moved Permanently" | head -c 18 > also_ok &&
-  curl -I http://127.0.0.1:5001/webui | head -c 18 > actual1 &&
+  curl -I http://127.0.0.1:$apiport/webui | head -c 18 > actual1 &&
   (test_cmp expected actual1 || test_cmp actual1 also_ok) &&
   rm actual1
 '

 test_expect_success "GET /webui/ returns code expected" '
-  curl -I http://127.0.0.1:5001/webui/ | head -c 18 > actual2 &&
+  curl -I http://127.0.0.1:$apiport/webui/ | head -c 18 > actual2 &&
   (test_cmp expected actual2 || test_cmp actual2 also_ok) &&
   rm expected &&
   rm also_ok &&


@@ -9,35 +9,41 @@ test_description="Test HTTP Gateway (Writable)"
 . lib/test-lib.sh

 test_init_ipfs
-test_config_ipfs_gateway_writable "/ip4/0.0.0.0/tcp/5002"
+test_config_ipfs_gateway_writable $ADDR_GWAY
 test_launch_ipfs_daemon

-test_expect_success "ipfs daemon listening to TCP port 5002" '
-  test_wait_open_tcp_port_10_sec 5002
+port=$PORT_GWAY
+
+test_expect_success "ipfs daemon listening to TCP port $port" '
+  test_wait_open_tcp_port_10_sec "$PORT_GWAY"
 '

 test_expect_success "HTTP gateway gives access to sample file" '
-  curl -s -o welcome "http://localhost:5002/ipfs/$HASH_WELCOME_DOCS/readme" &&
+  curl -s -o welcome "http://localhost:$PORT_GWAY/ipfs/$HASH_WELCOME_DOCS/readme" &&
   grep "Hello and Welcome to IPFS!" welcome
 '

 test_expect_success "HTTP POST file gives Hash" '
   echo "$RANDOM" >infile &&
-  URL="http://localhost:5002/ipfs/" &&
+  URL="http://localhost:$port/ipfs/" &&
   curl -svX POST --data-binary @infile "$URL" 2>curl.out &&
   grep "HTTP/1.1 201 Created" curl.out &&
   LOCATION=$(grep Location curl.out) &&
-  HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)\s")
+  HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)$")
 '

-test_expect_success "We can HTTP GET file just created" '
-  URL="http://localhost:5002/ipfs/$HASH" &&
+# this is failing on osx
+# claims "multihash too short. must be > 3 bytes" but the multihash is there.
+test_expect_failure "We can HTTP GET file just created" '
+  URL="http://localhost:$port/ipfs/$HASH" &&
   curl -so outfile "$URL" &&
-  test_cmp infile outfile
+  test_cmp infile outfile ||
+  echo $URL &&
+  test_fsh cat outfile
 '

 test_expect_success "HTTP PUT empty directory" '
-  URL="http://localhost:5002/ipfs/$HASH_EMPTY_DIR/" &&
+  URL="http://localhost:$port/ipfs/$HASH_EMPTY_DIR/" &&
   echo "PUT $URL" &&
   curl -svX PUT "$URL" 2>curl.out &&
   cat curl.out &&
@@ -54,7 +60,7 @@ test_expect_success "HTTP GET empty directory" '

 test_expect_success "HTTP PUT file to construct a hierarchy" '
   echo "$RANDOM" >infile &&
-  URL="http://localhost:5002/ipfs/$HASH_EMPTY_DIR/test.txt" &&
+  URL="http://localhost:$port/ipfs/$HASH_EMPTY_DIR/test.txt" &&
   echo "PUT $URL" &&
   curl -svX PUT --data-binary @infile "$URL" 2>curl.out &&
   grep "HTTP/1.1 201 Created" curl.out &&
@@ -63,7 +69,7 @@ test_expect_success "HTTP PUT file to construct a hierarchy" '
 '

 test_expect_success "We can HTTP GET file just created" '
-  URL="http://localhost:5002/ipfs/$HASH/test.txt" &&
+  URL="http://localhost:$port/ipfs/$HASH/test.txt" &&
   echo "GET $URL" &&
   curl -so outfile "$URL" &&
   test_cmp infile outfile
@@ -71,7 +77,7 @@ test_expect_success "We can HTTP GET file just created" '

 test_expect_success "HTTP PUT file to append to existing hierarchy" '
   echo "$RANDOM" >infile2 &&
-  URL="http://localhost:5002/ipfs/$HASH/test/test.txt" &&
+  URL="http://localhost:$port/ipfs/$HASH/test/test.txt" &&
   echo "PUT $URL" &&
   curl -svX PUT --data-binary @infile2 "$URL" 2>curl.out &&
   grep "HTTP/1.1 201 Created" curl.out &&
@@ -79,12 +85,13 @@ test_expect_success "HTTP PUT file to append to existing hierarchy" '
   HASH=$(expr "$LOCATION" : "< Location: /ipfs/\(.*\)/test/test.txt")
 '

 test_expect_success "We can HTTP GET file just created" '
-  URL="http://localhost:5002/ipfs/$HASH/test/test.txt" &&
+  URL="http://localhost:$port/ipfs/$HASH/test/test.txt" &&
   echo "GET $URL" &&
   curl -so outfile2 "$URL" &&
   test_cmp infile2 outfile2 &&
-  URL="http://localhost:5002/ipfs/$HASH/test.txt" &&
+  URL="http://localhost:$port/ipfs/$HASH/test.txt" &&
   echo "GET $URL" &&
   curl -so outfile "$URL" &&
   test_cmp infile outfile