mirror of https://github.com/ipfs/kubo.git synced 2025-06-27 16:07:42 +08:00

Merge pull request #478 from jbenet/relay

Stream relaying
This commit is contained in:
Juan Batiz-Benet
2015-01-04 00:46:39 -08:00
158 changed files with 3336 additions and 1954 deletions

Godeps/Godeps.json generated

@ -21,13 +21,13 @@
},
{
"ImportPath": "code.google.com/p/go.crypto/blowfish",
"Comment": "null-219",
"Rev": "00a7d3b31bbab5795b4a51933c04fc2768242970"
"Comment": "null-236",
"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
},
{
"ImportPath": "code.google.com/p/go.crypto/sha3",
"Comment": "null-219",
"Rev": "00a7d3b31bbab5795b4a51933c04fc2768242970"
"Comment": "null-236",
"Rev": "69e2a90ed92d03812364aeb947b7068dc42e561e"
},
{
"ImportPath": "code.google.com/p/go.net/context",
@ -118,7 +118,7 @@
},
{
"ImportPath": "github.com/jbenet/go-msgio",
"Rev": "5e7289d3a0cd046a5bee30b187cc844c31f54dce"
"Rev": "dbae89193876910c736b2ce1291fa8bbcf299d77"
},
{
"ImportPath": "github.com/jbenet/go-multiaddr",
@ -131,8 +131,8 @@
},
{
"ImportPath": "github.com/jbenet/go-multihash",
"Comment": "0.1.0-5-g1976046",
"Rev": "1976046c2b0db0b668791b3e541d76a38b7c1af7"
"Comment": "0.1.0-19-g8ce5cb1",
"Rev": "8ce5cb1b82e1b4c1bea1fdf3cd467ef49301734e"
},
{
"ImportPath": "github.com/jbenet/go-peerstream",


@ -0,0 +1,68 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x functions have a security strength against preimage attacks of x
// bits. Since they only produce x bits of output, their collision-resistance
// is only x/2 bits.
//
// The SHAKE-x functions have a generic security strength of x bits against
// all attacks, provided that at least 2x bits of their output is used.
// Requesting more than 2x bits of output does not increase the collision-
// resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a pseudo-random permutation,
// by applying the permutation to a state of "rate + capacity" bytes, but
// hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// has thus been "filled up" and the permutation is applied. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge by the same method, except that
// output is copied out.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
//
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that security_strength == (1600 - bitrate) / 2.
//
//
// Recommendations, detailed
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks.
//
// The Keccak team recommends SHAKE256 for most applications upgrading from
// SHA2-512. (NIST chose a much stronger, but much slower, sponge instance
// for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3
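
To make the guidance above concrete, here is a minimal sketch (not part of the diff) of general-purpose hashing and a secret-key MAC with SHAKE256, using the ShakeSum256 and NewShake256 helpers added elsewhere in this change; the message and key strings are made up, and the import path is the go.crypto one recorded in Godeps.json.

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/sha3" // import path as recorded in Godeps.json
)

func main() {
    msg := []byte("some input to hash")

    // General-purpose hashing: SHAKE256 with at least 64 bytes of output.
    out := make([]byte, 64)
    sha3.ShakeSum256(out, msg)
    fmt.Printf("digest: %x\n", out)

    // Secret-key MAC: prepend the key to the input, hash with SHAKE256,
    // and read at least 32 bytes of output.
    key := []byte("a secret key")
    mac := make([]byte, 32)
    h := sha3.NewShake256()
    h.Write(key)
    h.Write(msg)
    h.Read(mac)
    fmt.Printf("mac:    %x\n", mac)
}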


@ -0,0 +1,65 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
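
For illustration only (not part of the diff): the one-shot Sum functions and the streaming constructors compute the same digest. A minimal sketch, with a made-up input string:

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/sha3"
)

func main() {
    digest := sha3.Sum256([]byte("hello")) // one-shot, returns a [32]byte

    h := sha3.New256() // streaming, via the standard hash.Hash interface
    h.Write([]byte("hel"))
    h.Write([]byte("lo"))
    sum := h.Sum(nil)

    fmt.Printf("%x\n%x\n", digest[:], sum) // both lines print the same SHA3-256 digest
}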

Binary file not shown.


@ -1,16 +1,11 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file implements the core Keccak permutation function necessary for computing SHA3.
// This is implemented in a separate file to allow for replacement by an optimized implementation.
// Nothing in this package is exported.
// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/).
// rc stores the round constants for use in the ι step.
var rc = [...]uint64{
var rc = [24]uint64{
0x0000000000000001,
0x0000000000008082,
0x800000000000808A,
@ -37,129 +32,379 @@ var rc = [...]uint64{
0x8000000080008008,
}
// keccakF computes the complete Keccak-f function consisting of 24 rounds with a different
// constant (rc) in each round. This implementation fully unrolls the round function to avoid
// inner loops, as well as pre-calculating shift offsets.
func keccakF(a *[numLanes]uint64) {
var t, bc0, bc1, bc2, bc3, bc4 uint64
for _, roundConstant := range rc {
// θ step
// keccakF1600 applies the Keccak permutation to a 1600b-wide
// state represented as a slice of 25 uint64s.
func keccakF1600(a *[25]uint64) {
// Implementation translated from Keccak-inplace.c
// in the keccak reference code.
var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
for i := 0; i < 24; i += 4 {
// Combines the 5 steps in each round into 2 steps.
// Unrolls 4 rounds per loop and spreads some steps across rounds.
// Round 1
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
t = bc4 ^ (bc1<<1 ^ bc1>>63)
a[0] ^= t
a[5] ^= t
a[10] ^= t
a[15] ^= t
a[20] ^= t
t = bc0 ^ (bc2<<1 ^ bc2>>63)
a[1] ^= t
a[6] ^= t
a[11] ^= t
a[16] ^= t
a[21] ^= t
t = bc1 ^ (bc3<<1 ^ bc3>>63)
a[2] ^= t
a[7] ^= t
a[12] ^= t
a[17] ^= t
a[22] ^= t
t = bc2 ^ (bc4<<1 ^ bc4>>63)
a[3] ^= t
a[8] ^= t
a[13] ^= t
a[18] ^= t
a[23] ^= t
t = bc3 ^ (bc0<<1 ^ bc0>>63)
a[4] ^= t
a[9] ^= t
a[14] ^= t
a[19] ^= t
a[24] ^= t
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
// ρ and π steps
t = a[1]
t, a[10] = a[10], t<<1^t>>(64-1)
t, a[7] = a[7], t<<3^t>>(64-3)
t, a[11] = a[11], t<<6^t>>(64-6)
t, a[17] = a[17], t<<10^t>>(64-10)
t, a[18] = a[18], t<<15^t>>(64-15)
t, a[3] = a[3], t<<21^t>>(64-21)
t, a[5] = a[5], t<<28^t>>(64-28)
t, a[16] = a[16], t<<36^t>>(64-36)
t, a[8] = a[8], t<<45^t>>(64-45)
t, a[21] = a[21], t<<55^t>>(64-55)
t, a[24] = a[24], t<<2^t>>(64-2)
t, a[4] = a[4], t<<14^t>>(64-14)
t, a[15] = a[15], t<<27^t>>(64-27)
t, a[23] = a[23], t<<41^t>>(64-41)
t, a[19] = a[19], t<<56^t>>(64-56)
t, a[13] = a[13], t<<8^t>>(64-8)
t, a[12] = a[12], t<<25^t>>(64-25)
t, a[2] = a[2], t<<43^t>>(64-43)
t, a[20] = a[20], t<<62^t>>(64-62)
t, a[14] = a[14], t<<18^t>>(64-18)
t, a[22] = a[22], t<<39^t>>(64-39)
t, a[9] = a[9], t<<61^t>>(64-61)
t, a[6] = a[6], t<<20^t>>(64-20)
a[1] = t<<44 ^ t>>(64-44)
bc0 = a[0] ^ d0
t = a[6] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[12] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[18] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[24] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
a[6] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
// χ step
bc0 = a[0]
bc1 = a[1]
bc2 = a[2]
bc3 = a[3]
bc4 = a[4]
a[0] ^= bc2 &^ bc1
a[1] ^= bc3 &^ bc2
a[2] ^= bc4 &^ bc3
a[3] ^= bc0 &^ bc4
a[4] ^= bc1 &^ bc0
bc0 = a[5]
bc1 = a[6]
bc2 = a[7]
bc3 = a[8]
bc4 = a[9]
a[5] ^= bc2 &^ bc1
a[6] ^= bc3 &^ bc2
a[7] ^= bc4 &^ bc3
a[8] ^= bc0 &^ bc4
a[9] ^= bc1 &^ bc0
bc0 = a[10]
bc1 = a[11]
bc2 = a[12]
bc3 = a[13]
bc4 = a[14]
a[10] ^= bc2 &^ bc1
a[11] ^= bc3 &^ bc2
a[12] ^= bc4 &^ bc3
a[13] ^= bc0 &^ bc4
a[14] ^= bc1 &^ bc0
bc0 = a[15]
bc1 = a[16]
bc2 = a[17]
bc3 = a[18]
bc4 = a[19]
a[15] ^= bc2 &^ bc1
a[16] ^= bc3 &^ bc2
a[17] ^= bc4 &^ bc3
a[18] ^= bc0 &^ bc4
a[19] ^= bc1 &^ bc0
bc0 = a[20]
bc1 = a[21]
bc2 = a[22]
bc3 = a[23]
bc4 = a[24]
a[20] ^= bc2 &^ bc1
a[21] ^= bc3 &^ bc2
a[22] ^= bc4 &^ bc3
a[23] ^= bc0 &^ bc4
a[24] ^= bc1 &^ bc0
t = a[10] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[16] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[22] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[3] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[10] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
// ι step
a[0] ^= roundConstant
t = a[20] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[1] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[7] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[19] ^ d4
bc3 = t<<8 | t>>(64-8)
a[20] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[11] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[23] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[4] ^ d4
bc0 = t<<27 | t>>(64-27)
a[5] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[2] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[8] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[14] ^ d4
bc2 = t<<39 | t>>(64-39)
a[15] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
// Round 2
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[16] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[7] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[23] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[14] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
a[16] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[11] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[2] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[18] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[20] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[6] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[22] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[4] ^ d4
bc3 = t<<8 | t>>(64-8)
a[15] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[1] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[8] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[24] ^ d4
bc0 = t<<27 | t>>(64-27)
a[10] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[12] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[3] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[19] ^ d4
bc2 = t<<39 | t>>(64-39)
a[5] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
// Round 3
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[11] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[22] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[8] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[19] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
a[11] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[1] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[12] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[23] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[15] = bc0 ^ (bc2 &^ bc1)
a[1] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[16] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[2] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[24] ^ d4
bc3 = t<<8 | t>>(64-8)
a[5] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[6] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[3] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[14] ^ d4
bc0 = t<<27 | t>>(64-27)
a[20] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[7] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[18] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[4] ^ d4
bc2 = t<<39 | t>>(64-39)
a[10] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
// Round 4
bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
d0 = bc4 ^ (bc1<<1 | bc1>>63)
d1 = bc0 ^ (bc2<<1 | bc2>>63)
d2 = bc1 ^ (bc3<<1 | bc3>>63)
d3 = bc2 ^ (bc4<<1 | bc4>>63)
d4 = bc3 ^ (bc0<<1 | bc0>>63)
bc0 = a[0] ^ d0
t = a[1] ^ d1
bc1 = t<<44 | t>>(64-44)
t = a[2] ^ d2
bc2 = t<<43 | t>>(64-43)
t = a[3] ^ d3
bc3 = t<<21 | t>>(64-21)
t = a[4] ^ d4
bc4 = t<<14 | t>>(64-14)
a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
a[1] = bc1 ^ (bc3 &^ bc2)
a[2] = bc2 ^ (bc4 &^ bc3)
a[3] = bc3 ^ (bc0 &^ bc4)
a[4] = bc4 ^ (bc1 &^ bc0)
t = a[5] ^ d0
bc2 = t<<3 | t>>(64-3)
t = a[6] ^ d1
bc3 = t<<45 | t>>(64-45)
t = a[7] ^ d2
bc4 = t<<61 | t>>(64-61)
t = a[8] ^ d3
bc0 = t<<28 | t>>(64-28)
t = a[9] ^ d4
bc1 = t<<20 | t>>(64-20)
a[5] = bc0 ^ (bc2 &^ bc1)
a[6] = bc1 ^ (bc3 &^ bc2)
a[7] = bc2 ^ (bc4 &^ bc3)
a[8] = bc3 ^ (bc0 &^ bc4)
a[9] = bc4 ^ (bc1 &^ bc0)
t = a[10] ^ d0
bc4 = t<<18 | t>>(64-18)
t = a[11] ^ d1
bc0 = t<<1 | t>>(64-1)
t = a[12] ^ d2
bc1 = t<<6 | t>>(64-6)
t = a[13] ^ d3
bc2 = t<<25 | t>>(64-25)
t = a[14] ^ d4
bc3 = t<<8 | t>>(64-8)
a[10] = bc0 ^ (bc2 &^ bc1)
a[11] = bc1 ^ (bc3 &^ bc2)
a[12] = bc2 ^ (bc4 &^ bc3)
a[13] = bc3 ^ (bc0 &^ bc4)
a[14] = bc4 ^ (bc1 &^ bc0)
t = a[15] ^ d0
bc1 = t<<36 | t>>(64-36)
t = a[16] ^ d1
bc2 = t<<10 | t>>(64-10)
t = a[17] ^ d2
bc3 = t<<15 | t>>(64-15)
t = a[18] ^ d3
bc4 = t<<56 | t>>(64-56)
t = a[19] ^ d4
bc0 = t<<27 | t>>(64-27)
a[15] = bc0 ^ (bc2 &^ bc1)
a[16] = bc1 ^ (bc3 &^ bc2)
a[17] = bc2 ^ (bc4 &^ bc3)
a[18] = bc3 ^ (bc0 &^ bc4)
a[19] = bc4 ^ (bc1 &^ bc0)
t = a[20] ^ d0
bc3 = t<<41 | t>>(64-41)
t = a[21] ^ d1
bc4 = t<<2 | t>>(64-2)
t = a[22] ^ d2
bc0 = t<<62 | t>>(64-62)
t = a[23] ^ d3
bc1 = t<<55 | t>>(64-55)
t = a[24] ^ d4
bc2 = t<<39 | t>>(64-39)
a[20] = bc0 ^ (bc2 &^ bc1)
a[21] = bc1 ^ (bc3 &^ bc2)
a[22] = bc2 ^ (bc4 &^ bc3)
a[23] = bc3 ^ (bc0 &^ bc4)
a[24] = bc4 ^ (bc1 &^ bc0)
}
}


@ -0,0 +1,18 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package sha3
import (
"crypto"
)
func init() {
crypto.RegisterHash(crypto.SHA3_224, New224)
crypto.RegisterHash(crypto.SHA3_256, New256)
crypto.RegisterHash(crypto.SHA3_384, New384)
crypto.RegisterHash(crypto.SHA3_512, New512)
}
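
With this registration in place (and Go 1.4 or newer), the same hashes can also be obtained through the standard crypto package. A short sketch, not part of the diff:

package main

import (
    "crypto"
    "fmt"

    _ "code.google.com/p/go.crypto/sha3" // imported only for its RegisterHash side effect
)

func main() {
    h := crypto.SHA3_256.New() // resolved via the init() registration above
    h.Write([]byte("hello"))
    fmt.Printf("%x\n", h.Sum(nil))
}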


@ -1,213 +1,226 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA3 hash algorithm (formerly called Keccak) chosen by NIST in 2012.
// This file provides a SHA3 implementation which implements the standard hash.Hash interface.
// Writing input data, including padding, and reading output data are computed in this file.
// Note that the current implementation can compute the hash of an integral number of bytes only.
// This is a consequence of the hash interface in which a buffer of bytes is passed in.
// The internals of the Keccak-f function are computed in keccakf.go.
// For the detailed specification, refer to the Keccak web site (http://keccak.noekeon.org/).
package sha3
import (
"encoding/binary"
"hash"
)
// laneSize is the size in bytes of each "lane" of the internal state of SHA3 (5 * 5 * 8).
// Note that changing this size would require using a type other than uint64 to store each lane.
const laneSize = 8
// spongeDirection indicates the direction bytes are flowing through the sponge.
type spongeDirection int
// sliceSize represents the dimensions of the internal state, a square matrix of
// sliceSize ** 2 lanes. This is the size of both the "rows" and "columns" dimensions in the
// terminology of the SHA3 specification.
const sliceSize = 5
const (
// spongeAbsorbing indicates that the sponge is absorbing input.
spongeAbsorbing spongeDirection = iota
// spongeSqueezing indicates that the sponge is being squeezed.
spongeSqueezing
)
// numLanes represents the total number of lanes in the state.
const numLanes = sliceSize * sliceSize
const (
// maxRate is the maximum size of the internal buffer. SHAKE-128
// currently needs the largest buffer (a rate of 168 bytes).
maxRate = 168
)
// stateSize is the size in bytes of the internal state of SHA3 (5 * 5 * WSize).
const stateSize = laneSize * numLanes
type state struct {
// Generic sponge components.
a [25]uint64 // main state of the hash
buf []byte // points into storage
rate int // the number of bytes of state to use
// digest represents the partial evaluation of a checksum.
// Note that capacity, and not outputSize, is the critical security parameter, as SHA3 can output
// an arbitrary number of bytes for any given capacity. The Keccak proposal recommends that
// capacity = 2*outputSize to ensure that finding a collision of size outputSize requires
// O(2^{outputSize/2}) computations (the birthday lower bound). Future standards may modify the
// capacity/outputSize ratio to allow for more output with lower cryptographic security.
type digest struct {
a [numLanes]uint64 // main state of the hash
outputSize int // desired output size in bytes
capacity int // number of bytes to leave untouched during squeeze/absorb
absorbed int // number of bytes absorbed thus far
// dsbyte contains the "domain separation" value and the first bit of
// the padding. In sections 6.1 and 6.2 of [1], the SHA-3 and SHAKE
// functions are defined with bits appended to the message: SHA-3
// functions have 01 and SHAKE functions have 1111. Because of the way
// that bits are numbered from the LSB upwards, that ends up as
// 00000010b and 00001111b, respectively. Then the padding rule from
// section 5.1 is applied to pad to a multiple of the rate, which
// involves adding a 1 bit, zero or more zero bits and then a final one
// bit. The first one bit from the padding is merged into the dsbyte
// value giving 00000110b (0x06) and 00011111b (0x1f), respectively.
//
// [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf,
dsbyte byte
storage [maxRate]byte
// Specific to SHA-3 and SHAKE.
fixedOutput bool // whether this is a fixed-output-length instance
outputLen int // the default output size in bytes
state spongeDirection // current direction of the sponge
}
// minInt returns the lesser of two integer arguments, to simplify the absorption routine.
func minInt(v1, v2 int) int {
if v1 <= v2 {
return v1
}
return v2
}
// BlockSize returns the rate of the sponge underlying this hash function.
func (d *state) BlockSize() int { return d.rate }
// rate returns the number of bytes of the internal state which can be absorbed or squeezed
// in between calls to the permutation function.
func (d *digest) rate() int {
return stateSize - d.capacity
}
// Size returns the output size of the hash function in bytes.
func (d *state) Size() int { return d.outputLen }
// Reset clears the internal state by zeroing bytes in the state buffer.
// This can be skipped for a newly-created hash state; the default zero-allocated state is correct.
func (d *digest) Reset() {
d.absorbed = 0
// Reset clears the internal state by zeroing the sponge state and
// the byte buffer, and setting Sponge.state to absorbing.
func (d *state) Reset() {
// Zero the permutation's state.
for i := range d.a {
d.a[i] = 0
}
d.state = spongeAbsorbing
d.buf = d.storage[:0]
}
// BlockSize, required by the hash.Hash interface, does not have a standard interpretation
// for a sponge-based construction like SHA3. We return the data rate: the number of bytes which
// can be absorbed per invocation of the permutation function. For Merkle-Damgård based hashes
// (ie SHA1, SHA2, MD5) the output size of the internal compression function is returned.
// We consider this to be roughly equivalent because it represents the number of bytes of output
// produced per cryptographic operation.
func (d *digest) BlockSize() int { return d.rate() }
// Size returns the output size of the hash function in bytes.
func (d *digest) Size() int {
return d.outputSize
}
// unalignedAbsorb is a helper function for Write, which absorbs data that isn't aligned with an
// 8-byte lane. This requires shifting the individual bytes into position in a uint64.
func (d *digest) unalignedAbsorb(p []byte) {
var t uint64
for i := len(p) - 1; i >= 0; i-- {
t <<= 8
t |= uint64(p[i])
func (d *state) clone() *state {
ret := *d
if ret.state == spongeAbsorbing {
ret.buf = ret.storage[:len(ret.buf)]
} else {
ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
}
offset := (d.absorbed) % d.rate()
t <<= 8 * uint(offset%laneSize)
d.a[offset/laneSize] ^= t
d.absorbed += len(p)
return &ret
}
// Write "absorbs" bytes into the state of the SHA3 hash, updating as needed when the sponge
// "fills up" with rate() bytes. Since lanes are stored internally as type uint64, this requires
// converting the incoming bytes into uint64s using a little endian interpretation. This
// implementation is optimized for large, aligned writes of multiples of 8 bytes (laneSize).
// Non-aligned or uneven numbers of bytes require shifting and are slower.
func (d *digest) Write(p []byte) (int, error) {
// An initial offset is needed if we aren't absorbing to the first lane initially.
offset := d.absorbed % d.rate()
toWrite := len(p)
// xorIn xors a buffer into the state, byte-swapping to
// little-endian as necessary.
func (d *state) xorIn(buf []byte) {
n := len(buf) / 8
// The first lane may need to absorb unaligned and/or incomplete data.
if (offset%laneSize != 0 || len(p) < 8) && len(p) > 0 {
toAbsorb := minInt(laneSize-(offset%laneSize), len(p))
d.unalignedAbsorb(p[:toAbsorb])
p = p[toAbsorb:]
offset = (d.absorbed) % d.rate()
for i := 0; i < n; i++ {
a := binary.LittleEndian.Uint64(buf)
d.a[i] ^= a
buf = buf[8:]
}
if len(buf) != 0 {
// XOR in the last partial uint64.
a := uint64(0)
for i, v := range buf {
a |= uint64(v) << uint64(8*i)
}
d.a[n] ^= a
}
}
// For every rate() bytes absorbed, the state must be permuted via the F Function.
if (d.absorbed)%d.rate() == 0 {
keccakF(&d.a)
// copyOut copies uint64s to a byte buffer.
func (d *state) copyOut(b []byte) {
for i := 0; len(b) >= 8; i++ {
binary.LittleEndian.PutUint64(b, d.a[i])
b = b[8:]
}
}
// permute applies the KeccakF-1600 permutation. It handles
// any input-output buffering.
func (d *state) permute() {
switch d.state {
case spongeAbsorbing:
// If we're absorbing, we need to xor the input into the state
// before applying the permutation.
d.xorIn(d.buf)
d.buf = d.storage[:0]
keccakF1600(&d.a)
case spongeSqueezing:
// If we're squeezing, we need to apply the permutation before
// copying more output.
keccakF1600(&d.a)
d.buf = d.storage[:d.rate]
d.copyOut(d.buf)
}
}
// padAndPermute appends the domain separation bits in dsbyte, applies
// the multi-bitrate 10..1 padding rule, and permutes the state.
func (d *state) padAndPermute(dsbyte byte) {
if d.buf == nil {
d.buf = d.storage[:0]
}
// Pad with this instance's domain-separator bits. We know that there's
// at least one byte of space in d.buf because, if it were full,
// permute would have been called to empty it. dsbyte also contains the
// first one bit for the padding. See the comment in the state struct.
d.buf = append(d.buf, dsbyte)
zerosStart := len(d.buf)
d.buf = d.storage[:d.rate]
for i := zerosStart; i < d.rate; i++ {
d.buf[i] = 0
}
// This adds the final one bit for the padding. Because of the way that
// bits are numbered from the LSB upwards, the final bit is the MSB of
// the last byte.
d.buf[d.rate-1] ^= 0x80
// Apply the permutation
d.permute()
d.state = spongeSqueezing
d.buf = d.storage[:d.rate]
d.copyOut(d.buf)
}
// Write absorbs more data into the hash's state. It panics if more data
// is written to the sponge after output has been read from it.
func (d *state) Write(p []byte) (written int, err error) {
if d.state != spongeAbsorbing {
panic("sha3: write to sponge after read")
}
if d.buf == nil {
d.buf = d.storage[:0]
}
written = len(p)
for len(p) > 0 {
if len(d.buf) == 0 && len(p) >= d.rate {
// The fast path; absorb a full "rate" bytes of input and apply the permutation.
d.xorIn(p[:d.rate])
p = p[d.rate:]
keccakF1600(&d.a)
} else {
// The slow path; buffer the input until we can fill the sponge, and then xor it in.
todo := d.rate - len(d.buf)
if todo > len(p) {
todo = len(p)
}
d.buf = append(d.buf, p[:todo]...)
p = p[todo:]
// If the sponge is full, apply the permutation.
if len(d.buf) == d.rate {
d.permute()
}
}
}
// This loop should absorb the bulk of the data into full, aligned lanes.
// It will call the update function as necessary.
for len(p) > 7 {
firstLane := offset / laneSize
lastLane := minInt(d.rate()/laneSize, firstLane+len(p)/laneSize)
// This inner loop absorbs input bytes into the state in groups of 8, converted to uint64s.
for lane := firstLane; lane < lastLane; lane++ {
d.a[lane] ^= binary.LittleEndian.Uint64(p[:laneSize])
p = p[laneSize:]
}
d.absorbed += (lastLane - firstLane) * laneSize
// For every rate() bytes absorbed, the state must be permuted via the F Function.
if (d.absorbed)%d.rate() == 0 {
keccakF(&d.a)
}
offset = 0
}
// If there are insufficient bytes to fill the final lane, do an unaligned absorption.
// This should always start at a correct lane boundary though, or else it would be caught
// by the uneven opening lane case above.
if len(p) > 0 {
d.unalignedAbsorb(p)
}
return toWrite, nil
return
}
// pad computes the SHA3 padding scheme based on the number of bytes absorbed.
// The padding is a 1 bit, followed by an arbitrary number of 0s and then a final 1 bit, such that
// the input bits plus padding bits are a multiple of rate(). Adding the padding simply requires
// xoring an opening and closing bit into the appropriate lanes.
func (d *digest) pad() {
offset := d.absorbed % d.rate()
// The opening pad bit must be shifted into position based on the number of bytes absorbed
padOpenLane := offset / laneSize
d.a[padOpenLane] ^= 0x0000000000000001 << uint(8*(offset%laneSize))
// The closing padding bit is always in the last position
padCloseLane := (d.rate() / laneSize) - 1
d.a[padCloseLane] ^= 0x8000000000000000
}
// finalize prepares the hash to output data by padding and one final permutation of the state.
func (d *digest) finalize() {
d.pad()
keccakF(&d.a)
}
// squeeze outputs an arbitrary number of bytes from the hash state.
// Squeezing can require multiple calls to the F function (one per rate() bytes squeezed),
// although this is not the case for standard SHA3 parameters. This implementation only supports
// squeezing a single time, subsequent squeezes may lose alignment. Future implementations
// may wish to support multiple squeeze calls, for example to support use as a PRNG.
func (d *digest) squeeze(in []byte, toSqueeze int) []byte {
// Because we read in blocks of laneSize, we need enough room to read
// an integral number of lanes
needed := toSqueeze + (laneSize-toSqueeze%laneSize)%laneSize
if cap(in)-len(in) < needed {
newIn := make([]byte, len(in), len(in)+needed)
copy(newIn, in)
in = newIn
// Read squeezes an arbitrary number of bytes from the sponge.
func (d *state) Read(out []byte) (n int, err error) {
// If we're still absorbing, pad and apply the permutation.
if d.state == spongeAbsorbing {
d.padAndPermute(d.dsbyte)
}
out := in[len(in) : len(in)+needed]
n = len(out)
// Now, do the squeezing.
for len(out) > 0 {
for i := 0; i < d.rate() && len(out) > 0; i += laneSize {
binary.LittleEndian.PutUint64(out[:], d.a[i/laneSize])
out = out[laneSize:]
}
if len(out) > 0 {
keccakF(&d.a)
n := copy(out, d.buf)
d.buf = d.buf[n:]
out = out[n:]
// Apply the permutation if we've squeezed the sponge dry.
if len(d.buf) == 0 {
d.permute()
}
}
return in[:len(in)+toSqueeze] // Re-slice in case we wrote extra data.
return
}
// Sum applies padding to the hash state and then squeezes out the desired number of output bytes.
func (d *digest) Sum(in []byte) []byte {
// Make a copy of the original hash so that caller can keep writing and summing.
dup := *d
dup.finalize()
return dup.squeeze(in, dup.outputSize)
// Sum applies padding to the hash state and then squeezes out the desired
// number of output bytes.
func (d *state) Sum(in []byte) []byte {
// Make a copy of the original hash so that caller can keep writing
// and summing.
dup := d.clone()
hash := make([]byte, dup.outputLen)
dup.Read(hash)
return append(in, hash...)
}
// The NewKeccakX constructors enable initializing a hash in any of the four recommended sizes
// from the Keccak specification, all of which set capacity=2*outputSize. Note that the final
// NIST standard for SHA3 may specify different input/output lengths.
// The output size is indicated in bits but converted into bytes internally.
func NewKeccak224() hash.Hash { return &digest{outputSize: 224 / 8, capacity: 2 * 224 / 8} }
func NewKeccak256() hash.Hash { return &digest{outputSize: 256 / 8, capacity: 2 * 256 / 8} }
func NewKeccak384() hash.Hash { return &digest{outputSize: 384 / 8, capacity: 2 * 384 / 8} }
func NewKeccak512() hash.Hash { return &digest{outputSize: 512 / 8, capacity: 2 * 512 / 8} }
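
As the Sum comment above notes, Sum works on a clone of the sponge state, so the caller can take an intermediate digest and keep absorbing. A small sketch for illustration (not part of the diff), with made-up inputs:

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/sha3"
)

func main() {
    h := sha3.New256()
    h.Write([]byte("part one"))
    first := h.Sum(nil) // digest of "part one"; h itself is not finalized

    h.Write([]byte(" and part two"))
    second := h.Sum(nil) // digest of "part one and part two"

    fmt.Printf("%x\n%x\n", first, second)
}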


@ -1,34 +1,56 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// These tests are a subset of those provided by the Keccak web site (http://keccak.noekeon.org/).
// Tests include all the ShortMsgKATs provided by the Keccak team at
// https://github.com/gvanas/KeccakCodePackage
//
// They only include the zero-bit case of the utterly useless bitwise
// testvectors published by NIST in the draft of FIPS-202.
import (
"bytes"
"compress/flate"
"encoding/hex"
"fmt"
"encoding/json"
"hash"
"os"
"strings"
"testing"
)
// testDigests maintains a digest state of each standard type.
var testDigests = map[string]*digest{
"Keccak224": {outputSize: 224 / 8, capacity: 2 * 224 / 8},
"Keccak256": {outputSize: 256 / 8, capacity: 2 * 256 / 8},
"Keccak384": {outputSize: 384 / 8, capacity: 2 * 384 / 8},
"Keccak512": {outputSize: 512 / 8, capacity: 2 * 512 / 8},
const (
testString = "brekeccakkeccak koax koax"
katFilename = "keccakKats.json.deflate"
)
// Internal-use instances of SHAKE used to test against KATs.
func newHashShake128() hash.Hash {
return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
}
func newHashShake256() hash.Hash {
return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
}
// testVector represents a test input and expected outputs from multiple algorithm variants.
type testVector struct {
desc string
input []byte
repeat int // the input will be concatenated this many times.
want map[string]string
// testDigests contains functions returning hash.Hash instances
// with output-length equal to the KAT length for both SHA-3 and
// SHAKE instances.
var testDigests = map[string]func() hash.Hash{
"SHA3-224": New224,
"SHA3-256": New256,
"SHA3-384": New384,
"SHA3-512": New512,
"SHAKE128": newHashShake128,
"SHAKE256": newHashShake256,
}
// testShakes contains functions returning ShakeHash instances for
// testing the ShakeHash-specific interface.
var testShakes = map[string]func() ShakeHash{
"SHAKE128": NewShake128,
"SHAKE256": NewShake256,
}
// decodeHex converts a hex-encoded string into a raw byte string.
@ -40,102 +62,61 @@ func decodeHex(s string) []byte {
return b
}
// shortTestVectors stores a series of short testVectors.
// Inputs of 8, 248, and 264 bits from http://keccak.noekeon.org/ are included below.
// The standard defines additional test inputs of all sizes between 0 and 2047 bits.
// Because the current implementation can only handle an integral number of bytes,
// most of the standard test inputs can't be used.
var shortKeccakTestVectors = []testVector{
{
desc: "short-8b",
input: decodeHex("CC"),
repeat: 1,
want: map[string]string{
"Keccak224": "A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802",
"Keccak256": "EEAD6DBFC7340A56CAEDC044696A168870549A6A7F6F56961E84A54BD9970B8A",
"Keccak384": "1B84E62A46E5A201861754AF5DC95C4A1A69CAF4A796AE405680161E29572641F5FA1E8641D7958336EE7B11C58F73E9",
"Keccak512": "8630C13CBD066EA74BBE7FE468FEC1DEE10EDC1254FB4C1B7C5FD69B646E44160B8CE01D05A0908CA790DFB080F4B513BC3B6225ECE7A810371441A5AC666EB9",
},
},
{
desc: "short-248b",
input: decodeHex("84FB51B517DF6C5ACCB5D022F8F28DA09B10232D42320FFC32DBECC3835B29"),
repeat: 1,
want: map[string]string{
"Keccak224": "81AF3A7A5BD4C1F948D6AF4B96F93C3B0CF9C0E7A6DA6FCD71EEC7F6",
"Keccak256": "D477FB02CAAA95B3280EC8EE882C29D9E8A654B21EF178E0F97571BF9D4D3C1C",
"Keccak384": "503DCAA4ADDA5A9420B2E436DD62D9AB2E0254295C2982EF67FCE40F117A2400AB492F7BD5D133C6EC2232268BC27B42",
"Keccak512": "9D8098D8D6EDBBAA2BCFC6FB2F89C3EAC67FEC25CDFE75AA7BD570A648E8C8945FF2EC280F6DCF73386109155C5BBC444C707BB42EAB873F5F7476657B1BC1A8",
},
},
{
desc: "short-264b",
input: decodeHex("DE8F1B3FAA4B7040ED4563C3B8E598253178E87E4D0DF75E4FF2F2DEDD5A0BE046"),
repeat: 1,
want: map[string]string{
"Keccak224": "F217812E362EC64D4DC5EACFABC165184BFA456E5C32C2C7900253D0",
"Keccak256": "E78C421E6213AFF8DE1F025759A4F2C943DB62BBDE359C8737E19B3776ED2DD2",
"Keccak384": "CF38764973F1EC1C34B5433AE75A3AAD1AAEF6AB197850C56C8617BCD6A882F6666883AC17B2DCCDBAA647075D0972B5",
"Keccak512": "9A7688E31AAF40C15575FC58C6B39267AAD3722E696E518A9945CF7F7C0FEA84CB3CB2E9F0384A6B5DC671ADE7FB4D2B27011173F3EEEAF17CB451CF26542031",
},
},
}
// longTestVectors stores longer testVectors (currently only one).
// The computed test vector is 64 MiB long and is a truncated version of the
// ExtremelyLongMsgKAT taken from http://keccak.noekeon.org/.
var longKeccakTestVectors = []testVector{
{
desc: "long-64MiB",
input: []byte("abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmno"),
repeat: 1024 * 1024,
want: map[string]string{
"Keccak224": "50E35E40980FEEFF1EA490957B0E970257F75EA0D410EE0F0B8A7A58",
"Keccak256": "5015A4935F0B51E091C6550A94DCD262C08998232CCAA22E7F0756DEAC0DC0D0",
"Keccak384": "7907A8D0FAA7BC6A90FE14C6C958C956A0877E751455D8F13ACDB96F144B5896E716C06EC0CB56557A94EF5C3355F6F3",
"Keccak512": "3EC327D6759F769DEB74E80CA70C831BC29CAB048A4BF4190E4A1DD5C6507CF2B4B58937FDE81D36014E7DFE1B1DD8B0F27CB7614F9A645FEC114F1DAAEFC056",
},
},
}
// TestKeccakVectors checks that correct output is produced for a set of known testVectors.
func TestKeccakVectors(t *testing.T) {
testCases := append([]testVector{}, shortKeccakTestVectors...)
if !testing.Short() {
testCases = append(testCases, longKeccakTestVectors...)
// structs used to marshal JSON test-cases.
type KeccakKats struct {
Kats map[string][]struct {
Digest string `json:"digest"`
Length int64 `json:"length"`
Message string `json:"message"`
}
for _, tc := range testCases {
for alg, want := range tc.want {
d := testDigests[alg]
}
// TestKeccakKats tests the SHA-3 and Shake implementations against all the
// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
// (The testvectors are stored in keccakKats.json.deflate due to their length.)
func TestKeccakKats(t *testing.T) {
// Read the KATs.
deflated, err := os.Open(katFilename)
if err != nil {
t.Errorf("Error opening %s: %s", katFilename, err)
}
file := flate.NewReader(deflated)
dec := json.NewDecoder(file)
var katSet KeccakKats
err = dec.Decode(&katSet)
if err != nil {
t.Errorf("%s", err)
}
// Do the KATs.
for functionName, kats := range katSet.Kats {
d := testDigests[functionName]()
t.Logf("%s", functionName)
for _, kat := range kats {
d.Reset()
for i := 0; i < tc.repeat; i++ {
d.Write(tc.input)
in, err := hex.DecodeString(kat.Message)
if err != nil {
t.Errorf("%s", err)
}
d.Write(in[:kat.Length/8])
got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
want := kat.Digest
if got != want {
t.Errorf("%s, alg=%s\ngot %q, want %q", tc.desc, alg, got, want)
t.Errorf("function=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
functionName, kat.Length, kat.Message, got, want)
t.Logf("wanted %+v", kat)
t.FailNow()
}
}
}
}
// dumpState is a debugging function to pretty-print the internal state of the hash.
func (d *digest) dumpState() {
fmt.Printf("SHA3 hash, %d B output, %d B capacity (%d B rate)\n", d.outputSize, d.capacity, d.rate())
fmt.Printf("Internal state after absorbing %d B:\n", d.absorbed)
for x := 0; x < sliceSize; x++ {
for y := 0; y < sliceSize; y++ {
fmt.Printf("%v, ", d.a[x*sliceSize+y])
}
fmt.Println("")
}
}
// TestUnalignedWrite tests that writing data in an arbitrary pattern with small input buffers.
// TestUnalignedWrite tests that writing data in an arbitrary pattern with
// small input buffers works correctly.
func TestUnalignedWrite(t *testing.T) {
buf := sequentialBytes(0x10000)
for alg, d := range testDigests {
for alg, df := range testDigests {
d := df()
d.Reset()
d.Write(buf)
want := d.Sum(nil)
@ -145,7 +126,9 @@ func TestUnalignedWrite(t *testing.T) {
// Because 137 is prime this sequence should exercise all corner cases.
offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
for _, j := range offsets {
j = minInt(j, len(buf)-i)
if v := len(buf) - i; v < j {
j = v
}
d.Write(buf[i : i+j])
i += j
}
@ -157,8 +140,9 @@ func TestUnalignedWrite(t *testing.T) {
}
}
// Test that appending works when reallocation is necessary.
func TestAppend(t *testing.T) {
d := NewKeccak224()
d := New224()
for capacity := 2; capacity < 64; capacity += 64 {
// The first time around the loop, Sum will have to reallocate.
@ -167,24 +151,57 @@ func TestAppend(t *testing.T) {
d.Reset()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "0000A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802"
expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("got %s, want %s", got, expected)
}
}
}
// Test that appending works when no reallocation is necessary.
func TestAppendNoRealloc(t *testing.T) {
buf := make([]byte, 1, 200)
d := NewKeccak224()
d := New224()
d.Write([]byte{0xcc})
buf = d.Sum(buf)
expected := "00A9CAB59EB40A10B246290F2D6086E32E3689FAF1D26B470C899F2802"
expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
t.Errorf("got %s, want %s", got, expected)
}
}
// TestSqueezing checks that squeezing the full output a single time produces
// the same output as repeatedly squeezing the instance.
func TestSqueezing(t *testing.T) {
for functionName, newShakeHash := range testShakes {
t.Logf("%s", functionName)
d0 := newShakeHash()
d0.Write([]byte(testString))
ref := make([]byte, 32)
d0.Read(ref)
d1 := newShakeHash()
d1.Write([]byte(testString))
var multiple []byte
for _ = range ref {
one := make([]byte, 1)
d1.Read(one)
multiple = append(multiple, one...)
}
if !bytes.Equal(ref, multiple) {
t.Errorf("squeezing %d bytes one at a time failed", len(ref))
}
}
}
func TestReadSimulation(t *testing.T) {
d := NewShake256()
d.Write(nil)
dwr := make([]byte, 32)
d.Read(dwr)
}
// sequentialBytes produces a buffer of size consecutive bytes 0x00, 0x01, ..., used for testing.
func sequentialBytes(size int) []byte {
result := make([]byte, size)
@ -194,77 +211,39 @@ func sequentialBytes(size int) []byte {
return result
}
// benchmarkBlockWrite tests the speed of writing data and never calling the permutation function.
func benchmarkBlockWrite(b *testing.B, d *digest) {
b.StopTimer()
d.Reset()
// Write all but the last byte of a block, to ensure that the permutation is not called.
data := sequentialBytes(d.rate() - 1)
b.SetBytes(int64(len(data)))
b.StartTimer()
for i := 0; i < b.N; i++ {
d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function
d.Write(data)
}
b.StopTimer()
d.Reset()
}
// BenchmarkPermutationFunction measures the speed of the permutation function with no input data.
// BenchmarkPermutationFunction measures the speed of the permutation function
// with no input data.
func BenchmarkPermutationFunction(b *testing.B) {
b.SetBytes(int64(stateSize))
var lanes [numLanes]uint64
b.SetBytes(int64(200))
var lanes [25]uint64
for i := 0; i < b.N; i++ {
keccakF(&lanes)
keccakF1600(&lanes)
}
}
// BenchmarkSingleByteWrite tests the latency from writing a single byte
func BenchmarkSingleByteWrite(b *testing.B) {
b.StopTimer()
d := testDigests["Keccak512"]
d.Reset()
data := sequentialBytes(1) //1 byte buffer
b.SetBytes(int64(d.rate()) - 1)
b.StartTimer()
for i := 0; i < b.N; i++ {
d.absorbed = 0 // Reset absorbed to avoid ever calling the permutation function
// Write all but the last byte of a block, one byte at a time.
for j := 0; j < d.rate()-1; j++ {
d.Write(data)
}
}
b.StopTimer()
d.Reset()
}
// BenchmarkSingleByteX measures the block write speed for each size of the digest.
func BenchmarkBlockWrite512(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak512"]) }
func BenchmarkBlockWrite384(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak384"]) }
func BenchmarkBlockWrite256(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak256"]) }
func BenchmarkBlockWrite224(b *testing.B) { benchmarkBlockWrite(b, testDigests["Keccak224"]) }
// benchmarkBulkHash tests the speed to hash a 16 KiB buffer.
func benchmarkBulkHash(b *testing.B, h hash.Hash) {
// benchmarkBulkHash tests the speed to hash a buffer of the given size.
func benchmarkBulkHash(b *testing.B, h hash.Hash, size int) {
b.StopTimer()
h.Reset()
size := 1 << 14
data := sequentialBytes(size)
b.SetBytes(int64(size))
b.StartTimer()
var digest []byte
var state []byte
for i := 0; i < b.N; i++ {
h.Write(data)
digest = h.Sum(digest[:0])
state = h.Sum(state[:0])
}
b.StopTimer()
h.Reset()
}
// benchmarkBulkKeccakX tests the speed to hash a 16 KiB buffer by calling benchmarkBulkHash.
func BenchmarkBulkKeccak512(b *testing.B) { benchmarkBulkHash(b, NewKeccak512()) }
func BenchmarkBulkKeccak384(b *testing.B) { benchmarkBulkHash(b, NewKeccak384()) }
func BenchmarkBulkKeccak256(b *testing.B) { benchmarkBulkHash(b, NewKeccak256()) }
func BenchmarkBulkKeccak224(b *testing.B) { benchmarkBulkHash(b, NewKeccak224()) }
func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkBulkHash(b, New512(), 1350) }
func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkBulkHash(b, New384(), 1350) }
func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkBulkHash(b, New256(), 1350) }
func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkBulkHash(b, New224(), 1350) }
func BenchmarkShake256_MTU(b *testing.B) { benchmarkBulkHash(b, newHashShake256(), 1350) }
func BenchmarkShake128_MTU(b *testing.B) { benchmarkBulkHash(b, newHashShake128(), 1350) }
func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkBulkHash(b, New512(), 1<<20) }
func BenchmarkShake256_1MiB(b *testing.B) { benchmarkBulkHash(b, newHashShake256(), 1<<20) }


@ -0,0 +1,60 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file defines the ShakeHash interface, and provides
// functions for creating SHAKE instances, as well as utility
// functions for hashing bytes to arbitrary-length output.
import (
"io"
)
// ShakeHash defines the interface to hash functions that
// support arbitrary-length output.
type ShakeHash interface {
// Write absorbs more data into the hash's state. It panics if input is
// written to it after output has been read from it.
io.Writer
// Read reads more output from the hash; reading affects the hash's
// state. (ShakeHash.Read is thus very different from Hash.Sum)
// It never returns an error.
io.Reader
// Clone returns a copy of the ShakeHash in its current state.
Clone() ShakeHash
// Reset resets the ShakeHash to its initial state.
Reset()
}
func (d *state) Clone() ShakeHash {
return d.clone()
}
// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
// Its generic security strength is 128 bits against all attacks if at
// least 32 bytes of its output are used.
func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
// Its generic security strength is 256 bits against all attacks if
// at least 64 bytes of its output are used.
func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
// ShakeSum128 writes an arbitrary-length digest of data into hash.
func ShakeSum128(hash, data []byte) {
h := NewShake128()
h.Write(data)
h.Read(hash)
}
// ShakeSum256 writes an arbitrary-length digest of data into hash.
func ShakeSum256(hash, data []byte) {
h := NewShake256()
h.Write(data)
h.Read(hash)
}
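
Clone makes it cheap to derive several outputs from a common absorbed prefix. A minimal sketch for illustration (not part of the diff), with made-up inputs:

package main

import (
    "fmt"

    "code.google.com/p/go.crypto/sha3"
)

func main() {
    d := sha3.NewShake128()
    d.Write([]byte("common prefix"))

    c := d.Clone() // independent copy of the absorbed state
    d.Write([]byte(" / suffix A"))
    c.Write([]byte(" / suffix B"))

    outA := make([]byte, 32)
    outB := make([]byte, 32)
    d.Read(outA)
    c.Read(outB)
    fmt.Printf("%x\n%x\n", outA, outB)
}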


@ -15,6 +15,24 @@ func TestReadWrite(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewWriter(buf)
reader := NewReader(buf)
SubtestReadWrite(t, writer, reader)
}
func TestReadWriteMsg(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewWriter(buf)
reader := NewReader(buf)
SubtestReadWriteMsg(t, writer, reader)
}
func TestReadWriteMsgSync(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewWriter(buf)
reader := NewReader(buf)
SubtestReadWriteMsgSync(t, writer, reader)
}
func SubtestReadWrite(t *testing.T, writer WriteCloser, reader ReadCloser) {
msgs := [1000][]byte{}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
@ -58,10 +76,7 @@ func TestReadWrite(t *testing.T) {
}
}
func TestReadWriteMsg(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewWriter(buf)
reader := NewReader(buf)
func SubtestReadWriteMsg(t *testing.T, writer WriteCloser, reader ReadCloser) {
msgs := [1000][]byte{}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
@ -100,10 +115,7 @@ func TestReadWriteMsg(t *testing.T) {
}
}
func TestReadWriteMsgSync(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewWriter(buf)
reader := NewReader(buf)
func SubtestReadWriteMsgSync(t *testing.T, writer WriteCloser, reader ReadCloser) {
msgs := [1000][]byte{}
r := rand.New(rand.NewSource(time.Now().UnixNano()))


@ -0,0 +1,182 @@
package msgio
import (
"encoding/binary"
"io"
"sync"
mpool "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio/mpool"
)
// varintWriter is the underlying type that implements the Writer interface.
type varintWriter struct {
W io.Writer
lbuf []byte // for encoding varints
lock sync.Locker // for threadsafe writes
}
// NewVarintWriter wraps an io.Writer with a varint msgio framed writer.
// The msgio.Writer will write the length prefix of every message written
// as a varint, using https://golang.org/pkg/encoding/binary/#PutUvarint
func NewVarintWriter(w io.Writer) WriteCloser {
return &varintWriter{
W: w,
lbuf: make([]byte, binary.MaxVarintLen64),
lock: new(sync.Mutex),
}
}
func (s *varintWriter) Write(msg []byte) (int, error) {
err := s.WriteMsg(msg)
if err != nil {
return 0, err
}
return len(msg), nil
}
func (s *varintWriter) WriteMsg(msg []byte) error {
s.lock.Lock()
defer s.lock.Unlock()
length := uint64(len(msg))
n := binary.PutUvarint(s.lbuf, length)
if _, err := s.W.Write(s.lbuf[:n]); err != nil {
return err
}
_, err := s.W.Write(msg)
return err
}
func (s *varintWriter) Close() error {
s.lock.Lock()
defer s.lock.Unlock()
if c, ok := s.W.(io.Closer); ok {
return c.Close()
}
return nil
}
// varintReader is the underlying type that implements the Reader interface.
type varintReader struct {
R io.Reader
br io.ByteReader // for reading varints.
lbuf []byte
next int
pool *mpool.Pool
lock sync.Locker
}
// NewVarintReader wraps an io.Reader with a varint msgio framed reader.
// The msgio.Reader will read whole messages at a time (using the length).
// Varints read according to https://golang.org/pkg/encoding/binary/#ReadUvarint
// Assumes an equivalent writer on the other side.
func NewVarintReader(r io.Reader) ReadCloser {
return NewVarintReaderWithPool(r, &mpool.ByteSlicePool)
}
// NewVarintReaderWithPool wraps an io.Reader with a varint msgio framed reader.
// The msgio.Reader will read whole messages at a time (using the length).
// Varints read according to https://golang.org/pkg/encoding/binary/#ReadUvarint
// Assumes an equivalent writer on the other side. It uses a given mpool.Pool
func NewVarintReaderWithPool(r io.Reader, p *mpool.Pool) ReadCloser {
if p == nil {
panic("nil pool")
}
return &varintReader{
R: r,
br: &simpleByteReader{R: r},
lbuf: make([]byte, binary.MaxVarintLen64),
next: -1,
pool: p,
lock: new(sync.Mutex),
}
}
// NextMsgLen reads the length of the next msg into s.lbuf, and returns it.
// WARNING: like Read, NextMsgLen is destructive. It reads from the internal
// reader.
func (s *varintReader) NextMsgLen() (int, error) {
s.lock.Lock()
defer s.lock.Unlock()
return s.nextMsgLen()
}
func (s *varintReader) nextMsgLen() (int, error) {
if s.next == -1 {
length, err := binary.ReadUvarint(s.br)
if err != nil {
return 0, err
}
s.next = int(length)
}
return s.next, nil
}
func (s *varintReader) Read(msg []byte) (int, error) {
s.lock.Lock()
defer s.lock.Unlock()
length, err := s.nextMsgLen()
if err != nil {
return 0, err
}
if length > len(msg) {
return 0, io.ErrShortBuffer
}
_, err = io.ReadFull(s.R, msg[:length])
s.next = -1 // signal we've consumed this msg
return length, err
}
func (s *varintReader) ReadMsg() ([]byte, error) {
s.lock.Lock()
defer s.lock.Unlock()
length, err := s.nextMsgLen()
if err != nil {
return nil, err
}
msgb := s.pool.Get(uint32(length))
if msgb == nil {
return nil, io.ErrShortBuffer
}
msg := msgb.([]byte)[:length]
_, err = io.ReadFull(s.R, msg)
s.next = -1 // signal we've consumed this msg
return msg, err
}
func (s *varintReader) ReleaseMsg(msg []byte) {
s.pool.Put(uint32(cap(msg)), msg)
}
func (s *varintReader) Close() error {
s.lock.Lock()
defer s.lock.Unlock()
if c, ok := s.R.(io.Closer); ok {
return c.Close()
}
return nil
}
type simpleByteReader struct {
R io.Reader
buf []byte
}
func (r *simpleByteReader) ReadByte() (c byte, err error) {
if r.buf == nil {
r.buf = make([]byte, 1)
}
if _, err := io.ReadFull(r.R, r.buf); err != nil {
return 0, err
}
return r.buf[0], nil
}
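
A minimal round trip over an in-memory buffer, mirroring the tests that follow (a sketch for illustration only, with a made-up payload, using the upstream import path recorded in Godeps.json):

package main

import (
    "bytes"
    "fmt"
    "log"

    msgio "github.com/jbenet/go-msgio" // upstream path recorded in Godeps.json
)

func main() {
    var buf bytes.Buffer
    w := msgio.NewVarintWriter(&buf)
    r := msgio.NewVarintReader(&buf)

    if err := w.WriteMsg([]byte("hello world")); err != nil {
        log.Fatal(err)
    }
    msg, err := r.ReadMsg() // reads the uvarint length, then the payload
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", msg)
    r.ReleaseMsg(msg) // return the buffer to the pool
}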


@ -0,0 +1,66 @@
package msgio
import (
"bytes"
"encoding/binary"
"testing"
)
func TestVarintReadWrite(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewVarintWriter(buf)
reader := NewVarintReader(buf)
SubtestReadWrite(t, writer, reader)
}
func TestVarintReadWriteMsg(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewVarintWriter(buf)
reader := NewVarintReader(buf)
SubtestReadWriteMsg(t, writer, reader)
}
func TestVarintReadWriteMsgSync(t *testing.T) {
buf := bytes.NewBuffer(nil)
writer := NewVarintWriter(buf)
reader := NewVarintReader(buf)
SubtestReadWriteMsgSync(t, writer, reader)
}
func TestVarintWrite(t *testing.T) {
SubtestVarintWrite(t, []byte("hello world"))
SubtestVarintWrite(t, []byte("hello world hello world hello world"))
SubtestVarintWrite(t, make([]byte, 1<<20))
SubtestVarintWrite(t, []byte(""))
}
func SubtestVarintWrite(t *testing.T, msg []byte) {
buf := bytes.NewBuffer(nil)
writer := NewVarintWriter(buf)
if err := writer.WriteMsg(msg); err != nil {
t.Fatal(err)
}
bb := buf.Bytes()
sbr := simpleByteReader{R: buf}
length, err := binary.ReadUvarint(&sbr)
if err != nil {
t.Fatal(err)
}
t.Logf("checking varint is %d", len(msg))
if int(length) != len(msg) {
t.Fatalf("incorrect varint: %d != %d", length, len(msg))
}
lbuf := make([]byte, binary.MaxVarintLen64)
n := binary.PutUvarint(lbuf, length)
bblen := int(length) + n
t.Logf("checking wrote (%d + %d) bytes", length, n)
if len(bb) != bblen {
t.Fatalf("wrote incorrect number of bytes: %d != %d", len(bb), bblen)
}
}


@ -0,0 +1,79 @@
package multihash
import (
"fmt"
"io"
)
// Reader is an io.Reader wrapper that exposes a function
// to read a whole multihash, parse it, and return it.
type Reader interface {
io.Reader
ReadMultihash() (Multihash, error)
}
// Writer is an io.Writer wrapper that exposes a function
// to write a whole multihash.
type Writer interface {
io.Writer
WriteMultihash(Multihash) error
}
// NewReader wraps an io.Reader with a multihash.Reader
func NewReader(r io.Reader) Reader {
return &mhReader{r}
}
// NewWriter wraps an io.Writer with a multihash.Writer
func NewWriter(w io.Writer) Writer {
return &mhWriter{w}
}
type mhReader struct {
r io.Reader
}
func (r *mhReader) Read(buf []byte) (n int, err error) {
return r.r.Read(buf)
}
func (r *mhReader) ReadMultihash() (Multihash, error) {
mhhdr := make([]byte, 2)
if _, err := io.ReadFull(r.r, mhhdr); err != nil {
return nil, err
}
// first byte is the algo, the second is the length.
// (varints someday...)
length := uint(mhhdr[1])
if length > 127 {
return nil, fmt.Errorf("varints not yet supported (length is %d)", length)
}
buf := make([]byte, length+2)
buf[0] = mhhdr[0]
buf[1] = mhhdr[1]
if _, err := io.ReadFull(r.r, buf[2:]); err != nil {
return nil, err
}
return Cast(buf)
}
type mhWriter struct {
w io.Writer
}
func (w *mhWriter) Write(buf []byte) (n int, err error) {
return w.w.Write(buf)
}
func (w *mhWriter) WriteMultihash(m Multihash) error {
_, err := w.w.Write([]byte(m))
return err
}
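
A short round trip through the new Reader and Writer, for illustration only. The sample value is the multihash of the SHA-1 digest of the empty string: code 0x11 (SHA1), length 0x14 (20 bytes), then the digest.

package main

import (
    "bytes"
    "fmt"
    "log"

    multihash "github.com/jbenet/go-multihash" // upstream path recorded in Godeps.json
)

func main() {
    m, err := multihash.FromHexString("1114da39a3ee5e6b4b0d3255bfef95601890afd80709")
    if err != nil {
        log.Fatal(err)
    }

    var buf bytes.Buffer
    if err := multihash.NewWriter(&buf).WriteMultihash(m); err != nil {
        log.Fatal(err)
    }
    m2, err := multihash.NewReader(&buf).ReadMultihash()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(m2.HexString()) // prints the same bytes that were written
}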


@ -0,0 +1,69 @@
package multihash
import (
"bytes"
"io"
"testing"
)
func TestReader(t *testing.T) {
var buf bytes.Buffer
for _, tc := range testCases {
m, err := tc.Multihash()
if err != nil {
t.Fatal(err)
}
buf.Write([]byte(m))
}
r := NewReader(&buf)
for _, tc := range testCases {
h, err := tc.Multihash()
if err != nil {
t.Fatal(err)
}
h2, err := r.ReadMultihash()
if err != nil {
t.Error(err)
continue
}
if !bytes.Equal(h, h2) {
t.Error("h and h2 should be equal")
}
}
}
func TestWriter(t *testing.T) {
var buf bytes.Buffer
w := NewWriter(&buf)
for _, tc := range testCases {
m, err := tc.Multihash()
if err != nil {
t.Error(err)
continue
}
if err := w.WriteMultihash(m); err != nil {
t.Error(err)
continue
}
buf2 := make([]byte, len(m))
if _, err := io.ReadFull(&buf, buf2); err != nil {
t.Error(err)
continue
}
if !bytes.Equal(m, buf2) {
t.Error("m and buf2 should be equal")
}
}
}

View File

@ -2,43 +2,67 @@ package multihash
import (
"encoding/hex"
"errors"
"fmt"
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
)
// errors
var (
ErrUnknownCode = errors.New("unknown multihash code")
ErrTooShort = errors.New("multihash too short. must be > 3 bytes")
ErrTooLong = errors.New("multihash too long. must be < 129 bytes")
ErrLenNotSupported = errors.New("multihash does not yet support digests longer than 127 bytes")
)
// ErrInconsistentLen is returned when a decoded multihash has an inconsistent length
type ErrInconsistentLen struct {
dm *DecodedMultihash
}
func (e ErrInconsistentLen) Error() string {
return fmt.Sprintf("multihash length inconsistent: %v", e.dm)
}
// constants
const SHA1 = 0x11
const SHA2_256 = 0x12
const SHA2_512 = 0x13
const SHA3 = 0x14
const BLAKE2B = 0x40
const BLAKE2S = 0x41
const (
SHA1 = 0x11
SHA2_256 = 0x12
SHA2_512 = 0x13
SHA3 = 0x14
BLAKE2B = 0x40
BLAKE2S = 0x41
)
// Names maps the name of a hash to the code
var Names = map[string]int{
"sha1": 0x11,
"sha2-256": 0x12,
"sha2-512": 0x13,
"sha3": 0x14,
"blake2b": 0x40,
"blake2s": 0x41,
"sha1": SHA1,
"sha2-256": SHA2_256,
"sha2-512": SHA2_512,
"sha3": SHA3,
"blake2b": BLAKE2B,
"blake2s": BLAKE2S,
}
// Codes maps a hash code to its name
var Codes = map[int]string{
0x11: "sha1",
0x12: "sha2-256",
0x13: "sha2-512",
0x14: "sha3",
0x40: "blake2b",
0x41: "blake2s",
SHA1: "sha1",
SHA2_256: "sha2-256",
SHA2_512: "sha2-512",
SHA3: "sha3",
BLAKE2B: "blake2b",
BLAKE2S: "blake2s",
}
// DefaultLengths maps a hash code to its default length
var DefaultLengths = map[int]int{
0x11: 20,
0x12: 32,
0x13: 64,
0x14: 64,
0x40: 64,
0x41: 32,
SHA1: 20,
SHA2_256: 32,
SHA2_512: 64,
SHA3: 64,
BLAKE2B: 64,
BLAKE2S: 32,
}
type DecodedMultihash struct {
@ -50,8 +74,12 @@ type DecodedMultihash struct {
type Multihash []byte
func (m Multihash) HexString() string {
return hex.EncodeToString([]byte(m))
func (m *Multihash) HexString() string {
return hex.EncodeToString([]byte(*m))
}
func (m *Multihash) String() string {
return m.HexString()
}
func FromHexString(s string) (Multihash, error) {
@ -88,21 +116,21 @@ func Cast(buf []byte) (Multihash, error) {
}
if !ValidCode(dm.Code) {
return Multihash{}, fmt.Errorf("unknown multihash code")
return Multihash{}, ErrUnknownCode
}
return Multihash(buf), nil
}
// Decodes a hash from the given Multihash.
// Decode a hash from the given Multihash.
func Decode(buf []byte) (*DecodedMultihash, error) {
if len(buf) < 3 {
return nil, fmt.Errorf("multihash too short. must be > 3 bytes.")
return nil, ErrTooShort
}
if len(buf) > 129 {
return nil, fmt.Errorf("multihash too long. must be < 129 bytes.")
return nil, ErrTooLong
}
dm := &DecodedMultihash{
@ -113,23 +141,22 @@ func Decode(buf []byte) (*DecodedMultihash, error) {
}
if len(dm.Digest) != dm.Length {
return nil, fmt.Errorf("multihash length inconsistent: %v", dm)
return nil, ErrInconsistentLen{dm}
}
return dm, nil
}
// Encodes a hash digest along with the specified function code.
// Encode a hash digest along with the specified function code.
// Note: the length is derived from the length of the digest itself.
func Encode(buf []byte, code int) ([]byte, error) {
if !ValidCode(code) {
return nil, fmt.Errorf("unknown multihash code")
return nil, ErrUnknownCode
}
if len(buf) > 127 {
m := "multihash does not yet support digests longer than 127 bytes."
return nil, fmt.Errorf(m)
return nil, ErrLenNotSupported
}
pre := make([]byte, 2)
@ -142,7 +169,7 @@ func EncodeName(buf []byte, name string) ([]byte, error) {
return Encode(buf, Names[name])
}
// Checks whether a multihash code is valid.
// ValidCode checks whether a multihash code is valid.
func ValidCode(code int) bool {
if AppCode(code) {
return true
@ -155,7 +182,7 @@ func ValidCode(code int) bool {
return false
}
// Checks whether a multihash code is part of the App range.
// AppCode checks whether a multihash code is part of the App range.
func AppCode(code int) bool {
return code >= 0 && code < 0x10
}

View File

@ -3,6 +3,7 @@ package multihash
import (
"bytes"
"encoding/hex"
"fmt"
"testing"
)
@ -31,6 +32,19 @@ var testCases = []TestCase{
TestCase{"0beec7b5ea3f0fdbc9", 0x40, "blake2b"},
}
func (tc TestCase) Multihash() (Multihash, error) {
ob, err := hex.DecodeString(tc.hex)
if err != nil {
return nil, err
}
b := make([]byte, 2+len(ob))
b[0] = byte(uint8(tc.code))
b[1] = byte(uint8(len(ob)))
copy(b[2:], ob)
return Cast(b)
}
func TestEncode(t *testing.T) {
for _, tc := range testCases {
ob, err := hex.DecodeString(tc.hex)
@ -63,9 +77,28 @@ func TestEncode(t *testing.T) {
if !bytes.Equal(encN, nb) {
t.Error("encoded byte mismatch: ", encN, nb)
}
h, err := tc.Multihash()
if err != nil {
t.Error(err)
}
if !bytes.Equal(h, nb) {
t.Error("Multihash func mismatch.")
}
}
}
func ExampleEncodeName() {
// ignores errors for simplicity - don't do that at home.
buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
mhbuf, _ := EncodeName(buf, "sha1")
mhhex := hex.EncodeToString(mhbuf)
fmt.Printf("hex: %v\n", mhhex)
// Output:
// hex: 11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33
}
func TestDecode(t *testing.T) {
for _, tc := range testCases {
ob, err := hex.DecodeString(tc.hex)
@ -114,6 +147,18 @@ func TestTable(t *testing.T) {
}
}
func ExampleDecode() {
// ignores errors for simplicity - don't do that at home.
buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
mhbuf, _ := EncodeName(buf, "sha1")
o, _ := Decode(mhbuf)
mhhex := hex.EncodeToString(o.Digest)
fmt.Printf("obj: %v 0x%x %d %s\n", o.Name, o.Code, o.Length, mhhex)
// Output:
// obj: sha1 0x11 20 0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33
}
func TestValidCode(t *testing.T) {
for i := 0; i < 0xff; i++ {
_, ok := tCodes[i]
@ -127,7 +172,7 @@ func TestValidCode(t *testing.T) {
func TestAppCode(t *testing.T) {
for i := 0; i < 0xff; i++ {
b := i > 0 && i < 0x10
b := i >= 0 && i < 0x10
if AppCode(i) != b {
t.Error("AppCode incorrect for: ", i)
}
@ -190,3 +235,36 @@ func TestHex(t *testing.T) {
}
}
}
func BenchmarkEncode(b *testing.B) {
tc := testCases[0]
ob, err := hex.DecodeString(tc.hex)
if err != nil {
b.Error(err)
return
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
Encode(ob, tc.code)
}
}
func BenchmarkDecode(b *testing.B) {
tc := testCases[0]
ob, err := hex.DecodeString(tc.hex)
if err != nil {
b.Error(err)
return
}
pre := make([]byte, 2)
pre[0] = byte(uint8(tc.code))
pre[1] = byte(uint8(len(ob)))
nb := append(pre, ob...)
b.ResetTimer()
for i := 0; i < b.N; i++ {
Decode(nb)
}
}

View File

@ -4,10 +4,14 @@ import (
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"errors"
"fmt"
sha3 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.crypto/sha3"
)
var ErrSumNotSupported = errors.New("Function not implemented. Complain to lib maintainer.")
func Sum(data []byte, code int, length int) (Multihash, error) {
m := Multihash{}
err := error(nil)
@ -26,7 +30,7 @@ func Sum(data []byte, code int, length int) (Multihash, error) {
case SHA3:
d, err = sumSHA3(data)
default:
return m, fmt.Errorf("Function not implemented. Complain to lib maintainer.")
return m, ErrSumNotSupported
}
if err != nil {
@ -60,7 +64,7 @@ func sumSHA512(data []byte) []byte {
}
func sumSHA3(data []byte) ([]byte, error) {
h := sha3.NewKeccak512()
h := sha3.New512()
if _, err := h.Write(data); err != nil {
return nil, err
}

View File

@ -57,3 +57,10 @@ func TestSum(t *testing.T) {
}
}
}
func BenchmarkSum(b *testing.B) {
tc := sumTestCases[0]
for i := 0; i < b.N; i++ {
Sum([]byte(tc.input), tc.code, tc.length)
}
}
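
A short hashing sketch tying Sum to the codes and DefaultLengths defined earlier; the import path is assumed, and the comment on the output follows from the code/length header bytes.

package main

import (
	"fmt"

	mh "github.com/jbenet/go-multihash"
)

func main() {
	data := []byte("hello, multihash")

	// code picks the hash function; length is the digest length to keep
	m, err := mh.Sum(data, mh.SHA2_256, mh.DefaultLengths[mh.SHA2_256])
	if err != nil {
		panic(err)
	}
	// prints 0x12 (sha2-256), then 0x20 (32-byte digest), then the digest itself
	fmt.Println(m.HexString())
}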

View File

@ -1,21 +0,0 @@
package main
import (
"encoding/hex"
"fmt"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
)
func main() {
// ignores errors for simplicity.
// don't do that at home.
buf, _ := hex.DecodeString("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33")
mhbuf, _ := multihash.EncodeName(buf, "sha1")
mhhex := hex.EncodeToString(mhbuf)
fmt.Printf("hex: %v\n", mhhex)
o, _ := multihash.Decode(mhbuf)
mhhex = hex.EncodeToString(o.Digest)
fmt.Printf("obj: %v 0x%x %d %s\n", o.Name, o.Code, o.Length, mhhex)
}

Binary file not shown.

View File

@ -12,10 +12,10 @@ import (
cmds "github.com/jbenet/go-ipfs/commands"
config "github.com/jbenet/go-ipfs/config"
core "github.com/jbenet/go-ipfs/core"
ci "github.com/jbenet/go-ipfs/crypto"
imp "github.com/jbenet/go-ipfs/importer"
chunk "github.com/jbenet/go-ipfs/importer/chunk"
peer "github.com/jbenet/go-ipfs/peer"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
peer "github.com/jbenet/go-ipfs/p2p/peer"
repo "github.com/jbenet/go-ipfs/repo"
u "github.com/jbenet/go-ipfs/util"
debugerror "github.com/jbenet/go-ipfs/util/debugerror"

View File

@ -18,9 +18,9 @@ import (
"os/signal"
"syscall"
ci "github.com/jbenet/go-ipfs/crypto"
secio "github.com/jbenet/go-ipfs/crypto/secio"
peer "github.com/jbenet/go-ipfs/peer"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
secio "github.com/jbenet/go-ipfs/p2p/crypto/secio"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
ic "github.com/jbenet/go-ipfs/crypto"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
u "github.com/jbenet/go-ipfs/util"
"github.com/jbenet/go-ipfs/util/debugerror"
)

View File

@ -8,8 +8,9 @@ import (
"time"
config "github.com/jbenet/go-ipfs/config"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
host "github.com/jbenet/go-ipfs/p2p/host"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
dht "github.com/jbenet/go-ipfs/routing/dht"
lgbl "github.com/jbenet/go-ipfs/util/eventlog/loggables"
math2 "github.com/jbenet/go-ipfs/util/math2"
@ -25,7 +26,7 @@ const (
)
func superviseConnections(parent context.Context,
n inet.Network,
h host.Host,
route *dht.IpfsDHT, // TODO depend on abstract interface for testing purposes
store peer.Peerstore,
peers []*config.BootstrapPeer) error {
@ -34,7 +35,7 @@ func superviseConnections(parent context.Context,
ctx, _ := context.WithTimeout(parent, connectiontimeout)
// TODO get config from disk so |peers| always reflects the latest
// information
if err := bootstrap(ctx, n, route, store, peers); err != nil {
if err := bootstrap(ctx, h, route, store, peers); err != nil {
log.Error(err)
}
select {
@ -47,30 +48,30 @@ func superviseConnections(parent context.Context,
}
func bootstrap(ctx context.Context,
n inet.Network,
h host.Host,
r *dht.IpfsDHT,
ps peer.Peerstore,
boots []*config.BootstrapPeer) error {
connectedPeers := n.Peers()
connectedPeers := h.Network().Peers()
if len(connectedPeers) >= recoveryThreshold {
log.Event(ctx, "bootstrapSkip", n.LocalPeer())
log.Event(ctx, "bootstrapSkip", h.ID())
log.Debugf("%s bootstrap skipped -- connected to %d (> %d) nodes",
n.LocalPeer(), len(connectedPeers), recoveryThreshold)
h.ID(), len(connectedPeers), recoveryThreshold)
return nil
}
numCxnsToCreate := recoveryThreshold - len(connectedPeers)
log.Event(ctx, "bootstrapStart", n.LocalPeer())
log.Debugf("%s bootstrapping to %d more nodes", n.LocalPeer(), numCxnsToCreate)
log.Event(ctx, "bootstrapStart", h.ID())
log.Debugf("%s bootstrapping to %d more nodes", h.ID(), numCxnsToCreate)
var bootstrapPeers []peer.PeerInfo
for _, bootstrap := range boots {
p, err := toPeer(bootstrap)
if err != nil {
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
log.Event(ctx, "bootstrapError", h.ID(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", h.ID(), err)
return err
}
bootstrapPeers = append(bootstrapPeers, p)
@ -78,7 +79,7 @@ func bootstrap(ctx context.Context,
var notConnected []peer.PeerInfo
for _, p := range bootstrapPeers {
if n.Connectedness(p.ID) != inet.Connected {
if h.Network().Connectedness(p.ID) != inet.Connected {
notConnected = append(notConnected, p)
}
}
@ -86,17 +87,17 @@ func bootstrap(ctx context.Context,
if len(notConnected) < 1 {
s := "must bootstrap to %d more nodes, but already connected to all candidates"
err := fmt.Errorf(s, numCxnsToCreate)
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
log.Event(ctx, "bootstrapError", h.ID(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", h.ID(), err)
return err
}
var randomSubset = randomSubsetOfPeers(notConnected, numCxnsToCreate)
log.Debugf("%s bootstrapping to %d nodes: %s", n.LocalPeer(), numCxnsToCreate, randomSubset)
log.Debugf("%s bootstrapping to %d nodes: %s", h.ID(), numCxnsToCreate, randomSubset)
if err := connect(ctx, ps, r, randomSubset); err != nil {
log.Event(ctx, "bootstrapError", n.LocalPeer(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", n.LocalPeer(), err)
log.Event(ctx, "bootstrapError", h.ID(), lgbl.Error(err))
log.Errorf("%s bootstrap error: %s", h.ID(), err)
return err
}
return nil

View File

@ -3,7 +3,7 @@ package core
import (
"testing"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)

View File

@ -11,8 +11,8 @@ import (
b58 "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-base58"
cmds "github.com/jbenet/go-ipfs/commands"
ic "github.com/jbenet/go-ipfs/crypto"
"github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
"github.com/jbenet/go-ipfs/p2p/peer"
kb "github.com/jbenet/go-ipfs/routing/kbucket"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -6,8 +6,8 @@ import (
cmds "github.com/jbenet/go-ipfs/commands"
core "github.com/jbenet/go-ipfs/core"
crypto "github.com/jbenet/go-ipfs/crypto"
nsys "github.com/jbenet/go-ipfs/namesys"
crypto "github.com/jbenet/go-ipfs/p2p/crypto"
u "github.com/jbenet/go-ipfs/util"
)
@ -53,7 +53,7 @@ Publish a <ref> to another public key:
args := req.Arguments()
if n.Network == nil {
if n.PeerHost == nil {
return nil, errNotOnline
}

View File

@ -47,7 +47,7 @@ Resolve the value of another name:
var name string
if n.Network == nil {
if n.PeerHost == nil {
return nil, errNotOnline
}

View File

@ -6,7 +6,7 @@ import (
"path"
cmds "github.com/jbenet/go-ipfs/commands"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
errors "github.com/jbenet/go-ipfs/util/debugerror"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@ -51,11 +51,11 @@ ipfs swarm peers lists the set of peers this node is connected to.
return nil, err
}
if n.Network == nil {
if n.PeerHost == nil {
return nil, errNotOnline
}
conns := n.Network.Conns()
conns := n.PeerHost.Network().Conns()
addrs := make([]string, len(conns))
for i, c := range conns {
pid := c.RemotePeer()
@ -95,7 +95,7 @@ ipfs swarm connect /ip4/104.131.131.82/tcp/4001/QmaCpDMGvV2BGHeYERUEnRQAwe3N8Szb
addrs := req.Arguments()
if n.Network == nil {
if n.PeerHost == nil {
return nil, errNotOnline
}
@ -108,7 +108,7 @@ ipfs swarm connect /ip4/104.131.131.82/tcp/4001/QmaCpDMGvV2BGHeYERUEnRQAwe3N8Szb
for i, p := range peers {
output[i] = "connect " + p.Pretty()
err := n.Network.DialPeer(ctx, p)
err := n.PeerHost.Connect(ctx, peer.PeerInfo{ID: p})
if err != nil {
output[i] += " failure: " + err.Error()
} else {

View File

@ -11,7 +11,6 @@ import (
bstore "github.com/jbenet/go-ipfs/blocks/blockstore"
bserv "github.com/jbenet/go-ipfs/blockservice"
config "github.com/jbenet/go-ipfs/config"
ic "github.com/jbenet/go-ipfs/crypto"
diag "github.com/jbenet/go-ipfs/diagnostics"
exchange "github.com/jbenet/go-ipfs/exchange"
bitswap "github.com/jbenet/go-ipfs/exchange/bitswap"
@ -20,9 +19,12 @@ import (
mount "github.com/jbenet/go-ipfs/fuse/mount"
merkledag "github.com/jbenet/go-ipfs/merkledag"
namesys "github.com/jbenet/go-ipfs/namesys"
inet "github.com/jbenet/go-ipfs/net"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
p2phost "github.com/jbenet/go-ipfs/p2p/host"
p2pbhost "github.com/jbenet/go-ipfs/p2p/host/basic"
swarm "github.com/jbenet/go-ipfs/p2p/net/swarm"
peer "github.com/jbenet/go-ipfs/p2p/peer"
path "github.com/jbenet/go-ipfs/path"
peer "github.com/jbenet/go-ipfs/peer"
pin "github.com/jbenet/go-ipfs/pin"
routing "github.com/jbenet/go-ipfs/routing"
dht "github.com/jbenet/go-ipfs/routing/dht"
@ -52,7 +54,7 @@ type IpfsNode struct {
// Services
Peerstore peer.Peerstore // storage for other Peer instances
Network inet.Network // the network message stream
PeerHost p2phost.Host // the network host (server+client)
Routing routing.IpfsRouting // the routing system. recommend ipfs-dht
Exchange exchange.Interface // the block exchange + strategy (bitswap)
Blocks *bserv.BlockService // the block service, get/add blocks.
@ -121,16 +123,17 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
return nil, debugerror.Wrap(err)
}
n.Network, err = inet.NewNetwork(ctx, listenAddrs, n.Identity, n.Peerstore)
network, err := swarm.NewNetwork(ctx, listenAddrs, n.Identity, n.Peerstore)
if err != nil {
return nil, debugerror.Wrap(err)
}
n.AddChildGroup(n.Network.CtxGroup())
n.AddChildGroup(network.CtxGroup())
n.PeerHost = p2pbhost.New(network)
// explicitly set these as our listen addrs.
// (why not do it inside inet.NewNetwork? because this way we can
// listen on addresses without necessarily advertising those publicly.)
addrs, err := n.Network.InterfaceListenAddresses()
addrs, err := n.PeerHost.Network().InterfaceListenAddresses()
if err != nil {
return nil, debugerror.Wrap(err)
}
@ -138,10 +141,10 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
n.Peerstore.AddAddresses(n.Identity, addrs)
// setup diagnostics service
n.Diagnostics = diag.NewDiagnostics(n.Identity, n.Network)
n.Diagnostics = diag.NewDiagnostics(n.Identity, n.PeerHost)
// setup routing service
dhtRouting := dht.NewDHT(ctx, n.Identity, n.Network, n.Datastore)
dhtRouting := dht.NewDHT(ctx, n.PeerHost, n.Datastore)
dhtRouting.Validators[IpnsValidatorTag] = namesys.ValidateIpnsRecord
// TODO(brian): perform this inside NewDHT factory method
@ -150,7 +153,7 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
// setup exchange service
const alwaysSendToPeer = true // use YesManStrategy
bitswapNetwork := bsnet.NewFromIpfsNetwork(n.Network, n.Routing)
bitswapNetwork := bsnet.NewFromIpfsHost(n.PeerHost, n.Routing)
n.Exchange = bitswap.New(ctx, n.Identity, bitswapNetwork, blockstore, alwaysSendToPeer)
@ -160,7 +163,7 @@ func NewIpfsNode(ctx context.Context, cfg *config.Config, online bool) (n *IpfsN
// an Exchange, Network, or Routing component and have the constructor
// manage the wiring. In that scenario, this dangling function is a bit
// awkward.
go superviseConnections(ctx, n.Network, dhtRouting, n.Peerstore, n.Config.Bootstrap)
go superviseConnections(ctx, n.PeerHost, dhtRouting, n.Peerstore, n.Config.Bootstrap)
}
// TODO(brian): when offline instantiate the BlockService with a bitswap

View File

@ -7,16 +7,16 @@ import (
syncds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore/sync"
"github.com/jbenet/go-ipfs/blocks/blockstore"
blockservice "github.com/jbenet/go-ipfs/blockservice"
ci "github.com/jbenet/go-ipfs/crypto"
"github.com/jbenet/go-ipfs/exchange/offline"
mdag "github.com/jbenet/go-ipfs/merkledag"
nsys "github.com/jbenet/go-ipfs/namesys"
"github.com/jbenet/go-ipfs/net/mock"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
mocknet "github.com/jbenet/go-ipfs/p2p/net/mock"
peer "github.com/jbenet/go-ipfs/p2p/peer"
path "github.com/jbenet/go-ipfs/path"
peer "github.com/jbenet/go-ipfs/peer"
dht "github.com/jbenet/go-ipfs/routing/dht"
ds2 "github.com/jbenet/go-ipfs/util/datastore2"
"github.com/jbenet/go-ipfs/util/testutil"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
// TODO this is super sketch. Deprecate and initialize one that shares code
@ -44,16 +44,18 @@ func NewMockNode() (*IpfsNode, error) {
nd.Peerstore = peer.NewPeerstore()
nd.Peerstore.AddPrivKey(p, sk)
nd.Peerstore.AddPubKey(p, pk)
nd.Network, err = mocknet.New(ctx).AddPeer(sk, testutil.RandLocalTCPAddress()) // effectively offline
nd.PeerHost, err = mocknet.New(ctx).AddPeer(sk, testutil.RandLocalTCPAddress()) // effectively offline
if err != nil {
return nil, err
}
// Temp Datastore
dstore := ds.NewMapDatastore()
nd.Datastore = ds2.CloserWrap(syncds.MutexWrap(dstore))
// Routing
dht := dht.NewDHT(ctx, nd.Identity, nd.Network, nd.Datastore)
dht := dht.NewDHT(ctx, nd.PeerHost, nd.Datastore)
nd.Routing = dht
// Bitswap

View File

@ -17,21 +17,27 @@ import (
ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
host "github.com/jbenet/go-ipfs/p2p/host"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
pb "github.com/jbenet/go-ipfs/diagnostics/internal/pb"
net "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
util "github.com/jbenet/go-ipfs/util"
)
var log = util.Logger("diagnostics")
// ProtocolDiag is the diagnostics protocol.ID
var ProtocolDiag protocol.ID = "/ipfs/diagnostics"
const ResponseTimeout = time.Second * 10
// Diagnostics is a net service that manages requesting and responding to diagnostic
// requests
type Diagnostics struct {
network net.Network
self peer.ID
host host.Host
self peer.ID
diagLock sync.Mutex
diagMap map[string]time.Time
@ -39,15 +45,15 @@ type Diagnostics struct {
}
// NewDiagnostics instantiates a new diagnostics service running on the given network
func NewDiagnostics(self peer.ID, inet net.Network) *Diagnostics {
func NewDiagnostics(self peer.ID, h host.Host) *Diagnostics {
d := &Diagnostics{
network: inet,
host: h,
self: self,
birth: time.Now(),
diagMap: make(map[string]time.Time),
}
inet.SetHandler(net.ProtocolDiag, d.handleNewStream)
h.SetStreamHandler(ProtocolDiag, d.handleNewStream)
return d
}
@ -92,7 +98,7 @@ func (di *DiagInfo) Marshal() []byte {
}
func (d *Diagnostics) getPeers() []peer.ID {
return d.network.Peers()
return d.host.Network().Peers()
}
func (d *Diagnostics) getDiagInfo() *DiagInfo {
@ -101,10 +107,11 @@ func (d *Diagnostics) getDiagInfo() *DiagInfo {
di.ID = d.self.Pretty()
di.LifeSpan = time.Since(d.birth)
di.Keys = nil // Currently no way to query datastore
di.BwIn, di.BwOut = d.network.BandwidthTotals()
// di.BwIn, di.BwOut = d.host.BandwidthTotals() //TODO fix this.
for _, p := range d.getPeers() {
d := connDiagInfo{d.network.Peerstore().LatencyEWMA(p), p.Pretty()}
d := connDiagInfo{d.host.Peerstore().LatencyEWMA(p), p.Pretty()}
di.Connections = append(di.Connections, d)
}
return di
@ -197,13 +204,13 @@ func newMessage(diagID string) *pb.Message {
func (d *Diagnostics) sendRequest(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
s, err := d.network.NewStream(net.ProtocolDiag, p)
s, err := d.host.NewStream(ProtocolDiag, p)
if err != nil {
return nil, err
}
defer s.Close()
r := ggio.NewDelimitedReader(s, net.MessageSizeMax)
r := ggio.NewDelimitedReader(s, inet.MessageSizeMax)
w := ggio.NewDelimitedWriter(s)
start := time.Now()
@ -274,7 +281,7 @@ func (d *Diagnostics) handleDiagnostic(p peer.ID, pmes *pb.Message) (*pb.Message
return resp, nil
}
func (d *Diagnostics) HandleMessage(ctx context.Context, s net.Stream) error {
func (d *Diagnostics) HandleMessage(ctx context.Context, s inet.Stream) error {
r := ggio.NewDelimitedReader(s, 32768) // maxsize
w := ggio.NewDelimitedWriter(s)
@ -312,10 +319,7 @@ func (d *Diagnostics) HandleMessage(ctx context.Context, s net.Stream) error {
return nil
}
func (d *Diagnostics) handleNewStream(s net.Stream) {
go func() {
d.HandleMessage(context.Background(), s)
}()
func (d *Diagnostics) handleNewStream(s inet.Stream) {
d.HandleMessage(context.Background(), s)
s.Close()
}
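
A wiring sketch for the refactored constructor: diagnostics now hangs off a host rather than a raw network, and registers its own stream handler. The host value is assumed to come from the basichost constructor appearing later in this changeset.

// minimal sketch, assuming h is an already-constructed host.Host
// (for example p2pbhost.New(network), as in core.go below)
d := diagnostics.NewDiagnostics(h.ID(), h)
// NewDiagnostics registers the "/ipfs/diagnostics" stream handler on h,
// so remote peers can exchange diagnostic messages with this node.
_ = d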

View File

@ -11,7 +11,7 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
random "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-random"
mocknet "github.com/jbenet/go-ipfs/net/mock"
mocknet "github.com/jbenet/go-ipfs/p2p/net/mock"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
@ -99,11 +99,11 @@ func DirectAddCat(data []byte, conf Config) error {
return errors.New("test initialization error")
}
adder, err := makeCore(ctx, MocknetTestRepo(peers[0], mn.Net(peers[0]), conf))
adder, err := makeCore(ctx, MocknetTestRepo(peers[0], mn.Host(peers[0]), conf))
if err != nil {
return err
}
catter, err := makeCore(ctx, MocknetTestRepo(peers[1], mn.Net(peers[1]), conf))
catter, err := makeCore(ctx, MocknetTestRepo(peers[1], mn.Host(peers[1]), conf))
if err != nil {
return err
}

View File

@ -15,9 +15,9 @@ import (
importer "github.com/jbenet/go-ipfs/importer"
chunk "github.com/jbenet/go-ipfs/importer/chunk"
merkledag "github.com/jbenet/go-ipfs/merkledag"
net "github.com/jbenet/go-ipfs/net"
host "github.com/jbenet/go-ipfs/p2p/host"
peer "github.com/jbenet/go-ipfs/p2p/peer"
path "github.com/jbenet/go-ipfs/path"
peer "github.com/jbenet/go-ipfs/peer"
dht "github.com/jbenet/go-ipfs/routing/dht"
uio "github.com/jbenet/go-ipfs/unixfs/io"
util "github.com/jbenet/go-ipfs/util"
@ -101,7 +101,7 @@ type repo struct {
blockstore blockstore.Blockstore
exchange exchange.Interface
datastore datastore.ThreadSafeDatastore
network net.Network
host host.Host
dht *dht.IpfsDHT
id peer.ID
}
@ -126,16 +126,16 @@ func (r *repo) Exchange() exchange.Interface {
return r.exchange
}
func MocknetTestRepo(p peer.ID, n net.Network, conf Config) RepoFactory {
func MocknetTestRepo(p peer.ID, h host.Host, conf Config) RepoFactory {
return func(ctx context.Context) (Repo, error) {
const kWriteCacheElems = 100
const alwaysSendToPeer = true
dsDelay := delay.Fixed(conf.BlockstoreLatency)
ds := sync.MutexWrap(datastore2.WithDelay(datastore.NewMapDatastore(), dsDelay))
log.Debugf("MocknetTestRepo: %s %s %s", p, n.LocalPeer(), n)
dhtt := dht.NewDHT(ctx, p, n, ds)
bsn := bsnet.NewFromIpfsNetwork(n, dhtt)
log.Debugf("MocknetTestRepo: %s %s %s", p, h.ID(), h)
dhtt := dht.NewDHT(ctx, h, ds)
bsn := bsnet.NewFromIpfsHost(h, dhtt)
bstore, err := blockstore.WriteCached(blockstore.NewBlockstore(ds), kWriteCacheElems)
if err != nil {
return nil, err
@ -146,7 +146,7 @@ func MocknetTestRepo(p peer.ID, n net.Network, conf Config) RepoFactory {
blockstore: bstore,
exchange: exch,
datastore: ds,
network: n,
host: h,
dht: dhtt,
id: p,
}, nil

View File

@ -7,7 +7,7 @@ import (
"testing"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
mocknet "github.com/jbenet/go-ipfs/net/mock"
mocknet "github.com/jbenet/go-ipfs/p2p/net/mock"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)
@ -42,15 +42,15 @@ func RunThreeLeggedCat(data []byte, conf Config) error {
if len(peers) < numPeers {
return errors.New("test initialization error")
}
adder, err := makeCore(ctx, MocknetTestRepo(peers[0], mn.Net(peers[0]), conf))
adder, err := makeCore(ctx, MocknetTestRepo(peers[0], mn.Host(peers[0]), conf))
if err != nil {
return err
}
catter, err := makeCore(ctx, MocknetTestRepo(peers[1], mn.Net(peers[1]), conf))
catter, err := makeCore(ctx, MocknetTestRepo(peers[1], mn.Host(peers[1]), conf))
if err != nil {
return err
}
bootstrap, err := makeCore(ctx, MocknetTestRepo(peers[2], mn.Net(peers[2]), conf))
bootstrap, err := makeCore(ctx, MocknetTestRepo(peers[2], mn.Host(peers[2]), conf))
if err != nil {
return err
}

View File

@ -17,7 +17,7 @@ import (
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
notifications "github.com/jbenet/go-ipfs/exchange/bitswap/notifications"
wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
errors "github.com/jbenet/go-ipfs/util/debugerror"
"github.com/jbenet/go-ipfs/util/delay"

View File

@ -7,7 +7,7 @@ import (
bstore "github.com/jbenet/go-ipfs/blocks/blockstore"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -11,7 +11,7 @@ import (
blocks "github.com/jbenet/go-ipfs/blocks"
blockstore "github.com/jbenet/go-ipfs/blocks/blockstore"
message "github.com/jbenet/go-ipfs/exchange/bitswap/message"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
)
type peerAndEngine struct {

View File

@ -4,7 +4,7 @@ import (
"time"
wl "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -4,7 +4,7 @@ import (
"sync"
wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -6,7 +6,7 @@ import (
blocks "github.com/jbenet/go-ipfs/blocks"
pb "github.com/jbenet/go-ipfs/exchange/bitswap/message/internal/pb"
wantlist "github.com/jbenet/go-ipfs/exchange/bitswap/wantlist"
inet "github.com/jbenet/go-ipfs/net"
inet "github.com/jbenet/go-ipfs/p2p/net"
u "github.com/jbenet/go-ipfs/util"
ggio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"

View File

@ -4,10 +4,13 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
u "github.com/jbenet/go-ipfs/util"
)
var ProtocolBitswap protocol.ID = "/ipfs/bitswap"
// BitSwapNetwork provides network connectivity for BitSwap sessions
type BitSwapNetwork interface {

View File

@ -3,29 +3,29 @@ package network
import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
host "github.com/jbenet/go-ipfs/p2p/host"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
util "github.com/jbenet/go-ipfs/util"
)
var log = util.Logger("bitswap_network")
// NewFromIpfsNetwork returns a BitSwapNetwork supported by underlying IPFS
// Dialer & Service
func NewFromIpfsNetwork(n inet.Network, r routing.IpfsRouting) BitSwapNetwork {
// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host
func NewFromIpfsHost(host host.Host, r routing.IpfsRouting) BitSwapNetwork {
bitswapNetwork := impl{
network: n,
host: host,
routing: r,
}
n.SetHandler(inet.ProtocolBitswap, bitswapNetwork.handleNewStream)
host.SetStreamHandler(ProtocolBitswap, bitswapNetwork.handleNewStream)
return &bitswapNetwork
}
// impl transforms the ipfs network interface, which sends and receives
// NetMessage objects, into the bitswap network interface.
type impl struct {
network inet.Network
host host.Host
routing routing.IpfsRouting
// inbound messages from the network are forwarded to the receiver
@ -33,7 +33,7 @@ type impl struct {
}
func (bsnet *impl) DialPeer(ctx context.Context, p peer.ID) error {
return bsnet.network.DialPeer(ctx, p)
return bsnet.host.Connect(ctx, peer.PeerInfo{ID: p})
}
func (bsnet *impl) SendMessage(
@ -41,7 +41,7 @@ func (bsnet *impl) SendMessage(
p peer.ID,
outgoing bsmsg.BitSwapMessage) error {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
s, err := bsnet.host.NewStream(ProtocolBitswap, p)
if err != nil {
return err
}
@ -55,7 +55,7 @@ func (bsnet *impl) SendRequest(
p peer.ID,
outgoing bsmsg.BitSwapMessage) (bsmsg.BitSwapMessage, error) {
s, err := bsnet.network.NewStream(inet.ProtocolBitswap, p)
s, err := bsnet.host.NewStream(ProtocolBitswap, p)
if err != nil {
return nil, err
}
@ -79,7 +79,7 @@ func (bsnet *impl) FindProvidersAsync(ctx context.Context, k util.Key, max int)
defer close(out)
providers := bsnet.routing.FindProvidersAsync(ctx, k, max)
for info := range providers {
bsnet.network.Peerstore().AddAddresses(info.ID, info.Addrs)
bsnet.host.Peerstore().AddAddresses(info.ID, info.Addrs)
select {
case <-ctx.Done():
case out <- info.ID:
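
To see where the renamed constructor slots in, a short wiring sketch mirroring the core.go hunk earlier in this changeset; h, r, bstore and ctx are assumed to exist already.

// minimal sketch, assuming h (host.Host), r (routing.IpfsRouting),
// bstore (blockstore.Blockstore) and ctx are already constructed
bsn := bsnet.NewFromIpfsHost(h, r) // registers the bitswap stream handler on h
exch := bitswap.New(ctx, h.ID(), bsn, bstore, true /* alwaysSendToPeer */)
_ = exch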

View File

@ -2,7 +2,7 @@ package bitswap
import (
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
"github.com/jbenet/go-ipfs/util/testutil"
)

View File

@ -8,7 +8,7 @@ import (
blocks "github.com/jbenet/go-ipfs/blocks"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
delay "github.com/jbenet/go-ipfs/util/delay"
testutil "github.com/jbenet/go-ipfs/util/testutil"

View File

@ -4,8 +4,8 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ds "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
mockpeernet "github.com/jbenet/go-ipfs/net/mock"
peer "github.com/jbenet/go-ipfs/peer"
mockpeernet "github.com/jbenet/go-ipfs/p2p/net/mock"
peer "github.com/jbenet/go-ipfs/p2p/peer"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
testutil "github.com/jbenet/go-ipfs/util/testutil"
)
@ -25,7 +25,7 @@ func (pn *peernet) Adapter(p testutil.Identity) bsnet.BitSwapNetwork {
panic(err.Error())
}
routing := pn.routingserver.ClientWithDatastore(context.TODO(), p, ds.NewMapDatastore())
return bsnet.NewFromIpfsNetwork(client, routing)
return bsnet.NewFromIpfsHost(client, routing)
}
func (pn *peernet) HasPeer(p peer.ID) bool {

View File

@ -7,7 +7,7 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
bsmsg "github.com/jbenet/go-ipfs/exchange/bitswap/message"
bsnet "github.com/jbenet/go-ipfs/exchange/bitswap/network"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
routing "github.com/jbenet/go-ipfs/routing"
mockrouting "github.com/jbenet/go-ipfs/routing/mock"
util "github.com/jbenet/go-ipfs/util"

View File

@ -9,7 +9,7 @@ import (
blockstore "github.com/jbenet/go-ipfs/blocks/blockstore"
exchange "github.com/jbenet/go-ipfs/exchange"
tn "github.com/jbenet/go-ipfs/exchange/bitswap/testnet"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
datastore2 "github.com/jbenet/go-ipfs/util/datastore2"
delay "github.com/jbenet/go-ipfs/util/delay"
testutil "github.com/jbenet/go-ipfs/util/testutil"

View File

@ -14,9 +14,9 @@ import (
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
core "github.com/jbenet/go-ipfs/core"
ci "github.com/jbenet/go-ipfs/crypto"
chunk "github.com/jbenet/go-ipfs/importer/chunk"
mdag "github.com/jbenet/go-ipfs/merkledag"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
ft "github.com/jbenet/go-ipfs/unixfs"
uio "github.com/jbenet/go-ipfs/unixfs/io"
ftpb "github.com/jbenet/go-ipfs/unixfs/pb"

View File

@ -3,7 +3,7 @@ package namesys
import (
"errors"
ci "github.com/jbenet/go-ipfs/crypto"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
)
// ErrResolveFailed signals an error when attempting to resolve.

View File

@ -1,7 +1,7 @@
package namesys
import (
ci "github.com/jbenet/go-ipfs/crypto"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
routing "github.com/jbenet/go-ipfs/routing"
)

View File

@ -10,8 +10,8 @@ import (
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
ci "github.com/jbenet/go-ipfs/crypto"
pb "github.com/jbenet/go-ipfs/namesys/internal/pb"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
routing "github.com/jbenet/go-ipfs/routing"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -7,8 +7,8 @@ import (
"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
mh "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multihash"
ci "github.com/jbenet/go-ipfs/crypto"
pb "github.com/jbenet/go-ipfs/namesys/internal/pb"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
routing "github.com/jbenet/go-ipfs/routing"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -1,52 +0,0 @@
package conn
import (
"fmt"
handshake "github.com/jbenet/go-ipfs/net/handshake"
hspb "github.com/jbenet/go-ipfs/net/handshake/pb"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ggprotoio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/gogoprotobuf/io"
)
// Handshake1 exchanges local and remote versions and compares them
// closes remote and returns an error in case of major difference
func Handshake1(ctx context.Context, c Conn) error {
rpeer := c.RemotePeer()
lpeer := c.LocalPeer()
// setup up protobuf io
maxSize := 4096
r := ggprotoio.NewDelimitedReader(c, maxSize)
w := ggprotoio.NewDelimitedWriter(c)
localH := handshake.Handshake1Msg()
remoteH := new(hspb.Handshake1)
// send the outgoing handshake message
if err := w.WriteMsg(localH); err != nil {
return err
}
log.Debugf("%p sent my version (%s) to %s", c, localH, rpeer)
log.Event(ctx, "handshake1Sent", lpeer)
select {
case <-ctx.Done():
return ctx.Err()
default:
}
if err := r.ReadMsg(remoteH); err != nil {
return fmt.Errorf("could not receive remote version: %q", err)
}
log.Debugf("%p received remote version (%s) from %s", c, remoteH, rpeer)
log.Event(ctx, "handshake1Received", lpeer)
if err := handshake.Handshake1Compatible(localH, remoteH); err != nil {
log.Infof("%s (%s) incompatible version with %s (%s)", lpeer, localH, rpeer, remoteH)
return err
}
log.Debugf("%s version handshake compatible %s", lpeer, rpeer)
return nil
}

View File

@ -1,27 +0,0 @@
# IPFS Handshake
The IPFS Protocol Handshake is divided into three sequential steps
1. Version Handshake (`Handshake1`)
2. Secure Channel (`NewSecureConn`)
3. Services (`Handshake3`)
These parts currently happen sequentially (costing an awful 5 RTT),
but can be optimized to 2 RTT.
### Version Handshake
The Version Handshake ensures that nodes speaking to each other can interoperate.
They send each other protocol versions and ensure there is a match on the major
version (semver).
### Secure Channel
The second part exchanges keys and establishes a secure comm channel. This
follows ECDHE TLS, but *isn't* TLS. (why will be written up elsewhere).
### Services
The Services portion sends any additional information on nodes needed
by the nodes, e.g. Listen Address (the received address could be a Dial addr),
and later on can include Service listing (dht, exchange, ipns, etc).

View File

@ -1,34 +0,0 @@
/*
package handshake implements the ipfs handshake protocol
IPFS Handshake
The IPFS Protocol Handshake is divided into three sequential steps
1. Version Handshake (`Handshake1`)
2. Secure Channel (`NewSecureConn`)
3. Services (`Handshake3`)
These parts currently happen sequentially (costing an awful 5 RTT),
but can be optimized to 2 RTT.
Version Handshake
The Version Handshake ensures that nodes speaking to each other can interoperate.
They send each other protocol versions and ensure there is a match on the major
version (semver).
Secure Channel
The second part exchanges keys and establishes a secure comm channel. This
follows ECDHE TLS, but *isn't* TLS. (why will be written up elsewhere).
Services
The Services portion sends any additional information on nodes needed
by the nodes, e.g. Listen Address (the received address could be a Dial addr),
and later on can include Service listing (dht, exchange, ipns, etc).
*/
package handshake

View File

@ -1,68 +0,0 @@
package handshake
import (
"errors"
"fmt"
config "github.com/jbenet/go-ipfs/config"
pb "github.com/jbenet/go-ipfs/net/handshake/pb"
u "github.com/jbenet/go-ipfs/util"
semver "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/coreos/go-semver/semver"
)
var log = u.Logger("handshake")
// IpfsVersion holds the current protocol version for a client running this code
var IpfsVersion *semver.Version
var ClientVersion = "go-ipfs/" + config.CurrentVersionNumber
func init() {
var err error
IpfsVersion, err = semver.NewVersion("0.0.1")
if err != nil {
panic(fmt.Errorf("invalid protocol version: %v", err))
}
}
// Handshake1Msg returns the current protocol version as a protobuf message
func Handshake1Msg() *pb.Handshake1 {
return NewHandshake1(IpfsVersion.String(), ClientVersion)
}
// ErrVersionMismatch is returned when two clients don't share a protocol version
var ErrVersionMismatch = errors.New("protocol mismatch")
// Handshake1Compatible checks whether two versions are compatible
// returns nil if they are fine
func Handshake1Compatible(handshakeA, handshakeB *pb.Handshake1) error {
a, err := semver.NewVersion(*handshakeA.ProtocolVersion)
if err != nil {
return err
}
b, err := semver.NewVersion(*handshakeB.ProtocolVersion)
if err != nil {
return err
}
if a.Major != b.Major {
return ErrVersionMismatch
}
return nil
}
// NewHandshake1 creates a new Handshake1 from the two strings
func NewHandshake1(protoVer, agentVer string) *pb.Handshake1 {
if protoVer == "" {
protoVer = IpfsVersion.String()
}
if agentVer == "" {
agentVer = ClientVersion
}
return &pb.Handshake1{
ProtocolVersion: &protoVer,
AgentVersion: &agentVer,
}
}

View File

@ -1,23 +0,0 @@
package handshake
import "testing"
func TestH1Compatible(t *testing.T) {
tcases := []struct {
a, b string
expected error
}{
{"0.0.0", "0.0.0", nil},
{"1.0.0", "1.1.0", nil},
{"1.0.0", "1.0.1", nil},
{"0.0.0", "1.0.0", ErrVersionMismatch},
{"1.0.0", "0.0.0", ErrVersionMismatch},
}
for i, tcase := range tcases {
if Handshake1Compatible(NewHandshake1(tcase.a, ""), NewHandshake1(tcase.b, "")) != tcase.expected {
t.Fatalf("case[%d] failed", i)
}
}
}

View File

@ -1,130 +0,0 @@
package net_test
import (
"testing"
"time"
inet "github.com/jbenet/go-ipfs/net"
handshake "github.com/jbenet/go-ipfs/net/handshake"
peer "github.com/jbenet/go-ipfs/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
func GenNetwork(t *testing.T, ctx context.Context) inet.Network {
p := testutil.RandPeerNetParamsOrFatal(t)
ps := peer.NewPeerstore()
ps.AddAddress(p.ID, p.Addr)
ps.AddPubKey(p.ID, p.PubKey)
ps.AddPrivKey(p.ID, p.PrivKey)
n, err := inet.NewNetwork(ctx, ps.Addresses(p.ID), p.ID, ps)
if err != nil {
t.Fatal(err)
}
return n
}
func DivulgeAddresses(a, b inet.Network) {
id := a.LocalPeer()
addrs := a.Peerstore().Addresses(id)
b.Peerstore().AddAddresses(id, addrs)
}
func subtestIDService(t *testing.T, postDialWait time.Duration) {
// the generated networks should have the id service wired in.
ctx := context.Background()
n1 := GenNetwork(t, ctx)
n2 := GenNetwork(t, ctx)
n1p := n1.LocalPeer()
n2p := n2.LocalPeer()
testKnowsAddrs(t, n1, n2p, []ma.Multiaddr{}) // nothing
testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{}) // nothing
// have n2 tell n1, so we can dial...
DivulgeAddresses(n2, n1)
testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them
testKnowsAddrs(t, n2, n1p, []ma.Multiaddr{}) // nothing
if err := n1.DialPeer(ctx, n2p); err != nil {
t.Fatalf("Failed to dial:", err)
}
// we need to wait here if Dial returns before ID service is finished.
if postDialWait > 0 {
<-time.After(postDialWait)
}
// the IDService should be opened automatically, by the network.
// what we should see now is that both peers know about each others listen addresses.
testKnowsAddrs(t, n1, n2p, n2.Peerstore().Addresses(n2p)) // has them
testHasProtocolVersions(t, n1, n2p)
// now, we do have to wait here: the listening side must finish
// identifying the connection.
c := n2.ConnsToPeer(n1.LocalPeer())
if len(c) < 1 {
t.Fatal("should have connection by now at least.")
}
<-n2.IdentifyProtocol().IdentifyWait(c[0])
// and the protocol versions.
testKnowsAddrs(t, n2, n1p, n1.Peerstore().Addresses(n1p)) // has them
testHasProtocolVersions(t, n2, n1p)
}
func testKnowsAddrs(t *testing.T, n inet.Network, p peer.ID, expected []ma.Multiaddr) {
actual := n.Peerstore().Addresses(p)
if len(actual) != len(expected) {
t.Error("dont have the same addresses")
}
have := map[string]struct{}{}
for _, addr := range actual {
have[addr.String()] = struct{}{}
}
for _, addr := range expected {
if _, found := have[addr.String()]; !found {
t.Errorf("%s did not have addr for %s: %s", n.LocalPeer(), p, addr)
// panic("ahhhhhhh")
}
}
}
func testHasProtocolVersions(t *testing.T, n inet.Network, p peer.ID) {
v, err := n.Peerstore().Get(p, "ProtocolVersion")
if v == nil {
t.Error("no protocol version")
return
}
if v.(string) != handshake.IpfsVersion.String() {
t.Error("protocol mismatch", err)
}
v, err = n.Peerstore().Get(p, "AgentVersion")
if v.(string) != handshake.ClientVersion {
t.Error("agent version mismatch", err)
}
}
// TestIDServiceWait gives the ID service 100ms to finish after dialing
// this is because it used to be concurrent. Now, Dial waits until the
// ID service is done.
func TestIDServiceWait(t *testing.T) {
N := 3
for i := 0; i < N; i++ {
subtestIDService(t, 100*time.Millisecond)
}
}
func TestIDServiceNoWait(t *testing.T) {
N := 3
for i := 0; i < N; i++ {
subtestIDService(t, 0)
}
}

View File

@ -1,302 +0,0 @@
// Package net provides an interface for ipfs to interact with the network through
package net
import (
"fmt"
ic "github.com/jbenet/go-ipfs/crypto"
swarm "github.com/jbenet/go-ipfs/net/swarm"
peer "github.com/jbenet/go-ipfs/peer"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
type stream swarm.Stream
func (s *stream) SwarmStream() *swarm.Stream {
return (*swarm.Stream)(s)
}
// Conn returns the connection this stream is part of.
func (s *stream) Conn() Conn {
c := s.SwarmStream().Conn()
return (*conn_)(c)
}
// Conn returns the connection this stream is part of.
func (s *stream) Close() error {
return s.SwarmStream().Close()
}
// Read reads bytes from a stream.
func (s *stream) Read(p []byte) (n int, err error) {
return s.SwarmStream().Read(p)
}
// Write writes bytes to a stream, flushing for each call.
func (s *stream) Write(p []byte) (n int, err error) {
return s.SwarmStream().Write(p)
}
type conn_ swarm.Conn
func (s *conn_) String() string {
return s.SwarmConn().String()
}
func (c *conn_) SwarmConn() *swarm.Conn {
return (*swarm.Conn)(c)
}
func (c *conn_) NewStreamWithProtocol(pr ProtocolID) (Stream, error) {
s, err := (*swarm.Conn)(c).NewStream()
if err != nil {
return nil, err
}
ss := (*stream)(s)
if err := WriteProtocolHeader(pr, ss); err != nil {
ss.Close()
return nil, err
}
return ss, nil
}
func (c *conn_) LocalMultiaddr() ma.Multiaddr {
return c.SwarmConn().LocalMultiaddr()
}
func (c *conn_) RemoteMultiaddr() ma.Multiaddr {
return c.SwarmConn().RemoteMultiaddr()
}
func (c *conn_) LocalPeer() peer.ID {
return c.SwarmConn().LocalPeer()
}
func (c *conn_) RemotePeer() peer.ID {
return c.SwarmConn().RemotePeer()
}
func (c *conn_) LocalPrivateKey() ic.PrivKey {
return c.SwarmConn().LocalPrivateKey()
}
func (c *conn_) RemotePublicKey() ic.PubKey {
return c.SwarmConn().RemotePublicKey()
}
// network implements the Network interface,
type network struct {
local peer.ID // local peer
mux Mux // protocol multiplexing
swarm *swarm.Swarm // peer connection multiplexing
ps peer.Peerstore
ids *IDService
cg ctxgroup.ContextGroup // for Context closing
}
// NewNetwork constructs a new network and starts listening on given addresses.
func NewNetwork(ctx context.Context, listen []ma.Multiaddr, local peer.ID,
peers peer.Peerstore) (Network, error) {
s, err := swarm.NewSwarm(ctx, listen, local, peers)
if err != nil {
return nil, err
}
n := &network{
local: local,
swarm: s,
mux: Mux{Handlers: StreamHandlerMap{}},
cg: ctxgroup.WithContext(ctx),
ps: peers,
}
n.cg.SetTeardown(n.close)
n.cg.AddChildGroup(s.CtxGroup())
s.SetStreamHandler(func(s *swarm.Stream) {
n.mux.Handle((*stream)(s))
})
// setup a conn handler that immediately "asks the other side about them"
// this is ProtocolIdentify.
n.ids = NewIDService(n)
s.SetConnHandler(n.newConnHandler)
return n, nil
}
func (n *network) newConnHandler(c *swarm.Conn) {
cc := (*conn_)(c)
n.ids.IdentifyConn(cc)
}
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
func (n *network) DialPeer(ctx context.Context, p peer.ID) error {
log.Debugf("[%s] network dialing peer [%s]", n.local, p)
sc, err := n.swarm.Dial(ctx, p)
if err != nil {
return err
}
// identify the connection before returning.
done := make(chan struct{})
go func() {
n.ids.IdentifyConn((*conn_)(sc))
close(done)
}()
// wait for identify to finish, or for the context to be cancelled
select {
case <-done:
case <-ctx.Done():
return ctx.Err()
}
log.Debugf("network for %s finished dialing %s", n.local, p)
return nil
}
func (n *network) Protocols() []ProtocolID {
return n.mux.Protocols()
}
// CtxGroup returns the network's ContextGroup
func (n *network) CtxGroup() ctxgroup.ContextGroup {
return n.cg
}
// Swarm returns the network's peerstream.Swarm
func (n *network) Swarm() *swarm.Swarm {
return n.swarm
}
// LocalPeer returns the network's LocalPeer
func (n *network) LocalPeer() peer.ID {
return n.swarm.LocalPeer()
}
// Peers returns the connected peers
func (n *network) Peers() []peer.ID {
return n.swarm.Peers()
}
// Peerstore returns the network's peerstore
func (n *network) Peerstore() peer.Peerstore {
return n.ps
}
// Conns returns this network's open connections
func (n *network) Conns() []Conn {
conns1 := n.swarm.Connections()
out := make([]Conn, len(conns1))
for i, c := range conns1 {
out[i] = (*conn_)(c)
}
return out
}
// ConnsToPeer returns the connections in this Network for given peer.
func (n *network) ConnsToPeer(p peer.ID) []Conn {
conns1 := n.swarm.ConnectionsToPeer(p)
out := make([]Conn, len(conns1))
for i, c := range conns1 {
out[i] = (*conn_)(c)
}
return out
}
// ClosePeer closes the connection to the given peer
func (n *network) ClosePeer(p peer.ID) error {
return n.swarm.CloseConnection(p)
}
// close is the real teardown function
func (n *network) close() error {
return n.swarm.Close()
}
// Close calls the ContextCloser func
func (n *network) Close() error {
return n.cg.Close()
}
// BandwidthTotals returns the total amount of bandwidth transferred
func (n *network) BandwidthTotals() (in uint64, out uint64) {
// need to implement this. probably best to do it in swarm this time.
// need a "metrics" object
return 0, 0
}
// ListenAddresses returns a list of addresses at which this network listens.
func (n *network) ListenAddresses() []ma.Multiaddr {
return n.swarm.ListenAddresses()
}
// InterfaceListenAddresses returns a list of addresses at which this network
// listens. It expands "any interface" addresses (/ip4/0.0.0.0, /ip6/::) to
// use the known local interfaces.
func (n *network) InterfaceListenAddresses() ([]ma.Multiaddr, error) {
return swarm.InterfaceListenAddresses(n.swarm)
}
// Connectedness returns a state signaling connection capabilities
// For now only returns Connected || NotConnected. Expand into more later.
func (n *network) Connectedness(p peer.ID) Connectedness {
c := n.swarm.ConnectionsToPeer(p)
if c != nil && len(c) > 0 {
return Connected
}
return NotConnected
}
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
func (n *network) NewStream(pr ProtocolID, p peer.ID) (Stream, error) {
log.Debugf("[%s] network opening stream to peer [%s]: %s", n.local, p, pr)
s, err := n.swarm.NewStreamWithPeer(p)
if err != nil {
return nil, err
}
ss := (*stream)(s)
if err := WriteProtocolHeader(pr, ss); err != nil {
ss.Close()
return nil, err
}
return ss, nil
}
// SetHandler sets the protocol handler on the Network's Muxer.
// This operation is threadsafe.
func (n *network) SetHandler(p ProtocolID, h StreamHandler) {
n.mux.SetHandler(p, h)
}
func (n *network) String() string {
return fmt.Sprintf("<Network %s>", n.LocalPeer())
}
func (n *network) IdentifyProtocol() *IDService {
return n.ids
}
func WriteProtocolHeader(pr ProtocolID, s Stream) error {
if pr != "" { // only write proper protocol headers
if err := WriteLengthPrefix(s, string(pr)); err != nil {
return err
}
}
return nil
}

View File

@ -21,7 +21,7 @@ import (
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
pb "github.com/jbenet/go-ipfs/crypto/internal/pb"
pb "github.com/jbenet/go-ipfs/p2p/crypto/internal/pb"
u "github.com/jbenet/go-ipfs/util"
)

View File

@ -1,7 +1,7 @@
package crypto_test
import (
. "github.com/jbenet/go-ipfs/crypto"
. "github.com/jbenet/go-ipfs/p2p/crypto"
"bytes"
tu "github.com/jbenet/go-ipfs/util/testutil"

View File

@ -10,7 +10,7 @@ import (
proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
pb "github.com/jbenet/go-ipfs/crypto/internal/pb"
pb "github.com/jbenet/go-ipfs/p2p/crypto/internal/pb"
)
type RsaPrivateKey struct {

View File

@ -15,7 +15,7 @@ import (
bfish "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.crypto/blowfish"
ci "github.com/jbenet/go-ipfs/crypto"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
)
// List of supported ECDH curves

View File

@ -4,12 +4,12 @@ package secio
import (
"io"
ci "github.com/jbenet/go-ipfs/crypto"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
)
// SessionGenerator constructs secure communication sessions for a peer.

View File

@ -10,9 +10,9 @@ import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
ci "github.com/jbenet/go-ipfs/crypto"
pb "github.com/jbenet/go-ipfs/crypto/secio/internal/pb"
peer "github.com/jbenet/go-ipfs/peer"
ci "github.com/jbenet/go-ipfs/p2p/crypto"
pb "github.com/jbenet/go-ipfs/p2p/crypto/secio/internal/pb"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
)

View File

@ -0,0 +1,149 @@
package basichost
import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
identify "github.com/jbenet/go-ipfs/p2p/protocol/identify"
relay "github.com/jbenet/go-ipfs/p2p/protocol/relay"
)
var log = eventlog.Logger("p2p/host/basic")
type BasicHost struct {
network inet.Network
mux protocol.Mux
ids *identify.IDService
relay *relay.RelayService
}
// New constructs and sets up a new *BasicHost with given Network
func New(net inet.Network) *BasicHost {
h := &BasicHost{
network: net,
mux: protocol.Mux{Handlers: protocol.StreamHandlerMap{}},
}
// setup host services
h.ids = identify.NewIDService(h)
h.relay = relay.NewRelayService(h, h.Mux().HandleSync)
net.SetConnHandler(h.newConnHandler)
net.SetStreamHandler(h.newStreamHandler)
return h
}
// newConnHandler is the remote-opened conn handler for inet.Network
func (h *BasicHost) newConnHandler(c inet.Conn) {
h.ids.IdentifyConn(c)
}
// newStreamHandler is the remote-opened stream handler for inet.Network
func (h *BasicHost) newStreamHandler(s inet.Stream) {
h.Mux().Handle(s)
}
// ID returns the (local) peer.ID associated with this Host
func (h *BasicHost) ID() peer.ID {
return h.Network().LocalPeer()
}
// Peerstore returns the Host's repository of Peer Addresses and Keys.
func (h *BasicHost) Peerstore() peer.Peerstore {
return h.Network().Peerstore()
}
// Network returns the Network interface of the Host
func (h *BasicHost) Network() inet.Network {
return h.network
}
// Mux returns the Mux multiplexing incoming streams to protocol handlers
func (h *BasicHost) Mux() *protocol.Mux {
return &h.mux
}
func (h *BasicHost) IDService() *identify.IDService {
return h.ids
}
// SetStreamHandler sets the protocol handler on the Host's Mux.
// This is equivalent to:
// host.Mux().SetHandler(proto, handler)
// (Threadsafe)
func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler inet.StreamHandler) {
h.Mux().SetHandler(pid, handler)
}
// NewStream opens a new stream to given peer p, and writes a p2p/protocol
// header with given protocol.ID. If there is no connection to p, attempts
// to create one. If ProtocolID is "", writes no header.
// (Threadsafe)
func (h *BasicHost) NewStream(pid protocol.ID, p peer.ID) (inet.Stream, error) {
s, err := h.Network().NewStream(p)
if err != nil {
return nil, err
}
if err := protocol.WriteHeader(s, pid); err != nil {
s.Close()
return nil, err
}
return s, nil
}
// Connect ensures there is a connection between this host and the peer with
// given peer.ID. Connect will absorb the addresses in pi into its internal
// peerstore. If there is not an active connection, Connect will issue a
// h.Network.Dial, and block until a connection is open, or an error is
// returned. // TODO: Relay + NAT.
func (h *BasicHost) Connect(ctx context.Context, pi peer.PeerInfo) error {
// absorb addresses into peerstore
h.Peerstore().AddPeerInfo(pi)
cs := h.Network().ConnsToPeer(pi.ID)
if len(cs) > 0 {
return nil
}
return h.dialPeer(ctx, pi.ID)
}
// dialPeer opens a connection to peer, and makes sure to identify
// the connection once it has been opened.
func (h *BasicHost) dialPeer(ctx context.Context, p peer.ID) error {
log.Debugf("host %s dialing %s", h.ID, p)
c, err := h.Network().DialPeer(ctx, p)
if err != nil {
return err
}
// identify the connection before returning.
done := make(chan struct{})
go func() {
h.ids.IdentifyConn(c)
close(done)
}()
// respect both the done channel and the context
select {
case <-done:
case <-ctx.Done():
return ctx.Err()
}
log.Debugf("host %s finished dialing %s", h.ID, p)
return nil
}
// Close shuts down the Host's services (network, etc).
func (h *BasicHost) Close() error {
return h.Network().Close()
}
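
To see how these pieces compose, here is a hedged end-to-end sketch: it assumes an already-listening inet.Network plus a peer.PeerInfo for the remote side, and uses only the BasicHost methods defined above. dialAndPing and the payloads are invented for illustration, not part of this change.

package example

import (
	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	bhost "github.com/jbenet/go-ipfs/p2p/host/basic"
	inet "github.com/jbenet/go-ipfs/p2p/net"
	peer "github.com/jbenet/go-ipfs/p2p/peer"
	protocol "github.com/jbenet/go-ipfs/p2p/protocol"
)

// dialAndPing wires a BasicHost onto an already-listening Network and
// round-trips one message with a remote peer whose PeerInfo we were given.
func dialAndPing(ctx context.Context, net inet.Network, remote peer.PeerInfo) error {
	h := bhost.New(net) // installs identify + relay and the conn/stream handlers

	// answer the testing protocol with a fixed reply
	h.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
		defer s.Close()
		s.Write([]byte("pong"))
	})

	// Connect absorbs remote's addresses into the peerstore, dials if needed,
	// and identifies the new connection before returning.
	if err := h.Connect(ctx, remote); err != nil {
		return err
	}

	// NewStream writes the protocol header for us.
	s, err := h.NewStream(protocol.TestingID, remote.ID)
	if err != nil {
		return err
	}
	defer s.Close()
	_, err = s.Write([]byte("ping"))
	return err
}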

View File

@ -0,0 +1,63 @@
package basichost_test
import (
"bytes"
"io"
"testing"
inet "github.com/jbenet/go-ipfs/p2p/net"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
testutil "github.com/jbenet/go-ipfs/p2p/test/util"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
)
func TestHostSimple(t *testing.T) {
ctx := context.Background()
h1 := testutil.GenHostSwarm(t, ctx)
h2 := testutil.GenHostSwarm(t, ctx)
defer h1.Close()
defer h2.Close()
h2pi := h2.Peerstore().PeerInfo(h2.ID())
if err := h1.Connect(ctx, h2pi); err != nil {
t.Fatal(err)
}
piper, pipew := io.Pipe()
h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
defer s.Close()
w := io.MultiWriter(s, pipew)
io.Copy(w, s) // mirror everything
})
s, err := h1.NewStream(protocol.TestingID, h2pi.ID)
if err != nil {
t.Fatal(err)
}
// write to the stream
buf1 := []byte("abcdefghijkl")
if _, err := s.Write(buf1); err != nil {
t.Fatal(err)
}
// get it from the stream (echoed)
buf2 := make([]byte, len(buf1))
if _, err := io.ReadFull(s, buf2); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf1, buf2) {
t.Fatal("buf1 != buf2 -- %x != %x", buf1, buf2)
}
// get it from the pipe (tee)
buf3 := make([]byte, len(buf1))
if _, err := io.ReadFull(piper, buf3); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf1, buf3) {
t.Fatal("buf1 != buf3 -- %x != %x", buf1, buf3)
}
}

54
p2p/host/host.go Normal file
View File

@ -0,0 +1,54 @@
package host
import (
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
)
var log = eventlog.Logger("p2p/host")
// Host is an object participating in a p2p network, which
// implements protocols or provides services. It handles
// requests like a Server, and issues requests like a Client.
// It is called Host because it is both Server and Client (and Peer
// may be confusing).
type Host interface {
// ID returns the (local) peer.ID associated with this Host
ID() peer.ID
// Peerstore returns the Host's repository of Peer Addresses and Keys.
Peerstore() peer.Peerstore
// Network returns the Network interface of the Host
Network() inet.Network
// Mux returns the Mux multiplexing incoming streams to protocol handlers
Mux() *protocol.Mux
// Connect ensures there is a connection between this host and the peer with
// given peer.ID. Connect will absorb the addresses in pi into its internal
// peerstore. If there is not an active connection, Connect will issue a
// h.Network.Dial, and block until a connection is open, or an error is
// returned. // TODO: Relay + NAT.
Connect(ctx context.Context, pi peer.PeerInfo) error
// SetStreamHandler sets the protocol handler on the Host's Mux.
// This is equivalent to:
// host.Mux().SetHandler(proto, handler)
// (Threadsafe)
SetStreamHandler(pid protocol.ID, handler inet.StreamHandler)
// NewStream opens a new stream to given peer p, and writes a p2p/protocol
// header with given protocol.ID. If there is no connection to p, attempts
// to create one. If ProtocolID is "", writes no header.
// (Threadsafe)
NewStream(pid protocol.ID, p peer.ID) (inet.Stream, error)
// Close shuts down the host, its Network, and services.
Close() error
}
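
Read as an API, this interface lends itself to small protocol packages that accept any Host. The sketch below is one such package: EchoID, ServeEcho and Echo are hypothetical names, and the cast to protocol.ID assumes it is a string type, as the rest of this change suggests.

package example

import (
	"io"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	host "github.com/jbenet/go-ipfs/p2p/host"
	inet "github.com/jbenet/go-ipfs/p2p/net"
	peer "github.com/jbenet/go-ipfs/p2p/peer"
	protocol "github.com/jbenet/go-ipfs/p2p/protocol"
)

// EchoID is a hypothetical protocol ID used only for this example.
const EchoID = protocol.ID("/example/echo/1.0.0")

// ServeEcho installs a handler that mirrors whatever the remote writes.
func ServeEcho(h host.Host) {
	h.SetStreamHandler(EchoID, func(s inet.Stream) {
		defer s.Close()
		io.Copy(s, s)
	})
}

// Echo connects to the peer described by pi, opens a stream with the echo
// header, and round-trips one message.
func Echo(ctx context.Context, h host.Host, pi peer.PeerInfo, msg []byte) ([]byte, error) {
	if err := h.Connect(ctx, pi); err != nil {
		return nil, err
	}
	s, err := h.NewStream(EchoID, pi.ID)
	if err != nil {
		return nil, err
	}
	defer s.Close()
	if _, err := s.Write(msg); err != nil {
		return nil, err
	}
	buf := make([]byte, len(msg))
	if _, err := io.ReadFull(s, buf); err != nil {
		return nil, err
	}
	return buf, nil
}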

View File

@ -11,19 +11,14 @@ import (
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
)
var log = eventlog.Logger("conn")
const (
// MaxMessageSize is the size of the largest single message. (4MB)
MaxMessageSize = 1 << 22
)
// ReleaseBuffer puts the given byte array back into the buffer pool,
// first verifying that it is the correct size
func ReleaseBuffer(b []byte) {
@ -48,15 +43,8 @@ func newSingleConn(ctx context.Context, local, remote peer.ID, maconn manet.Conn
maconn: maconn,
msgrw: msgio.NewReadWriter(maconn),
}
log.Debugf("newSingleConn %p: %v to %v", conn, local, remote)
// version handshake
if err := Handshake1(ctx, conn); err != nil {
conn.Close()
return nil, fmt.Errorf("Handshake1 failed: %s", err)
}
log.Debugf("newSingleConn %p: %v to %v finished", conn, local, remote)
return conn, nil
}

View File

@ -8,7 +8,7 @@ import (
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
peer "github.com/jbenet/go-ipfs/peer"
peer "github.com/jbenet/go-ipfs/p2p/peer"
debugerror "github.com/jbenet/go-ipfs/util/debugerror"
)

View File

@ -51,7 +51,6 @@ func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p
p1 = tu.RandPeerNetParamsOrFatal(t)
p2 = tu.RandPeerNetParamsOrFatal(t)
laddr := p1.Addr
key1 := p1.PrivKey
key2 := p2.PrivKey
@ -59,10 +58,11 @@ func setupConn(t *testing.T, ctx context.Context, secure bool) (a, b Conn, p1, p
key1 = nil
key2 = nil
}
l1, err := Listen(ctx, laddr, p1.ID, key1)
l1, err := Listen(ctx, p1.Addr, p1.ID, key1)
if err != nil {
t.Fatal(err)
}
p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.
d2 := &Dialer{
LocalPeer: p2.ID,
@ -110,6 +110,7 @@ func testDialer(t *testing.T, secure bool) {
if err != nil {
t.Fatal(err)
}
p1.Addr = l1.Multiaddr() // Addr has been determined by kernel.
d2 := &Dialer{
LocalPeer: p2.ID,

View File

@ -5,8 +5,8 @@ import (
"net"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
peer "github.com/jbenet/go-ipfs/p2p/peer"
u "github.com/jbenet/go-ipfs/util"
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"

View File

@ -9,33 +9,32 @@ import (
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
manet "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr-net"
ic "github.com/jbenet/go-ipfs/crypto"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
peer "github.com/jbenet/go-ipfs/p2p/peer"
)
// listener is an object that can accept connections. It implements Listener
type listener struct {
manet.Listener
maddr ma.Multiaddr // Local multiaddr to listen on
local peer.ID // LocalPeer is the identity of the local Peer
privk ic.PrivKey // private key to use to initialize secure conns
local peer.ID // LocalPeer is the identity of the local Peer
privk ic.PrivKey // private key to use to initialize secure conns
cg ctxgroup.ContextGroup
}
func (l *listener) teardown() error {
defer log.Debugf("listener closed: %s %s", l.local, l.maddr)
defer log.Debugf("listener closed: %s %s", l.local, l.Multiaddr())
return l.Listener.Close()
}
func (l *listener) Close() error {
log.Debugf("listener closing: %s %s", l.local, l.maddr)
log.Debugf("listener closing: %s %s", l.local, l.Multiaddr())
return l.cg.Close()
}
func (l *listener) String() string {
return fmt.Sprintf("<Listener %s %s>", l.local, l.maddr)
return fmt.Sprintf("<Listener %s %s>", l.local, l.Multiaddr())
}
// Accept waits for and returns the next connection to the listener.
@ -73,8 +72,14 @@ func (l *listener) Addr() net.Addr {
}
// Multiaddr returns the local multiaddr this listener listens on.
// If there is an error converting from net.Addr to ma.Multiaddr,
// the return value will be nil.
func (l *listener) Multiaddr() ma.Multiaddr {
return l.maddr
maddr, err := manet.FromNetAddr(l.Addr())
if err != nil {
return nil // error
}
return maddr
}
// LocalPeer is the identity of the local Peer.
@ -102,7 +107,6 @@ func Listen(ctx context.Context, addr ma.Multiaddr, local peer.ID, sk ic.PrivKey
l := &listener{
Listener: ml,
maddr: addr,
local: local,
privk: sk,
cg: ctxgroup.WithContext(ctx),

View File

@ -8,9 +8,9 @@ import (
msgio "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-msgio"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
ic "github.com/jbenet/go-ipfs/crypto"
secio "github.com/jbenet/go-ipfs/crypto/secio"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
secio "github.com/jbenet/go-ipfs/p2p/crypto/secio"
peer "github.com/jbenet/go-ipfs/p2p/peer"
errors "github.com/jbenet/go-ipfs/util/debugerror"
)

View File

@ -8,7 +8,7 @@ import (
"testing"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
)

View File

@ -3,28 +3,14 @@ package net
import (
"io"
conn "github.com/jbenet/go-ipfs/net/conn"
// swarm "github.com/jbenet/go-ipfs/net/swarm2"
peer "github.com/jbenet/go-ipfs/peer"
conn "github.com/jbenet/go-ipfs/p2p/net/conn"
peer "github.com/jbenet/go-ipfs/p2p/peer"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
// ProtocolID is an identifier used to write protocol headers in streams.
type ProtocolID string
// These are the ProtocolIDs of the protocols running. It is useful
// to keep them in one place.
const (
ProtocolTesting ProtocolID = "/ipfs/testing"
ProtocolBitswap ProtocolID = "/ipfs/bitswap"
ProtocolDHT ProtocolID = "/ipfs/dht"
ProtocolIdentify ProtocolID = "/ipfs/id"
ProtocolDiag ProtocolID = "/ipfs/diagnostics"
)
// MessageSizeMax is a soft (recommended) maximum for network messages.
// One can write more, as the interface is a stream. But it is useful
// to bunch it up into multiple read/writes when the whole message is
@ -44,12 +30,10 @@ type Stream interface {
Conn() Conn
}
// StreamHandler is the function protocols who wish to listen to
// incoming streams must implement.
// StreamHandler is the type of function used to listen for
// streams opened by the remote side.
type StreamHandler func(Stream)
type StreamHandlerMap map[ProtocolID]StreamHandler
// Conn is a connection to a remote peer. It multiplexes streams.
// Usually there is no need to use a Conn directly, but it may
// be useful to get information about the peer on the other side:
@ -57,11 +41,15 @@ type StreamHandlerMap map[ProtocolID]StreamHandler
type Conn interface {
conn.PeerConn
// NewStreamWithProtocol constructs a new Stream over this conn.
NewStreamWithProtocol(pr ProtocolID) (Stream, error)
// NewStream constructs a new Stream over this conn.
NewStream() (Stream, error)
}
// Network is the interface IPFS uses for connecting to the world.
// ConnHandler is the type of function used to listen for
// connections opened by the remote side.
type ConnHandler func(Conn)
// Network is the interface used to connect to the outside world.
// It dials and listens for connections. It uses a Swarm to pool
// connections (see the swarm pkg, and peerstream.Swarm). Connections
// are encrypted with a TLS-like protocol.
@ -69,22 +57,17 @@ type Network interface {
Dialer
io.Closer
// SetHandler sets the protocol handler on the Network's Muxer.
// This operation is threadsafe.
SetHandler(ProtocolID, StreamHandler)
// SetStreamHandler sets the handler for new streams opened by the
// remote side. This operation is threadsafe.
SetStreamHandler(StreamHandler)
// Protocols returns the list of protocols this network currently
// has registered handlers for.
Protocols() []ProtocolID
// SetConnHandler sets the handler for new connections opened by the
// remote side. This operation is threadsafe.
SetConnHandler(ConnHandler)
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
NewStream(ProtocolID, peer.ID) (Stream, error)
// BandwidthTotals returns the total number of bytes passed through
// the network since it was instantiated
BandwidthTotals() (uint64, uint64)
NewStream(peer.ID) (Stream, error)
// ListenAddresses returns a list of addresses at which this network listens.
ListenAddresses() []ma.Multiaddr
@ -96,11 +79,6 @@ type Network interface {
// CtxGroup returns the network's contextGroup
CtxGroup() ctxgroup.ContextGroup
// IdentifyProtocol returns the instance of the object running the Identify
// Protocol. This is what runs the ifps handshake-- this should be removed
// if this abstracted out to its own package.
IdentifyProtocol() *IDService
}
// Dialer represents a service that can dial out to peers
@ -116,8 +94,8 @@ type Dialer interface {
// LocalPeer returns the local peer associated with this network
LocalPeer() peer.ID
// DialPeer attempts to establish a connection to a given peer
DialPeer(context.Context, peer.ID) error
// DialPeer establishes a connection to a given peer
DialPeer(context.Context, peer.ID) (Conn, error)
// ClosePeer closes the connection to a given peer
ClosePeer(peer.ID) error
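
In short, the Network no longer muxes protocols itself: clients install one StreamHandler and one ConnHandler, do their own dispatch (as BasicHost now does via protocol.Mux), and DialPeer hands back the Conn it produced. A minimal consumer of the reshaped interface might look like this sketch, where wire and rawPing are illustrative names only:

package example

import (
	"fmt"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	inet "github.com/jbenet/go-ipfs/p2p/net"
	peer "github.com/jbenet/go-ipfs/p2p/peer"
)

// wire installs the two callbacks every Network now exposes.
func wire(n inet.Network) {
	n.SetConnHandler(func(c inet.Conn) {
		fmt.Printf("new conn from %s\n", c.RemotePeer())
	})
	n.SetStreamHandler(func(s inet.Stream) {
		defer s.Close()
		s.Write([]byte("hello")) // no protocol header; writing one is now the caller's job
	})
}

// rawPing dials p and opens an un-headered stream on the returned conn.
func rawPing(ctx context.Context, n inet.Network, p peer.ID) error {
	c, err := n.DialPeer(ctx, p) // DialPeer now returns the inet.Conn
	if err != nil {
		return err
	}
	s, err := c.NewStream() // or n.NewStream(p), which picks a conn for us
	if err != nil {
		return err
	}
	defer s.Close()
	_, err = s.Write([]byte("ping"))
	return err
}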

View File

@ -10,9 +10,10 @@ import (
"io"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
host "github.com/jbenet/go-ipfs/p2p/host"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
@ -20,16 +21,18 @@ import (
type Mocknet interface {
// GenPeer generates a peer and its inet.Network in the Mocknet
GenPeer() (inet.Network, error)
GenPeer() (host.Host, error)
// AddPeer adds an existing peer. we need both a privkey and addr.
// ID is derived from PrivKey
AddPeer(ic.PrivKey, ma.Multiaddr) (inet.Network, error)
AddPeer(ic.PrivKey, ma.Multiaddr) (host.Host, error)
// retrieve things (with randomized iteration order)
Peers() []peer.ID
Net(peer.ID) inet.Network
Nets() []inet.Network
Host(peer.ID) host.Host
Hosts() []host.Host
Links() LinkMap
LinksBetweenPeers(a, b peer.ID) []Link
LinksBetweenNets(a, b inet.Network) []Link
@ -52,8 +55,8 @@ type Mocknet interface {
// Connections are the usual. Connecting means Dialing.
// **to succeed, peers must be linked beforehand**
ConnectPeers(peer.ID, peer.ID) error
ConnectNets(inet.Network, inet.Network) error
ConnectPeers(peer.ID, peer.ID) (inet.Conn, error)
ConnectNets(inet.Network, inet.Network) (inet.Conn, error)
DisconnectPeers(peer.ID, peer.ID) error
DisconnectNets(inet.Network, inet.Network) error
}
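
Put together, a test typically generates in-memory hosts, links them, and then connects. The sketch below assumes the package lives at p2p/net/mock (as the paths in this change suggest) and that the linking API elided from this hunk exposes LinkAll; treat both as assumptions rather than confirmed signatures.

package mocknet_test

import (
	"testing"

	context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
	mocknet "github.com/jbenet/go-ipfs/p2p/net/mock"
)

// TestTwoConnectedHosts spins up a fully in-memory pair of hosts.
func TestTwoConnectedHosts(t *testing.T) {
	mn := mocknet.New(context.Background())

	h1, err := mn.GenPeer()
	if err != nil {
		t.Fatal(err)
	}
	h2, err := mn.GenPeer()
	if err != nil {
		t.Fatal(err)
	}

	// peers must be linked before they can connect
	if err := mn.LinkAll(); err != nil { // assumed linking helper, not shown in this hunk
		t.Fatal(err)
	}
	if _, err := mn.ConnectPeers(h1.ID(), h2.ID()); err != nil {
		t.Fatal(err)
	}
}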

View File

@ -53,7 +53,7 @@ func FullMeshConnected(ctx context.Context, n int) (Mocknet, error) {
nets := m.Nets()
for _, n1 := range nets {
for _, n2 := range nets {
if err := m.ConnectNets(n1, n2); err != nil {
if _, err := m.ConnectNets(n1, n2); err != nil {
return nil, err
}
}

View File

@ -4,9 +4,9 @@ import (
"container/list"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
ma "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-multiaddr"
)
@ -82,14 +82,10 @@ func (c *conn) openStream() *stream {
return sl
}
func (c *conn) NewStreamWithProtocol(pr inet.ProtocolID) (inet.Stream, error) {
func (c *conn) NewStream() (inet.Stream, error) {
log.Debugf("Conn.NewStreamWithProtocol: %s --> %s", c.local, c.remote)
s := c.openStream()
if err := inet.WriteProtocolHeader(pr, s); err != nil {
s.Close()
return nil, err
}
return s, nil
}

View File

@ -4,8 +4,8 @@ import (
"io"
"sync"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
)
// link implements mocknet.Link

View File

@ -3,10 +3,13 @@ package mocknet
import (
"fmt"
"sync"
"time"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
host "github.com/jbenet/go-ipfs/p2p/host"
bhost "github.com/jbenet/go-ipfs/p2p/host/basic"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@ -16,9 +19,8 @@ import (
// mocknet implements mocknet.Mocknet
type mocknet struct {
// must map on peer.ID (instead of peer.ID) because
// each inet.Network has different peerstore
nets map[peer.ID]*peernet
nets map[peer.ID]*peernet
hosts map[peer.ID]*bhost.BasicHost
// links make it possible to connect two peers.
// think of links as the physical medium.
@ -35,33 +37,36 @@ type mocknet struct {
func New(ctx context.Context) Mocknet {
return &mocknet{
nets: map[peer.ID]*peernet{},
hosts: map[peer.ID]*bhost.BasicHost{},
links: map[peer.ID]map[peer.ID]map[*link]struct{}{},
cg: ctxgroup.WithContext(ctx),
}
}
func (mn *mocknet) GenPeer() (inet.Network, error) {
sk, _, err := testutil.SeededKeyPair(int64(len(mn.nets)))
func (mn *mocknet) GenPeer() (host.Host, error) {
sk, _, err := testutil.SeededKeyPair(time.Now().UnixNano())
if err != nil {
return nil, err
}
a := testutil.RandLocalTCPAddress()
n, err := mn.AddPeer(sk, a)
h, err := mn.AddPeer(sk, a)
if err != nil {
return nil, err
}
return n, nil
return h, nil
}
func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (inet.Network, error) {
func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (host.Host, error) {
n, err := newPeernet(mn.cg.Context(), mn, k, a)
if err != nil {
return nil, err
}
h := bhost.New(n)
// make sure to add listening address!
// this makes debugging things simpler as remembering to register
// an address may cause unexpected failure.
@ -72,8 +77,9 @@ func (mn *mocknet) AddPeer(k ic.PrivKey, a ma.Multiaddr) (inet.Network, error) {
mn.Lock()
mn.nets[n.peer] = n
mn.hosts[n.peer] = h
mn.Unlock()
return n, nil
return h, nil
}
func (mn *mocknet) Peers() []peer.ID {
@ -87,16 +93,29 @@ func (mn *mocknet) Peers() []peer.ID {
return cp
}
func (mn *mocknet) Host(pid peer.ID) host.Host {
mn.RLock()
host := mn.hosts[pid]
mn.RUnlock()
return host
}
func (mn *mocknet) Net(pid peer.ID) inet.Network {
mn.RLock()
n := mn.nets[pid]
mn.RUnlock()
return n
}
func (mn *mocknet) Hosts() []host.Host {
mn.RLock()
defer mn.RUnlock()
for _, n := range mn.nets {
if n.peer == pid {
return n
}
cp := make([]host.Host, 0, len(mn.hosts))
for _, h := range mn.hosts {
cp = append(cp, h)
}
return nil
return cp
}
func (mn *mocknet) Nets() []inet.Network {
@ -269,7 +288,7 @@ func (mn *mocknet) ConnectAll() error {
continue
}
if err := mn.ConnectNets(n1, n2); err != nil {
if _, err := mn.ConnectNets(n1, n2); err != nil {
return err
}
}
@ -277,11 +296,11 @@ func (mn *mocknet) ConnectAll() error {
return nil
}
func (mn *mocknet) ConnectPeers(a, b peer.ID) error {
func (mn *mocknet) ConnectPeers(a, b peer.ID) (inet.Conn, error) {
return mn.Net(a).DialPeer(mn.cg.Context(), b)
}
func (mn *mocknet) ConnectNets(a, b inet.Network) error {
func (mn *mocknet) ConnectNets(a, b inet.Network) (inet.Conn, error) {
return a.DialPeer(mn.cg.Context(), b.LocalPeer())
}

View File

@ -5,9 +5,9 @@ import (
"math/rand"
"sync"
ic "github.com/jbenet/go-ipfs/crypto"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
ic "github.com/jbenet/go-ipfs/p2p/crypto"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
ctxgroup "github.com/jbenet/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-ctxgroup"
@ -27,9 +27,9 @@ type peernet struct {
connsByPeer map[peer.ID]map[*conn]struct{}
connsByLink map[*link]map[*conn]struct{}
// needed to implement inet.Network
mux inet.Mux
ids *inet.IDService
// implement inet.Network
streamHandler inet.StreamHandler
connHandler inet.ConnHandler
cg ctxgroup.ContextGroup
sync.RWMutex
@ -54,7 +54,6 @@ func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey,
mocknet: m,
peer: p,
ps: ps,
mux: inet.Mux{Handlers: inet.StreamHandlerMap{}},
cg: ctxgroup.WithContext(ctx),
connsByPeer: map[peer.ID]map[*conn]struct{}{},
@ -62,11 +61,6 @@ func newPeernet(ctx context.Context, m *mocknet, k ic.PrivKey,
}
n.cg.SetTeardown(n.teardown)
// setup a conn handler that immediately "asks the other side about them"
// this is ProtocolIdentify.
n.ids = inet.NewIDService(n)
return n, nil
}
@ -97,10 +91,6 @@ func (pn *peernet) Close() error {
return pn.cg.Close()
}
func (pn *peernet) Protocols() []inet.ProtocolID {
return pn.mux.Protocols()
}
func (pn *peernet) Peerstore() peer.Peerstore {
return pn.ps
}
@ -109,24 +99,41 @@ func (pn *peernet) String() string {
return fmt.Sprintf("<mock.peernet %s - %d conns>", pn.peer, len(pn.allConns()))
}
// handleNewStream is an internal function to trigger the muxer handler
// handleNewStream is an internal function to trigger the client's handler
func (pn *peernet) handleNewStream(s inet.Stream) {
go pn.mux.Handle(s)
pn.RLock()
handler := pn.streamHandler
pn.RUnlock()
if handler != nil {
go handler(s)
}
}
// handleNewConn is an internal function to trigger the client's handler
func (pn *peernet) handleNewConn(c inet.Conn) {
pn.RLock()
handler := pn.connHandler
pn.RUnlock()
if handler != nil {
go handler(c)
}
}
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) error {
func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) (inet.Conn, error) {
return pn.connect(p)
}
func (pn *peernet) connect(p peer.ID) error {
func (pn *peernet) connect(p peer.ID) (*conn, error) {
// first, check if we already have live connections
pn.RLock()
cs, found := pn.connsByPeer[p]
pn.RUnlock()
if found && len(cs) > 0 {
return nil
for c := range cs {
return c, nil
}
}
log.Debugf("%s (newly) dialing %s", pn.peer, p)
@ -134,7 +141,7 @@ func (pn *peernet) connect(p peer.ID) error {
// ok, must create a new connection. we need a link
links := pn.mocknet.LinksBetweenPeers(pn.peer, p)
if len(links) < 1 {
return fmt.Errorf("%s cannot connect to %s", pn.peer, p)
return nil, fmt.Errorf("%s cannot connect to %s", pn.peer, p)
}
// if many links found, how do we select? for now, randomly...
@ -144,8 +151,8 @@ func (pn *peernet) connect(p peer.ID) error {
log.Debugf("%s dialing %s openingConn", pn.peer, p)
// create a new connection with link
pn.openConn(p, l.(*link))
return nil
c := pn.openConn(p, l.(*link))
return c, nil
}
func (pn *peernet) openConn(r peer.ID, l *link) *conn {
@ -159,16 +166,15 @@ func (pn *peernet) openConn(r peer.ID, l *link) *conn {
func (pn *peernet) remoteOpenedConn(c *conn) {
log.Debugf("%s accepting connection from %s", pn.LocalPeer(), c.RemotePeer())
pn.addConn(c)
pn.handleNewConn(c)
}
// addConn constructs and adds a connection
// to given remote peer over given link
func (pn *peernet) addConn(c *conn) {
// run the Identify protocol/handshake.
pn.ids.IdentifyConn(c)
pn.Lock()
defer pn.Unlock()
cs, found := pn.connsByPeer[c.RemotePeer()]
if !found {
cs = map[*conn]struct{}{}
@ -182,7 +188,6 @@ func (pn *peernet) addConn(c *conn) {
pn.connsByLink[c.link] = cs
}
pn.connsByLink[c.link][c] = struct{}{}
pn.Unlock()
}
// removeConn removes a given conn
@ -307,15 +312,14 @@ func (pn *peernet) Connectedness(p peer.ID) inet.Connectedness {
// NewStream returns a new stream to given peer p.
// If there is no connection to p, attempts to create one.
// If ProtocolID is "", writes no header.
func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.ID) (inet.Stream, error) {
func (pn *peernet) NewStream(p peer.ID) (inet.Stream, error) {
pn.Lock()
defer pn.Unlock()
cs, found := pn.connsByPeer[p]
if !found || len(cs) < 1 {
pn.Unlock()
return nil, fmt.Errorf("no connection to peer")
}
pn.Unlock()
// if many conns are found, how do we select? for now, randomly...
// this would be an interesting place to test logic that can measure
@ -329,15 +333,21 @@ func (pn *peernet) NewStream(pr inet.ProtocolID, p peer.ID) (inet.Stream, error)
n--
}
return c.NewStreamWithProtocol(pr)
return c.NewStream()
}
// SetHandler sets the protocol handler on the Network's Muxer.
// SetStreamHandler sets the new stream handler on the Network.
// This operation is threadsafe.
func (pn *peernet) SetHandler(p inet.ProtocolID, h inet.StreamHandler) {
pn.mux.SetHandler(p, h)
func (pn *peernet) SetStreamHandler(h inet.StreamHandler) {
pn.Lock()
pn.streamHandler = h
pn.Unlock()
}
func (pn *peernet) IdentifyProtocol() *inet.IDService {
return pn.ids
// SetConnHandler sets the new conn handler on the Network.
// This operation is threadsafe.
func (pn *peernet) SetConnHandler(h inet.ConnHandler) {
pn.Lock()
pn.connHandler = h
pn.Unlock()
}
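
The peernet now keeps each client-supplied handler behind its RWMutex and fires it on a fresh goroutine, so setting a handler never races with dispatch and dispatch never blocks the network. In isolation the pattern reduces to the sketch below (handlerSlot is an illustrative name, not a type in this package):

package example

import "sync"

// handlerSlot shows the pattern peernet uses for its stream/conn handlers:
// writers take the full lock, dispatch takes only a read lock and then runs
// the handler on its own goroutine.
type handlerSlot struct {
	mu      sync.RWMutex
	handler func(interface{})
}

func (h *handlerSlot) Set(f func(interface{})) {
	h.mu.Lock()
	h.handler = f
	h.mu.Unlock()
}

func (h *handlerSlot) Dispatch(v interface{}) {
	h.mu.RLock()
	f := h.handler
	h.mu.RUnlock()
	if f != nil {
		go f(v)
	}
}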

View File

@ -4,8 +4,8 @@ import (
"fmt"
"io"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
)
// separate object so our interfaces are separate :)

View File

@ -3,7 +3,7 @@ package mocknet
import (
"io"
inet "github.com/jbenet/go-ipfs/net"
inet "github.com/jbenet/go-ipfs/p2p/net"
)
// stream implements inet.Stream

View File

@ -7,8 +7,9 @@ import (
"sync"
"testing"
inet "github.com/jbenet/go-ipfs/net"
peer "github.com/jbenet/go-ipfs/peer"
inet "github.com/jbenet/go-ipfs/p2p/net"
peer "github.com/jbenet/go-ipfs/p2p/peer"
protocol "github.com/jbenet/go-ipfs/p2p/protocol"
testutil "github.com/jbenet/go-ipfs/util/testutil"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
@ -46,31 +47,44 @@ func TestNetworkSetup(t *testing.T) {
a2 := testutil.RandLocalTCPAddress()
a3 := testutil.RandLocalTCPAddress()
n1, err := mn.AddPeer(sk1, a1)
h1, err := mn.AddPeer(sk1, a1)
if err != nil {
t.Fatal(err)
}
p1 := n1.LocalPeer()
p1 := h1.ID()
n2, err := mn.AddPeer(sk2, a2)
h2, err := mn.AddPeer(sk2, a2)
if err != nil {
t.Fatal(err)
}
p2 := n2.LocalPeer()
p2 := h2.ID()
n3, err := mn.AddPeer(sk3, a3)
h3, err := mn.AddPeer(sk3, a3)
if err != nil {
t.Fatal(err)
}
p3 := n3.LocalPeer()
p3 := h3.ID()
// check peers and net
if mn.Host(p1) != h1 {
t.Error("host for p1.ID != h1")
}
if mn.Host(p2) != h2 {
t.Error("host for p2.ID != h2")
}
if mn.Host(p3) != h3 {
t.Error("host for p3.ID != h3")
}
n1 := h1.Network()
if mn.Net(p1) != n1 {
t.Error("net for p1.ID != n1")
}
n2 := h2.Network()
if mn.Net(p2) != n2 {
t.Error("net for p2.ID != n1")
}
n3 := h3.Network()
if mn.Net(p3) != n3 {
t.Error("net for p3.ID != n1")
}
@ -177,7 +191,7 @@ func TestNetworkSetup(t *testing.T) {
}
// connect p2->p3
if err := n2.DialPeer(ctx, p3); err != nil {
if _, err := n2.DialPeer(ctx, p3); err != nil {
t.Error(err)
}
@ -191,41 +205,41 @@ func TestNetworkSetup(t *testing.T) {
// p.NetworkConns(n3)
// can create a stream 2->3, 3->2,
if _, err := n2.NewStream(inet.ProtocolDiag, p3); err != nil {
if _, err := n2.NewStream(p3); err != nil {
t.Error(err)
}
if _, err := n3.NewStream(inet.ProtocolDiag, p2); err != nil {
if _, err := n3.NewStream(p2); err != nil {
t.Error(err)
}
// but not 1->2 nor 2->2 (not linked), nor 1->1 (not connected)
if _, err := n1.NewStream(inet.ProtocolDiag, p2); err == nil {
if _, err := n1.NewStream(p2); err == nil {
t.Error("should not be able to connect")
}
if _, err := n2.NewStream(inet.ProtocolDiag, p2); err == nil {
if _, err := n2.NewStream(p2); err == nil {
t.Error("should not be able to connect")
}
if _, err := n1.NewStream(inet.ProtocolDiag, p1); err == nil {
if _, err := n1.NewStream(p1); err == nil {
t.Error("should not be able to connect")
}
// connect p1->p1 (should work)
if err := n1.DialPeer(ctx, p1); err != nil {
if _, err := n1.DialPeer(ctx, p1); err != nil {
t.Error("p1 should be able to dial self.", err)
}
// and a stream too
if _, err := n1.NewStream(inet.ProtocolDiag, p1); err != nil {
if _, err := n1.NewStream(p1); err != nil {
t.Error(err)
}
// connect p1->p2
if err := n1.DialPeer(ctx, p2); err == nil {
if _, err := n1.DialPeer(ctx, p2); err == nil {
t.Error("p1 should not be able to dial p2, not connected...")
}
// connect p3->p1
if err := n3.DialPeer(ctx, p1); err == nil {
if _, err := n3.DialPeer(ctx, p1); err == nil {
t.Error("p3 should not be able to dial p1, not connected...")
}
@ -243,12 +257,12 @@ func TestNetworkSetup(t *testing.T) {
// should now be able to connect
// connect p1->p2
if err := n1.DialPeer(ctx, p2); err != nil {
if _, err := n1.DialPeer(ctx, p2); err != nil {
t.Error(err)
}
// and a stream should work now too :)
if _, err := n2.NewStream(inet.ProtocolDiag, p3); err != nil {
if _, err := n2.NewStream(p3); err != nil {
t.Error(err)
}
@ -262,27 +276,25 @@ func TestStreams(t *testing.T) {
}
handler := func(s inet.Stream) {
go func() {
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if !bytes.Equal(b, []byte("beep")) {
panic("bytes mismatch")
}
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
s.Close()
}()
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if !bytes.Equal(b, []byte("beep")) {
panic("bytes mismatch")
}
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
s.Close()
}
nets := mn.Nets()
for _, n := range nets {
n.SetHandler(inet.ProtocolDHT, handler)
hosts := mn.Hosts()
for _, h := range mn.Hosts() {
h.SetStreamHandler(protocol.TestingID, handler)
}
s, err := nets[0].NewStream(inet.ProtocolDHT, nets[1].LocalPeer())
s, err := hosts[0].NewStream(protocol.TestingID, hosts[1].ID())
if err != nil {
t.Fatal(err)
}
@ -352,17 +364,10 @@ func TestStreamsStress(t *testing.T) {
t.Fatal(err)
}
protos := []inet.ProtocolID{
inet.ProtocolDHT,
inet.ProtocolBitswap,
inet.ProtocolDiag,
}
nets := mn.Nets()
for _, n := range nets {
for _, p := range protos {
n.SetHandler(p, makePonger(string(p)))
}
hosts := mn.Hosts()
for _, h := range hosts {
ponger := makePonger(string(protocol.TestingID))
h.SetStreamHandler(protocol.TestingID, ponger)
}
var wg sync.WaitGroup
@ -370,18 +375,16 @@ func TestStreamsStress(t *testing.T) {
wg.Add(1)
go func(i int) {
defer wg.Done()
from := rand.Intn(len(nets))
to := rand.Intn(len(nets))
p := rand.Intn(3)
proto := protos[p]
s, err := nets[from].NewStream(protos[p], nets[to].LocalPeer())
from := rand.Intn(len(hosts))
to := rand.Intn(len(hosts))
s, err := hosts[from].NewStream(protocol.TestingID, hosts[to].ID())
if err != nil {
log.Debugf("%d (%s) %d (%s) %d (%s)", from, nets[from], to, nets[to], p, protos[p])
log.Debugf("%d (%s) %d (%s)", from, hosts[from], to, hosts[to])
panic(err)
}
log.Infof("%d start pinging", i)
makePinger(string(proto), rand.Intn(100))(s)
makePinger("pingpong", rand.Intn(100))(s)
log.Infof("%d done pinging", i)
}(i)
}
@ -401,12 +404,12 @@ func TestAdding(t *testing.T) {
}
a := testutil.RandLocalTCPAddress()
n, err := mn.AddPeer(sk, a)
h, err := mn.AddPeer(sk, a)
if err != nil {
t.Fatal(err)
}
peers = append(peers, n.LocalPeer())
peers = append(peers, h.ID())
}
p1 := peers[0]
@ -422,40 +425,38 @@ func TestAdding(t *testing.T) {
}
// set the new stream handler on p2
n2 := mn.Net(p2)
if n2 == nil {
t.Fatalf("no network for %s", p2)
h2 := mn.Host(p2)
if h2 == nil {
t.Fatalf("no host for %s", p2)
}
n2.SetHandler(inet.ProtocolBitswap, func(s inet.Stream) {
go func() {
defer s.Close()
h2.SetStreamHandler(protocol.TestingID, func(s inet.Stream) {
defer s.Close()
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if string(b) != "beep" {
panic("did not beep!")
}
b := make([]byte, 4)
if _, err := io.ReadFull(s, b); err != nil {
panic(err)
}
if string(b) != "beep" {
panic("did not beep!")
}
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
}()
if _, err := s.Write([]byte("boop")); err != nil {
panic(err)
}
})
// connect p1 to p2
if err := mn.ConnectPeers(p1, p2); err != nil {
if _, err := mn.ConnectPeers(p1, p2); err != nil {
t.Fatal(err)
}
// talk to p2
n1 := mn.Net(p1)
if n1 == nil {
h1 := mn.Host(p1)
if h1 == nil {
t.Fatalf("no network for %s", p1)
}
s, err := n1.NewStream(inet.ProtocolBitswap, p2)
s, err := h1.NewStream(protocol.TestingID, p2)
if err != nil {
t.Fatal(err)
}

View File

@ -1,7 +1,7 @@
package swarm
import (
conn "github.com/jbenet/go-ipfs/net/conn"
conn "github.com/jbenet/go-ipfs/p2p/net/conn"
eventlog "github.com/jbenet/go-ipfs/util/eventlog"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"

Some files were not shown because too many files have changed in this diff.