
This is a major refactor of the entire codebase. It changes the monolithic peer.Peer into a peer.ID and a peer.Peerstore. Other changes:

- removed handshake3
- testutil: vastly simplified peer
- secio: bugfix + debugging logs
- testutil: RandKeyPair
- backpressure bugfix: w.o.w.
- peer: added hex enc/dec
- peer: added a PeerInfo struct. PeerInfo is a small struct used to pass around a peer with a set of addresses and keys. This is not meant to be a complete view of the system, but rather to model updates to the peerstore. It is used by things like the routing system. (A rough sketch of its shape follows this list.)
- updated peer/queue + peerset
- latency metrics
- testutil: use crand for PeerID gen. RandPeerID generates random "valid" peer IDs. It does not need to generate keys, because it is as if we lost the key right away; it is fine to read some randomness and hash it. To generate proper keys and an ID, use:

      sk, pk, _ := testutil.RandKeyPair()
      id, _ := peer.IDFromPublicKey(pk)

  Also added a RandPeerIDFatal helper. (A sketch of the hashing approach also follows this list.)
- removed old spipe
- updated seccat
- core: cleanup initIdentity
- removed old getFromPeerList
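As a rough illustration of the PeerInfo struct described above, a minimal self-contained sketch might look like the following. The field names and the stand-in ID/Multiaddr types are assumptions for illustration, not the committed code:

	package main

	import "fmt"

	// ID stands in for peer.ID, assumed here to be a string-backed type.
	type ID string

	// Multiaddr stands in for the multiaddr type, just to keep this
	// sketch self-contained.
	type Multiaddr string

	// PeerInfo matches the description in the commit message: a small
	// struct used to pass around a peer with a set of addresses,
	// modeling an update to the peerstore rather than a complete view
	// of the peer.
	type PeerInfo struct {
		ID    ID
		Addrs []Multiaddr
	}

	func main() {
		pi := PeerInfo{
			ID:    "QmExample", // hypothetical peer ID
			Addrs: []Multiaddr{"/ip4/127.0.0.1/tcp/4001"},
		}
		fmt.Println(pi.ID, pi.Addrs)
	}
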
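And a minimal sketch of the "read some randomness and hash it" idea behind RandPeerID, using only the standard library. The helper name, the 16-byte buffer, and the choice of SHA-256 are assumptions; the real code would presumably wrap the digest in a multihash:

	package main

	import (
		"crypto/rand" // the "crand" the commit message refers to
		"crypto/sha256"
		"fmt"
	)

	// randPeerID reads some randomness and hashes it into an ID-shaped
	// value. No keypair is generated: it is as if the key were lost
	// right away, which is all these tests need.
	func randPeerID() (string, error) {
		buf := make([]byte, 16)
		if _, err := rand.Read(buf); err != nil {
			return "", err
		}
		sum := sha256.Sum256(buf)
		return fmt.Sprintf("%x", sum), nil
	}

	func main() {
		id, err := randPeerID()
		if err != nil {
			panic(err)
		}
		fmt.Println("random peer ID:", id)
	}

In the tests below, tu.RandPeerIDFatal(t) plays this role, failing the test instead of returning an error.
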
package kbucket

import (
	"math/rand"
	"testing"
	"time"

	tu "github.com/jbenet/go-ipfs/util/testutil"

	peer "github.com/jbenet/go-ipfs/peer"
)

// Test basic features of the bucket struct
func TestBucket(t *testing.T) {
	b := newBucket()

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		b.pushFront(peers[i])
	}

	local := tu.RandPeerIDFatal(t)
	localID := ConvertPeerID(local)

	i := rand.Intn(len(peers))
	e := b.find(peers[i])
	if e == nil {
		t.Errorf("Failed to find peer: %v", peers[i])
	}

	spl := b.Split(0, ConvertPeerID(local))
	llist := b.list
	for e := llist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl > 0 {
			t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket")
		}
	}

	rlist := spl.list
	for e := rlist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl == 0 {
			t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket")
		}
	}
}

// Right now, this just makes sure that it doesn't hang or crash
func TestTableUpdate(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
	}

	// Testing Update; the return value is the evicted peer, if any
	for i := 0; i < 10000; i++ {
		p := rt.Update(peers[rand.Intn(len(peers))])
		if p != "" {
			// t.Log("evicted peer.")
		}
	}

	for i := 0; i < 100; i++ {
		id := ConvertPeerID(tu.RandPeerIDFatal(t))
		ret := rt.NearestPeers(id, 5)
		if len(ret) == 0 {
			t.Fatal("Failed to find node near ID.")
		}
	}
}

func TestTableFind(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 5; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeer(ConvertPeerID(peers[2]))
	if found != peers[2] {
		t.Fatalf("Failed to lookup known node...")
	}
}

func TestTableFindMultiple(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := peer.NewMetrics()
	rt := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 18; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeers(ConvertPeerID(peers[2]), 15)
	if len(found) != 15 {
		t.Fatalf("Got back different number of peers than we expected.")
	}
}

// Looks for race conditions in table operations. For a more 'certain'
// test, increase the loop counter from 1000 to a much higher number
// and set GOMAXPROCS above 1.
func TestTableMultithreaded(t *testing.T) {
	local := peer.ID("localPeer")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)
	var peers []peer.ID
	for i := 0; i < 500; i++ {
		peers = append(peers, tu.RandPeerIDFatal(t))
	}

	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Find(peers[n])
		}
		done <- struct{}{}
	}()
	<-done
	<-done
	<-done
}

func BenchmarkUpdates(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Update(peers[i])
	}
}

func BenchmarkFinds(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := peer.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
		tab.Update(peers[i])
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Find(peers[i])
	}
}