Mirror of https://github.com/ipfs/kubo.git (synced 2025-08-06 19:44:01 +08:00)
Made routing code pass golint.

Committed by Juan Batiz-Benet

parent 87bfdbc599
commit a6851fa55b
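The renames in the hunks below follow the two golint rules this commit is addressing: an exported identifier should carry a doc comment that begins with its own name, and initialisms such as ID keep a consistent case (Id gets flagged), while names that do not need to be exported are simply unexported (PutLocal becomes putLocal). A minimal, hypothetical sketch of Go code that passes those checks, with names that are not part of this commit:

// Package example is a hypothetical sketch of the golint naming and
// doc-comment rules; none of these identifiers come from the kubo code.
package example

// NodeID is an exported type, so golint expects a doc comment that starts
// with the type's name, and the initialism stays fully capitalized
// ("NodeId" would be flagged).
type NodeID string

// NewNodeID constructs a NodeID from a raw string; exported functions
// follow the same doc-comment rule.
func NewNodeID(raw string) NodeID {
	return NodeID(raw)
}

// unexported helpers need neither a doc comment nor renaming to pass golint.
func isEmpty(id NodeID) bool {
	return id == ""
}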
@@ -180,7 +180,7 @@ func TestProvides(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	err = dhts[3].PutLocal(u.Key("hello"), []byte("world"))
+	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -225,7 +225,7 @@ func TestLayeredGet(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	err = dhts[3].PutLocal(u.Key("hello"), []byte("world"))
+	err = dhts[3].putLocal(u.Key("hello"), []byte("world"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -36,7 +36,7 @@ func (dht *IpfsDHT) getDiagInfo() *diagInfo {
 	di.LifeSpan = time.Since(dht.birth)
 	di.Keys = nil // Currently no way to query datastore
 
-	for _, p := range dht.routingTables[0].listPeers() {
+	for _, p := range dht.routingTables[0].ListPeers() {
 		di.Connections = append(di.Connections, connDiagInfo{p.GetLatency(), p.ID})
 	}
 	return di
@@ -105,9 +105,9 @@ func TestGetFailures(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	resp := DHTMessage{
+	resp := Message{
 		Type: pmes.GetType(),
-		Id: pmes.GetId(),
+		ID: pmes.GetId(),
 		Response: true,
 		Success: false,
 	}
@@ -140,10 +140,10 @@ func TestGetFailures(t *testing.T) {
 	})
 
 	// Now we test this DHT's handleGetValue failure
-	req := DHTMessage{
+	req := Message{
 		Type: PBDHTMessage_GET_VALUE,
 		Key: "hello",
-		Id: GenerateMessageID(),
+		ID: GenerateMessageID(),
 		Value: []byte{0},
 	}
 	fn.Chan.Incoming <- swarm.NewMessage(other, req.ToProtobuf())
@@ -68,7 +68,7 @@ func (b *Bucket) Split(cpl int, target ID) *Bucket {
 	newbuck.list = out
 	e := b.list.Front()
 	for e != nil {
-		peerID := convertPeerID(e.Value.(*peer.Peer).ID)
+		peerID := ConvertPeerID(e.Value.(*peer.Peer).ID)
 		peerCPL := prefLen(peerID, target)
 		if peerCPL > cpl {
 			cur := e
@@ -28,7 +28,8 @@ type RoutingTable struct {
 	bucketsize int
 }
 
-func newRoutingTable(bucketsize int, localID ID, latency time.Duration) *RoutingTable {
+// NewRoutingTable creates a new routing table with a given bucketsize, local ID, and latency tolerance.
+func NewRoutingTable(bucketsize int, localID ID, latency time.Duration) *RoutingTable {
 	rt := new(RoutingTable)
 	rt.Buckets = []*Bucket{newBucket()}
 	rt.bucketsize = bucketsize
@@ -42,7 +43,7 @@ func newRoutingTable(bucketsize int, localID ID, latency time.Duration) *Routing
 func (rt *RoutingTable) Update(p *peer.Peer) *peer.Peer {
 	rt.tabLock.Lock()
 	defer rt.tabLock.Unlock()
-	peerID := convertPeerID(p.ID)
+	peerID := ConvertPeerID(p.ID)
 	cpl := xor(peerID, rt.local).commonPrefixLen()
 
 	bucketID := cpl
@@ -108,7 +109,7 @@ func (p peerSorterArr) Less(a, b int) bool {
 func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) peerSorterArr {
 	for e := peerList.Front(); e != nil; e = e.Next() {
 		p := e.Value.(*peer.Peer)
-		pID := convertPeerID(p.ID)
+		pID := ConvertPeerID(p.ID)
 		pd := peerDistance{
 			p: p,
 			distance: xor(target, pID),
@@ -124,7 +125,7 @@ func copyPeersFromList(target ID, peerArr peerSorterArr, peerList *list.List) pe
 
 // Find a specific peer by ID or return nil
 func (rt *RoutingTable) Find(id peer.ID) *peer.Peer {
-	srch := rt.NearestPeers(convertPeerID(id), 1)
+	srch := rt.NearestPeers(ConvertPeerID(id), 1)
 	if len(srch) == 0 || !srch[0].ID.Equal(id) {
 		return nil
 	}
@@ -190,8 +191,9 @@ func (rt *RoutingTable) Size() int {
 	return tot
 }
 
+// ListPeers takes a RoutingTable and returns a list of all peers from all buckets in the table.
 // NOTE: This is potentially unsafe... use at your own risk
-func (rt *RoutingTable) listPeers() []*peer.Peer {
+func (rt *RoutingTable) ListPeers() []*peer.Peer {
 	var peers []*peer.Peer
 	for _, buck := range rt.Buckets {
 		for e := buck.getIter(); e != nil; e = e.Next() {
@@ -201,10 +203,11 @@ func (rt *RoutingTable) listPeers() []*peer.Peer {
 	return peers
 }
 
-func (rt *RoutingTable) print() {
+// Print prints a descriptive statement about the provided RoutingTable
+func (rt *RoutingTable) Print() {
 	fmt.Printf("Routing Table, bs = %d, Max latency = %d\n", rt.bucketsize, rt.maxLatency)
 	rt.tabLock.RLock()
-	peers := rt.listPeers()
+	peers := rt.ListPeers()
 	for i, p := range peers {
 		fmt.Printf("%d) %s %s\n", i, p.ID.Pretty(), p.GetLatency().String())
 	}
@@ -36,7 +36,7 @@ func TestBucket(t *testing.T) {
 	}
 
 	local := _randPeer()
-	localID := convertPeerID(local.ID)
+	localID := ConvertPeerID(local.ID)
 
 	i := rand.Intn(len(peers))
 	e := b.find(peers[i].ID)
@@ -44,10 +44,10 @@ func TestBucket(t *testing.T) {
 		t.Errorf("Failed to find peer: %v", peers[i])
 	}
 
-	spl := b.Split(0, convertPeerID(local.ID))
+	spl := b.Split(0, ConvertPeerID(local.ID))
 	llist := b.list
 	for e := llist.Front(); e != nil; e = e.Next() {
-		p := convertPeerID(e.Value.(*peer.Peer).ID)
+		p := ConvertPeerID(e.Value.(*peer.Peer).ID)
 		cpl := xor(p, localID).commonPrefixLen()
 		if cpl > 0 {
 			t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket")
@@ -56,7 +56,7 @@ func TestBucket(t *testing.T) {
 
 	rlist := spl.list
 	for e := rlist.Front(); e != nil; e = e.Next() {
-		p := convertPeerID(e.Value.(*peer.Peer).ID)
+		p := ConvertPeerID(e.Value.(*peer.Peer).ID)
 		cpl := xor(p, localID).commonPrefixLen()
 		if cpl == 0 {
 			t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket")
@@ -67,7 +67,7 @@ func TestBucket(t *testing.T) {
 // Right now, this just makes sure that it doesnt hang or crash
 func TestTableUpdate(t *testing.T) {
 	local := _randPeer()
-	rt := newRoutingTable(10, convertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(10, ConvertPeerID(local.ID), time.Hour)
 
 	peers := make([]*peer.Peer, 100)
 	for i := 0; i < 100; i++ {
@@ -93,7 +93,7 @@ func TestTableUpdate(t *testing.T) {
 
 func TestTableFind(t *testing.T) {
 	local := _randPeer()
-	rt := newRoutingTable(10, convertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(10, ConvertPeerID(local.ID), time.Hour)
 
 	peers := make([]*peer.Peer, 100)
 	for i := 0; i < 5; i++ {
@@ -102,7 +102,7 @@ func TestTableFind(t *testing.T) {
 	}
 
 	t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty())
-	found := rt.NearestPeer(convertPeerID(peers[2].ID))
+	found := rt.NearestPeer(ConvertPeerID(peers[2].ID))
 	if !found.ID.Equal(peers[2].ID) {
 		t.Fatalf("Failed to lookup known node...")
 	}
@@ -110,7 +110,7 @@ func TestTableFind(t *testing.T) {
 
 func TestTableFindMultiple(t *testing.T) {
 	local := _randPeer()
-	rt := newRoutingTable(20, convertPeerID(local.ID), time.Hour)
+	rt := NewRoutingTable(20, ConvertPeerID(local.ID), time.Hour)
 
 	peers := make([]*peer.Peer, 100)
 	for i := 0; i < 18; i++ {
@@ -119,7 +119,7 @@ func TestTableFindMultiple(t *testing.T) {
 	}
 
 	t.Logf("Searching for peer: '%s'", peers[2].ID.Pretty())
-	found := rt.NearestPeers(convertPeerID(peers[2].ID), 15)
+	found := rt.NearestPeers(ConvertPeerID(peers[2].ID), 15)
 	if len(found) != 15 {
 		t.Fatalf("Got back different number of peers than we expected.")
 	}
@@ -130,7 +130,7 @@ func TestTableFindMultiple(t *testing.T) {
 // and set GOMAXPROCS above 1
 func TestTableMultithreaded(t *testing.T) {
 	local := peer.ID("localPeer")
-	tab := newRoutingTable(20, convertPeerID(local), time.Hour)
+	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour)
 	var peers []*peer.Peer
 	for i := 0; i < 500; i++ {
 		peers = append(peers, _randPeer())
@@ -167,8 +167,8 @@ func TestTableMultithreaded(t *testing.T) {
 
 func BenchmarkUpdates(b *testing.B) {
 	b.StopTimer()
-	local := convertKey("localKey")
-	tab := newRoutingTable(20, local, time.Hour)
+	local := ConvertKey("localKey")
+	tab := NewRoutingTable(20, local, time.Hour)
 
 	var peers []*peer.Peer
 	for i := 0; i < b.N; i++ {
@@ -183,8 +183,8 @@ func BenchmarkUpdates(b *testing.B) {
 
 func BenchmarkFinds(b *testing.B) {
 	b.StopTimer()
-	local := convertKey("localKey")
-	tab := newRoutingTable(20, local, time.Hour)
+	local := ConvertKey("localKey")
+	tab := NewRoutingTable(20, local, time.Hour)
 
 	var peers []*peer.Peer
 	for i := 0; i < b.N; i++ {
@@ -76,21 +76,23 @@ func equalizeSizes(a, b ID) (ID, ID) {
 	return a, b
 }
 
-func convertPeerID(id peer.ID) ID {
+// ConvertPeerID creates a DHT ID by hashing a Peer ID (Multihash)
+func ConvertPeerID(id peer.ID) ID {
 	hash := sha256.Sum256(id)
 	return hash[:]
 }
 
-func convertKey(id u.Key) ID {
+// ConvertKey creates a DHT ID by hashing a local key (String)
+func ConvertKey(id u.Key) ID {
 	hash := sha256.Sum256([]byte(id))
 	return hash[:]
 }
 
 // Closer returns true if a is closer to key than b is
 func Closer(a, b peer.ID, key u.Key) bool {
-	aid := convertPeerID(a)
-	bid := convertPeerID(b)
-	tgt := convertKey(key)
+	aid := ConvertPeerID(a)
+	bid := ConvertPeerID(b)
+	tgt := ConvertKey(key)
 	adist := xor(aid, tgt)
 	bdist := xor(bid, tgt)
 
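The Closer function above implements the usual Kademlia notion of distance: hash both peer IDs and the key into the DHT ID space, XOR each peer's ID with the key's ID, and treat the smaller XOR result (compared as a big-endian integer) as closer. A self-contained sketch of that comparison, using plain byte slices and hypothetical helper names rather than this package's ID type:

// Standalone illustration of XOR-distance comparison. The name xorBytes
// and the sample inputs are illustrative only, not part of this commit.
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// xorBytes returns the bitwise XOR of two equal-length byte slices.
func xorBytes(a, b []byte) []byte {
	out := make([]byte, len(a))
	for i := range a {
		out[i] = a[i] ^ b[i]
	}
	return out
}

func main() {
	a := sha256.Sum256([]byte("peerA"))
	b := sha256.Sum256([]byte("peerB"))
	key := sha256.Sum256([]byte("hello"))

	adist := xorBytes(a[:], key[:])
	bdist := xorBytes(b[:], key[:])

	// Lexicographic comparison of equal-length slices matches big-endian
	// integer comparison, so a negative result means a is closer to key.
	fmt.Println("a closer than b:", bytes.Compare(adist, bdist) < 0)
}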