Mirror of https://github.com/ipfs/kubo.git (synced 2025-07-31 08:12:22 +08:00)
refactor(unixfs) move proto to pb package

not internal since io needs it
fix(fuse/ipns) use pb package
fix(fuse) import protos from unixfs/pb package
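
The change is mechanical: the generated protobuf types move out of the unixfs package into the new unixfs/pb package, and every reference switches from PBData / ft.PBData to pb.PBData (aliased ftpb in the fuse and unixfs/io code). Below is a minimal consumer-side sketch of the new layout; it is an illustration only, using just the identifiers visible in the hunks that follow (pb.PBData, pb.PBData_File, proto.Uint64, proto.Marshal, proto.Unmarshal) and the vendored goprotobuf import path copied from the diff.

package main

import (
	"fmt"
	"log"

	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
	pb "github.com/jbenet/go-ipfs/unixfs/pb"
)

func main() {
	// Build a file-typed PBData message using the relocated generated types.
	typ := pb.PBData_File
	pbfile := new(pb.PBData)
	pbfile.Type = &typ
	pbfile.Data = []byte("hello")
	pbfile.Filesize = proto.Uint64(uint64(len(pbfile.Data)))

	raw, err := proto.Marshal(pbfile)
	if err != nil {
		log.Fatal(err)
	}

	// Any other package (fuse, unixfs/io, ...) can decode the same bytes by
	// importing unixfs/pb directly, without pulling in the parent unixfs package.
	decoded := new(pb.PBData)
	if err := proto.Unmarshal(raw, decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetType(), decoded.GetFilesize())
}

The unixfs helpers (FilePBData, FolderPBData, DataSize, ...) continue to do the marshalling; the message type they operate on now comes from pb, which is kept as a normal exported package rather than an internal one because unixfs/io needs to import it.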
@@ -17,6 +17,7 @@ import (
 	mdag "github.com/jbenet/go-ipfs/merkledag"
 	ft "github.com/jbenet/go-ipfs/unixfs"
 	uio "github.com/jbenet/go-ipfs/unixfs/io"
+	ftpb "github.com/jbenet/go-ipfs/unixfs/pb"
 	u "github.com/jbenet/go-ipfs/util"
 )
 
@@ -206,11 +207,11 @@ type Node struct {
 	Ipfs *core.IpfsNode
 	Nd *mdag.Node
 	dagMod *uio.DagModifier
-	cached *ft.PBData
+	cached *ftpb.PBData
 }
 
 func (s *Node) loadData() error {
-	s.cached = new(ft.PBData)
+	s.cached = new(ftpb.PBData)
 	return proto.Unmarshal(s.Nd.Data, s.cached)
 }
 
@@ -223,9 +224,9 @@ func (s *Node) Attr() fuse.Attr {
 		}
 	}
 	switch s.cached.GetType() {
-	case ft.PBData_Directory:
+	case ftpb.PBData_Directory:
 		return fuse.Attr{Mode: os.ModeDir | 0555}
-	case ft.PBData_File, ft.PBData_Raw:
+	case ftpb.PBData_File, ftpb.PBData_Raw:
 		size, err := ft.DataSize(s.Nd.Data)
 		if err != nil {
 			log.Error("Error getting size of file: %s", err)

@@ -20,8 +20,8 @@ import (
 
 	core "github.com/jbenet/go-ipfs/core"
 	mdag "github.com/jbenet/go-ipfs/merkledag"
-	ft "github.com/jbenet/go-ipfs/unixfs"
 	uio "github.com/jbenet/go-ipfs/unixfs/io"
+	ftpb "github.com/jbenet/go-ipfs/unixfs/pb"
 	u "github.com/jbenet/go-ipfs/util"
 )
 
@@ -81,11 +81,11 @@ type Node struct {
 	Ipfs *core.IpfsNode
 	Nd *mdag.Node
 	fd *uio.DagReader
-	cached *ft.PBData
+	cached *ftpb.PBData
 }
 
 func (s *Node) loadData() error {
-	s.cached = new(ft.PBData)
+	s.cached = new(ftpb.PBData)
 	return proto.Unmarshal(s.Nd.Data, s.cached)
 }
 
@@ -96,9 +96,9 @@ func (s *Node) Attr() fuse.Attr {
 		s.loadData()
 	}
 	switch s.cached.GetType() {
-	case ft.PBData_Directory:
+	case ftpb.PBData_Directory:
 		return fuse.Attr{Mode: os.ModeDir | 0555}
-	case ft.PBData_File, ft.PBData_Raw:
+	case ftpb.PBData_File, ftpb.PBData_Raw:
 		size, _ := s.Nd.Size()
 		return fuse.Attr{
 			Mode: 0444,

@@ -5,15 +5,16 @@ package unixfs
 import (
 	"errors"
 
-	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+	pb "github.com/jbenet/go-ipfs/unixfs/pb"
 )
 
 var ErrMalformedFileFormat = errors.New("malformed data in file format")
 var ErrInvalidDirLocation = errors.New("found directory node in unexpected place")
 var ErrUnrecognizedType = errors.New("unrecognized node type")
 
-func FromBytes(data []byte) (*PBData, error) {
-	pbdata := new(PBData)
+func FromBytes(data []byte) (*pb.PBData, error) {
+	pbdata := new(pb.PBData)
 	err := proto.Unmarshal(data, pbdata)
 	if err != nil {
 		return nil, err
@@ -22,8 +23,8 @@ func FromBytes(data []byte) (*PBData, error) {
 }
 
 func FilePBData(data []byte, totalsize uint64) []byte {
-	pbfile := new(PBData)
-	typ := PBData_File
+	pbfile := new(pb.PBData)
+	typ := pb.PBData_File
 	pbfile.Type = &typ
 	pbfile.Data = data
 	pbfile.Filesize = proto.Uint64(totalsize)
@@ -42,8 +43,8 @@ func FilePBData(data []byte, totalsize uint64) []byte {
 
 // Returns Bytes that represent a Directory
 func FolderPBData() []byte {
-	pbfile := new(PBData)
-	typ := PBData_Directory
+	pbfile := new(pb.PBData)
+	typ := pb.PBData_Directory
 	pbfile.Type = &typ
 
 	data, err := proto.Marshal(pbfile)
@@ -55,8 +56,8 @@ func FolderPBData() []byte {
 }
 
 func WrapData(b []byte) []byte {
-	pbdata := new(PBData)
-	typ := PBData_Raw
+	pbdata := new(pb.PBData)
+	typ := pb.PBData_Raw
 	pbdata.Data = b
 	pbdata.Type = &typ
 
@@ -70,7 +71,7 @@ func WrapData(b []byte) []byte {
 }
 
 func UnwrapData(data []byte) ([]byte, error) {
-	pbdata := new(PBData)
+	pbdata := new(pb.PBData)
 	err := proto.Unmarshal(data, pbdata)
 	if err != nil {
 		return nil, err
@@ -79,18 +80,18 @@ func UnwrapData(data []byte) ([]byte, error) {
 }
 
 func DataSize(data []byte) (uint64, error) {
-	pbdata := new(PBData)
+	pbdata := new(pb.PBData)
 	err := proto.Unmarshal(data, pbdata)
 	if err != nil {
 		return 0, err
 	}
 
 	switch pbdata.GetType() {
-	case PBData_Directory:
+	case pb.PBData_Directory:
 		return 0, errors.New("Cant get data size of directory!")
-	case PBData_File:
+	case pb.PBData_File:
 		return pbdata.GetFilesize(), nil
-	case PBData_Raw:
+	case pb.PBData_Raw:
 		return uint64(len(pbdata.GetData())), nil
 	default:
 		return 0, errors.New("Unrecognized node data type!")
@@ -109,8 +110,8 @@ func (mb *MultiBlock) AddBlockSize(s uint64) {
 }
 
 func (mb *MultiBlock) GetBytes() ([]byte, error) {
-	pbn := new(PBData)
-	t := PBData_File
+	pbn := new(pb.PBData)
+	t := pb.PBData_File
 	pbn.Type = &t
 	pbn.Filesize = proto.Uint64(uint64(len(mb.Data)) + mb.subtotal)
 	pbn.Blocksizes = mb.blocksizes

@@ -3,7 +3,8 @@ package unixfs
 import (
 	"testing"
 
-	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+	pb "github.com/jbenet/go-ipfs/unixfs/pb"
 )
 
 func TestMultiBlock(t *testing.T) {
@@ -19,7 +20,7 @@ func TestMultiBlock(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	pbn := new(PBData)
+	pbn := new(pb.PBData)
 	err = proto.Unmarshal(b, pbn)
 	if err != nil {
 		t.Fatal(err)

@@ -4,11 +4,12 @@ import (
 	"bytes"
 	"errors"
 
-	"github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
 
-	"github.com/jbenet/go-ipfs/importer/chunk"
+	chunk "github.com/jbenet/go-ipfs/importer/chunk"
 	mdag "github.com/jbenet/go-ipfs/merkledag"
 	ft "github.com/jbenet/go-ipfs/unixfs"
+	ftpb "github.com/jbenet/go-ipfs/unixfs/pb"
 	u "github.com/jbenet/go-ipfs/util"
 )
 
@@ -19,7 +20,7 @@ type DagModifier struct {
 	dagserv *mdag.DAGService
 	curNode *mdag.Node
 
-	pbdata *ft.PBData
+	pbdata *ftpb.PBData
 	splitter chunk.BlockSplitter
 }
 

@@ -8,6 +8,7 @@ import (
 	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
 	mdag "github.com/jbenet/go-ipfs/merkledag"
 	ft "github.com/jbenet/go-ipfs/unixfs"
+	ftpb "github.com/jbenet/go-ipfs/unixfs/pb"
 	u "github.com/jbenet/go-ipfs/util"
 )
 
@@ -24,23 +25,23 @@ type DagReader struct {
 // NewDagReader creates a new reader object that reads the data represented by the given
 // node, using the passed in DAGService for data retreival
 func NewDagReader(n *mdag.Node, serv *mdag.DAGService) (io.Reader, error) {
-	pb := new(ft.PBData)
+	pb := new(ftpb.PBData)
 	err := proto.Unmarshal(n.Data, pb)
 	if err != nil {
 		return nil, err
 	}
 
 	switch pb.GetType() {
-	case ft.PBData_Directory:
+	case ftpb.PBData_Directory:
 		// Dont allow reading directories
 		return nil, ErrIsDir
-	case ft.PBData_File:
+	case ftpb.PBData_File:
 		return &DagReader{
 			node: n,
 			serv: serv,
 			buf: bytes.NewBuffer(pb.GetData()),
 		}, nil
-	case ft.PBData_Raw:
+	case ftpb.PBData_Raw:
 		// Raw block will just be a single level, return a byte buffer
 		return bytes.NewBuffer(pb.GetData()), nil
 	default:
@@ -63,7 +64,7 @@ func (dr *DagReader) precalcNextBuf() error {
 		}
 		nxt = nxtNode
 	}
-	pb := new(ft.PBData)
+	pb := new(ftpb.PBData)
 	err := proto.Unmarshal(nxt.Data, pb)
 	if err != nil {
 		return err
@@ -71,13 +72,13 @@ func (dr *DagReader) precalcNextBuf() error {
 	dr.position++
 
 	switch pb.GetType() {
-	case ft.PBData_Directory:
+	case ftpb.PBData_Directory:
 		return ft.ErrInvalidDirLocation
-	case ft.PBData_File:
+	case ftpb.PBData_File:
 		//TODO: this *should* work, needs testing first
 		//return NewDagReader(nxt, dr.serv)
 		panic("Not yet handling different layers of indirection!")
-	case ft.PBData_Raw:
+	case ftpb.PBData_Raw:
 		dr.buf = bytes.NewBuffer(pb.GetData())
 		return nil
 	default: