mirror of https://github.com/ipfs/kubo.git

fixed data size reporting

Jeromy committed 2014-10-05 01:22:01 +00:00
commit a13baff33d, parent 16e42f82b0
7 changed files with 69 additions and 41 deletions

View File

@@ -18,7 +18,7 @@ func randBytes(size int) []byte {
 	return b
 }
 
-func writeFile(t *testing.T, size int, path string) ([]byte, error) {
+func writeFile(t *testing.T, size int, path string) []byte {
 	data := randBytes(size)
 	fi, err := os.Create(path)
 	if err != nil {
@@ -39,7 +39,7 @@ func writeFile(t *testing.T, size int, path string) ([]byte, error) {
 		t.Fatal(err)
 	}
 
-	return data, nil
+	return data
 }
 
 func setupIpnsTest(t *testing.T, node *core.IpfsNode) (*core.IpfsNode, *fstest.Mount) {
@@ -68,10 +68,7 @@ func TestIpnsBasicIO(t *testing.T) {
 	defer mnt.Close()
 
 	fname := mnt.Dir + "/local/testfile"
-	data, err := writeFile(t, 12345, fname)
-	if err != nil {
-		t.Fatal(err)
-	}
+	data := writeFile(t, 12345, fname)
 
 	rbuf, err := ioutil.ReadFile(fname)
 	if err != nil {
@@ -87,10 +84,7 @@ func TestFilePersistence(t *testing.T) {
 	node, mnt := setupIpnsTest(t, nil)
 
 	fname := "/local/atestfile"
-	data, err := writeFile(t, 127, mnt.Dir+fname)
-	if err != nil {
-		t.Fatal(err)
-	}
+	data := writeFile(t, 127, mnt.Dir+fname)
 
 	// Wait for publish: TODO: make publish happen faster in tests
 	time.Sleep(time.Millisecond * 40)
@@ -109,3 +103,20 @@ func TestFilePersistence(t *testing.T) {
 		t.Fatalf("File data changed between mounts! sizes differ: %d != %d", len(data), len(rbuf))
 	}
 }
+
+func TestFileSizeReporting(t *testing.T) {
+	_, mnt := setupIpnsTest(t, nil)
+	defer mnt.Close()
+
+	fname := mnt.Dir + "/local/sizecheck"
+	data := writeFile(t, 5555, fname)
+
+	finfo, err := os.Stat(fname)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if finfo.Size() != int64(len(data)) {
+		t.Fatal("Read incorrect size from stat!")
+	}
+}

View File

@@ -219,10 +219,14 @@ func (s *Node) Attr() fuse.Attr {
 	case mdag.PBData_Directory:
 		return fuse.Attr{Mode: os.ModeDir | 0555}
 	case mdag.PBData_File, mdag.PBData_Raw:
-		size, _ := s.Nd.Size()
+		size, err := s.Nd.DataSize()
+		if err != nil {
+			log.Error("Error getting size of file: %s", err)
+			size = 0
+		}
 		return fuse.Attr{
 			Mode:   0666,
-			Size:   uint64(size),
+			Size:   size,
 			Blocks: uint64(len(s.Nd.Links)),
 		}
 	default:
@@ -323,7 +327,6 @@ func (n *Node) Flush(req *fuse.FlushRequest, intr fs.Intr) fuse.Error {
 		// folder, bad things would happen.
 		buf := bytes.NewReader(n.writerBuf.Bytes())
 		newNode, err := imp.NewDagFromReader(buf)
-		log.Debug("flush: new data = %v", newNode.Data)
 		if err != nil {
 			log.Critical("error creating dag from writerBuf: %s", err)
 			return err
@@ -457,7 +460,7 @@ func (n *Node) Create(req *fuse.CreateRequest, resp *fuse.CreateResponse, intr f
 	log.Debug("Got create request: %s", req.Name)
 
 	// New 'empty' file
-	nd := &mdag.Node{Data: mdag.FilePBData(nil)}
+	nd := &mdag.Node{Data: mdag.FilePBData(nil, 0)}
 	child := n.makeChild(req.Name, nd)
 
 	nnode := n.Nd.Copy()

View File

@@ -28,10 +28,12 @@ func NewDagFromReader(r io.Reader) (*dag.Node, error) {
 
 func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, error) {
 	blkChan := spl.Split(r)
 	first := <-blkChan
-	root := &dag.Node{Data: dag.FilePBData(first)}
+	root := &dag.Node{}
 	i := 0
+	totalsize := uint64(len(first))
 	for blk := range blkChan {
+		totalsize += uint64(len(blk))
 		child := &dag.Node{Data: dag.WrapData(blk)}
 		err := root.AddNodeLink(fmt.Sprintf("%d", i), child)
 		if err != nil {
@@ -40,6 +42,7 @@ func NewDagFromReaderWithSplitter(r io.Reader, spl BlockSplitter) (*dag.Node, er
 		i++
 	}
 
+	root.Data = dag.FilePBData(first, totalsize)
 	return root, nil
 }
@@ -62,24 +65,3 @@ func NewDagFromFile(fpath string) (*dag.Node, error) {
 	return NewDagFromReader(f)
 }
-
-// TODO: this needs a better name
-func NewDagInNode(r io.Reader, n *dag.Node) error {
-	n.Links = nil
-	blkChan := DefaultSplitter.Split(r)
-	first := <-blkChan
-	n.Data = dag.FilePBData(first)
-
-	i := 0
-	for blk := range blkChan {
-		child := &dag.Node{Data: dag.WrapData(blk)}
-		err := n.AddNodeLink(fmt.Sprintf("%d", i), child)
-		if err != nil {
-			return err
-		}
-		i++
-	}
-
-	return nil
-}
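A quick usage sketch of the importer path above: the byte count accumulated from the splitter ends up in the root node's protobuf data via FilePBData(first, totalsize), so the logical file size survives even though most of the payload lives in child links. Only NewDagFromReader, DataSize and the imp alias come from this commit's diffs; the import path and the example itself are illustrative assumptions, not code from the repository.

// Sketch, not part of the commit: build a DAG from an in-memory reader and
// read the recorded size back. Import path follows the 2014 go-ipfs layout.
package main

import (
	"bytes"
	"fmt"

	imp "github.com/jbenet/go-ipfs/importer"
)

func main() {
	payload := bytes.Repeat([]byte("a"), 12345)

	root, err := imp.NewDagFromReader(bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	// DataSize reads the Filesize field written by FilePBData(first, totalsize).
	size, err := root.DataSize()
	if err != nil {
		panic(err)
	}
	fmt.Println(size == uint64(len(payload))) // expected: true
}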

View File

@@ -13,7 +13,7 @@ It has these top-level messages:
 */
 package merkledag
 
-import proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
+import proto "code.google.com/p/goprotobuf/proto"
 import math "math"
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -59,6 +59,7 @@ func (x *PBData_DataType) UnmarshalJSON(data []byte) error {
 type PBData struct {
 	Type             *PBData_DataType `protobuf:"varint,1,req,enum=merkledag.PBData_DataType" json:"Type,omitempty"`
 	Data             []byte           `protobuf:"bytes,2,opt" json:"Data,omitempty"`
+	Filesize         *uint64          `protobuf:"varint,3,opt,name=filesize" json:"filesize,omitempty"`
 	XXX_unrecognized []byte           `json:"-"`
 }
@@ -80,6 +81,13 @@ func (m *PBData) GetData() []byte {
 	return nil
 }
 
+func (m *PBData) GetFilesize() uint64 {
+	if m != nil && m.Filesize != nil {
+		return *m.Filesize
+	}
+	return 0
+}
+
 func init() {
 	proto.RegisterEnum("merkledag.PBData_DataType", PBData_DataType_name, PBData_DataType_value)
 }

View File

@@ -9,4 +9,5 @@ message PBData {
 	required DataType Type = 1;
 	optional bytes Data = 2;
+	optional uint64 filesize = 3;
 }
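The new field is optional, so nodes written before this commit simply carry no filesize; the generated accessor in merkledag.pb.go above returns 0 in that case, which is the size stat will report for such pre-existing file nodes until they are rewritten. A small illustration of that behaviour, assuming the 2014 import paths (the snippet is not part of the commit):

// Illustration only: how the optional filesize field behaves for old and new nodes.
package main

import (
	"fmt"

	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
	dag "github.com/jbenet/go-ipfs/merkledag"
)

func main() {
	// A node written before this commit has no filesize field at all;
	// the generated accessor falls back to 0 instead of failing.
	old := new(dag.PBData)
	fmt.Println(old.GetFilesize()) // 0

	// New writers set the field explicitly, as FilePBData now does.
	fresh := &dag.PBData{Filesize: proto.Uint64(5555)}
	fmt.Println(fresh.GetFilesize()) // 5555
}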

View File

@@ -1,6 +1,7 @@
 package merkledag
 
 import (
+	"errors"
 	"fmt"
 
 	proto "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/goprotobuf/proto"
@@ -107,6 +108,25 @@ func (n *Node) Size() (uint64, error) {
 	return s, nil
 }
 
+func (n *Node) DataSize() (uint64, error) {
+	pbdata := new(PBData)
+	err := proto.Unmarshal(n.Data, pbdata)
+	if err != nil {
+		return 0, err
+	}
+
+	switch pbdata.GetType() {
+	case PBData_Directory:
+		return 0, errors.New("Cant get data size of directory!")
+	case PBData_File:
+		return pbdata.GetFilesize(), nil
+	case PBData_Raw:
+		return uint64(len(pbdata.GetData())), nil
+	default:
+		return 0, errors.New("Unrecognized node data type!")
+	}
+}
+
 // Multihash hashes the encoded data of this node.
 func (n *Node) Multihash() (mh.Multihash, error) {
 	b, err := n.Encoded(false)
@@ -211,11 +231,12 @@ func (n *DAGService) Get(k u.Key) (*Node, error) {
 	return Decoded(b.Data)
 }
 
-func FilePBData(data []byte) []byte {
+func FilePBData(data []byte, totalsize uint64) []byte {
 	pbfile := new(PBData)
 	typ := PBData_File
 	pbfile.Type = &typ
 	pbfile.Data = data
+	pbfile.Filesize = proto.Uint64(totalsize)
 
 	data, err := proto.Marshal(pbfile)
 	if err != nil {
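To tie the two new pieces together, here is a sketch of a package-level test for the FilePBData/DataSize round trip. The function names, the Node and PBData types, and the assumption that WrapData produces a Raw-typed payload (as its use for leaf blocks in the importer suggests) come from the diffs above; the test itself and its placement are illustrative, not part of the commit.

// Sketch of a merkledag package test, not part of the commit.
package merkledag

import "testing"

func TestDataSizeRoundTrip(t *testing.T) {
	// A file node whose children would hold most of the bytes: the embedded
	// data is short, but the recorded total size is what DataSize reports.
	nd := &Node{Data: FilePBData([]byte("head"), 5555)}

	size, err := nd.DataSize()
	if err != nil {
		t.Fatal(err)
	}
	if size != 5555 {
		t.Fatalf("expected 5555, got %d", size)
	}

	// Raw nodes have no explicit size field; DataSize falls back to the
	// length of the wrapped data.
	raw := &Node{Data: WrapData([]byte("some raw bytes"))}
	size, err = raw.DataSize()
	if err != nil {
		t.Fatal(err)
	}
	if size != uint64(len("some raw bytes")) {
		t.Fatalf("unexpected raw size: %d", size)
	}
}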

View File

@@ -122,9 +122,11 @@ func SetupLogging() {
 	*/
 	logging.SetFormatter(logging.MustStringFormatter(LogFormat))
 
-	for _, n := range loggers {
-		logging.SetLevel(logging.ERROR, n)
-	}
+	/*
+		for _, n := range loggers {
+			logging.SetLevel(logging.ERROR, n)
+		}
+	*/
 }
 
 // Logger retrieves a particular logger + initializes it at a particular level