package commands

import (
	"fmt"
	"io"
	"os"
	"strings"

	cmdenv "github.com/ipfs/go-ipfs/core/commands/cmdenv"
	coreiface "github.com/ipfs/go-ipfs/core/coreapi/interface"
	options "github.com/ipfs/go-ipfs/core/coreapi/interface/options"

	mh "gx/ipfs/QmPnFwZ2JXKnXgMw8CdBPxn7FWh6LLdjUjxV1fKHuJnkr8/go-multihash"
	pb "gx/ipfs/QmPtj12fdwuAqj9sBSTNUxBNu8kCGNp8b3o8yUzMm5GHpq/pb"
	cmds "gx/ipfs/QmRRovo1DE6i5cMjCbf19mQCSuszF6SKwdZNUMS7MtBnH1/go-ipfs-cmds"
	files "gx/ipfs/QmZMWMvWMVKCbHetJ4RgndbuEF1io2UpUxwQwtNjtYPzSC/go-ipfs-files"
	cmdkit "gx/ipfs/Qmde5VP1qUkyQXKCfmEUA7bP64V2HAptbJ7phuPp7jXWwg/go-ipfs-cmdkit"
)

// ErrDepthLimitExceeded indicates that the max depth has been exceeded.
var ErrDepthLimitExceeded = fmt.Errorf("depth limit exceeded")

const (
	quietOptionName       = "quiet"
	quieterOptionName     = "quieter"
	silentOptionName      = "silent"
	progressOptionName    = "progress"
	trickleOptionName     = "trickle"
	wrapOptionName        = "wrap-with-directory"
	stdinPathName         = "stdin-name"
	hiddenOptionName      = "hidden"
	onlyHashOptionName    = "only-hash"
	chunkerOptionName     = "chunker"
	pinOptionName         = "pin"
	rawLeavesOptionName   = "raw-leaves"
	noCopyOptionName      = "nocopy"
	fstoreCacheOptionName = "fscache"
	cidVersionOptionName  = "cid-version"
	hashOptionName        = "hash"
	inlineOptionName      = "inline"
	inlineLimitOptionName = "inline-limit"
)

const adderOutChanSize = 8

var AddCmd = &cmds.Command{
	Helptext: cmdkit.HelpText{
		Tagline: "Add a file or directory to ipfs.",
		ShortDescription: `
Adds contents of <path> to ipfs. Use -r to add directories (recursively).
`,
		LongDescription: `
Adds contents of <path> to ipfs. Use -r to add directories.
Note that directories are added recursively, to form the ipfs
MerkleDAG.

The wrap option, '-w', wraps the file (or files, if using the
recursive option) in a directory. This directory contains only
the files which have been added, and means that the file retains
its filename. For example:

  > ipfs add example.jpg
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  > ipfs add example.jpg -w
  added QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH example.jpg
  added QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx

You can now refer to the added file in a gateway, like so:

  /ipfs/QmaG4FuMqEBnQNn3C8XJ5bpW8kLs7zq2ZXgHptJHbKDDVx/example.jpg

The chunker option, '-s', specifies the chunking strategy that dictates
how to break files into blocks. Blocks with same content can
be deduplicated. The default is a fixed block size of
256 * 1024 bytes, 'size-262144'. Alternatively, you can use the
rabin chunker for content defined chunking by specifying
rabin-[min]-[avg]-[max] (where min/avg/max refer to the resulting
chunk sizes). Using other chunking strategies will produce different
hashes for the same file.
  > ipfs add --chunker=size-2048 ipfs-logo.svg
  added QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87 ipfs-logo.svg
  > ipfs add --chunker=rabin-512-1024-2048 ipfs-logo.svg
  added Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn ipfs-logo.svg

You can now check what blocks have been created by:

  > ipfs object links QmafrLBfzRLV4XSH1XcaMMeaXEUhDJjmtDfsYU95TrWG87
  QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
  Qmf7ZQeSxq2fJVJbCmgTrLLVN9tDR9Wy5k75DxQKuz5Gyt 1195
  > ipfs object links Qmf1hDN65tR55Ubh2RN1FPxr69xq3giVBz1KApsresY8Gn
  QmY6yj1GsermExDXoosVE3aSPxdMNYr6aKuw3nA8LoWPRS 2059
  QmerURi9k4XzKCaaPbsK6BL5pMEjF7PGphjDvkkjDtsVf3 868
  QmQB28iwSriSUSMqG2nXDTLtdPHgWb4rebBrU7Q1j4vxPv 338
`,
	},
	Arguments: []cmdkit.Argument{
		cmdkit.FileArg("path", true, true, "The path to a file to be added to ipfs.").EnableRecursive().EnableStdin(),
	},
	Options: []cmdkit.Option{
		cmds.OptionRecursivePath, // a builtin option that allows recursive paths (-r, --recursive)
		cmdkit.BoolOption(quietOptionName, "q", "Write minimal output."),
		cmdkit.BoolOption(quieterOptionName, "Q", "Write only final hash."),
		cmdkit.BoolOption(silentOptionName, "Write no output."),
		cmdkit.BoolOption(progressOptionName, "p", "Stream progress data."),
		cmdkit.BoolOption(trickleOptionName, "t", "Use trickle-dag format for dag generation."),
		cmdkit.BoolOption(onlyHashOptionName, "n", "Only chunk and hash - do not write to disk."),
		cmdkit.BoolOption(wrapOptionName, "w", "Wrap files with a directory object."),
		cmdkit.StringOption(stdinPathName, "Assign a name if the file source is stdin."),
		cmdkit.BoolOption(hiddenOptionName, "H", "Include files that are hidden. Only takes effect on recursive add."),
		cmdkit.StringOption(chunkerOptionName, "s", "Chunking algorithm, size-[bytes] or rabin-[min]-[avg]-[max]").WithDefault("size-262144"),
		cmdkit.BoolOption(pinOptionName, "Pin this object when adding.").WithDefault(true),
		cmdkit.BoolOption(rawLeavesOptionName, "Use raw blocks for leaf nodes. (experimental)"),
		cmdkit.BoolOption(noCopyOptionName, "Add the file using filestore. Implies raw-leaves. (experimental)"),
		cmdkit.BoolOption(fstoreCacheOptionName, "Check the filestore for pre-existing blocks. (experimental)"),
		cmdkit.IntOption(cidVersionOptionName, "CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. (experimental)"),
		cmdkit.StringOption(hashOptionName, "Hash function to use. Implies CIDv1 if not sha2-256. (experimental)").WithDefault("sha2-256"),
		cmdkit.BoolOption(inlineOptionName, "Inline small blocks into CIDs. (experimental)"),
		cmdkit.IntOption(inlineLimitOptionName, "Maximum block size to inline. (experimental)").WithDefault(32),
	},
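	// PreRun turns the CLI progress bar on by default; it stays off when
	// quiet, quieter or silent output has been requested.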
	PreRun: func(req *cmds.Request, env cmds.Environment) error {
		quiet, _ := req.Options[quietOptionName].(bool)
		quieter, _ := req.Options[quieterOptionName].(bool)
		quiet = quiet || quieter

		silent, _ := req.Options[silentOptionName].(bool)

		if quiet || silent {
			return nil
		}

		// ipfs cli progress bar defaults to true unless quiet or silent is used
		_, found := req.Options[progressOptionName].(bool)
		if !found {
			req.Options[progressOptionName] = true
		}

		return nil
	},
	Run: func(req *cmds.Request, res cmds.ResponseEmitter, env cmds.Environment) error {
		api, err := cmdenv.GetApi(env)
		if err != nil {
			return err
		}

		progress, _ := req.Options[progressOptionName].(bool)
		trickle, _ := req.Options[trickleOptionName].(bool)
		wrap, _ := req.Options[wrapOptionName].(bool)
		hash, _ := req.Options[onlyHashOptionName].(bool)
		hidden, _ := req.Options[hiddenOptionName].(bool)
		silent, _ := req.Options[silentOptionName].(bool)
		chunker, _ := req.Options[chunkerOptionName].(string)
		dopin, _ := req.Options[pinOptionName].(bool)
		rawblks, rbset := req.Options[rawLeavesOptionName].(bool)
		nocopy, _ := req.Options[noCopyOptionName].(bool)
		fscache, _ := req.Options[fstoreCacheOptionName].(bool)
		cidVer, cidVerSet := req.Options[cidVersionOptionName].(int)
		hashFunStr, _ := req.Options[hashOptionName].(string)
		inline, _ := req.Options[inlineOptionName].(bool)
		inlineLimit, _ := req.Options[inlineLimitOptionName].(int)
		pathName, _ := req.Options[stdinPathName].(string)
		local, _ := req.Options["local"].(bool)

		hashFunCode, ok := mh.Names[strings.ToLower(hashFunStr)]
		if !ok {
			return fmt.Errorf("unrecognized hash function: %s", strings.ToLower(hashFunStr))
		}

		events := make(chan interface{}, adderOutChanSize)

		opts := []options.UnixfsAddOption{
			options.Unixfs.Hash(hashFunCode),

			options.Unixfs.Inline(inline),
			options.Unixfs.InlineLimit(inlineLimit),

			options.Unixfs.Chunker(chunker),

			options.Unixfs.Pin(dopin),
			options.Unixfs.HashOnly(hash),
			options.Unixfs.Local(local),
			options.Unixfs.FsCache(fscache),
			options.Unixfs.Nocopy(nocopy),

			options.Unixfs.Wrap(wrap),
			options.Unixfs.Hidden(hidden),
			options.Unixfs.StdinName(pathName),

			options.Unixfs.Progress(progress),
			options.Unixfs.Silent(silent),
			options.Unixfs.Events(events),
		}

		if cidVerSet {
			opts = append(opts, options.Unixfs.CidVersion(cidVer))
		}

		if rbset {
			opts = append(opts, options.Unixfs.RawLeaves(rawblks))
		}

		if trickle {
			opts = append(opts, options.Unixfs.Layout(options.TrickleLayout))
		}

		errCh := make(chan error)
		go func() {
			var err error
			defer func() { errCh <- err }()
			defer close(events)
			_, err = api.Unixfs().Add(req.Context, req.Files, opts...)
		}()

		err = res.Emit(events)
		if err != nil {
			return err
		}

		return <-errCh
	},
	PostRun: cmds.PostRunMap{
		cmds.CLI: func(res cmds.Response, re cmds.ResponseEmitter) error {
			sizeChan := make(chan int64, 1)
			outChan := make(chan interface{})
			req := res.Request()

			sizeFile, ok := req.Files.(files.SizeFile)
			if ok {
				// Could be slow.
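				// Computing the size may require walking the whole input, so it is
				// done asynchronously; the result only feeds the progress bar total
				// via sizeChan and is not needed for the add itself.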
				go func() {
					size, err := sizeFile.Size()
					if err != nil {
						log.Warningf("error getting files size: %s", err)
						// see comment above
						return
					}

					sizeChan <- size
				}()
			} else {
				// we don't need to error, the progress bar just
				// won't know how big the files are
				log.Warning("cannot determine size of input file")
			}

			progressBar := func(wait chan struct{}) {
				defer close(wait)

				quiet, _ := req.Options[quietOptionName].(bool)
				quieter, _ := req.Options[quieterOptionName].(bool)
				quiet = quiet || quieter

				progress, _ := req.Options[progressOptionName].(bool)

				var bar *pb.ProgressBar
				if progress {
					bar = pb.New64(0).SetUnits(pb.U_BYTES)
					bar.ManualUpdate = true
					bar.ShowTimeLeft = false
					bar.ShowPercent = false
					bar.Output = os.Stderr
					bar.Start()
				}

				lastFile := ""
				lastHash := ""
				var totalProgress, prevFiles, lastBytes int64

			LOOP:
				for {
					select {
					case out, ok := <-outChan:
						if !ok {
							if quieter {
								fmt.Fprintln(os.Stdout, lastHash)
							}
							break LOOP
						}
						output := out.(*coreiface.AddEvent)
						if len(output.Hash) > 0 {
							lastHash = output.Hash
							if quieter {
								continue
							}

							if progress {
								// clear progress bar line before we print "added x" output
								fmt.Fprintf(os.Stderr, "\033[2K\r")
							}
							if quiet {
								fmt.Fprintf(os.Stdout, "%s\n", output.Hash)
							} else {
								fmt.Fprintf(os.Stdout, "added %s %s\n", output.Hash, output.Name)
							}
						} else {
							if !progress {
								continue
							}

							if len(lastFile) == 0 {
								lastFile = output.Name
							}
							if output.Name != lastFile || output.Bytes < lastBytes {
								prevFiles += lastBytes
								lastFile = output.Name
							}
							lastBytes = output.Bytes
							delta := prevFiles + lastBytes - totalProgress
							totalProgress = bar.Add64(delta)
						}

						if progress {
							bar.Update()
						}
					case size := <-sizeChan:
						if progress {
							bar.Total = size
							bar.ShowPercent = true
							bar.ShowBar = true
							bar.ShowTimeLeft = true
						}
					case <-req.Context.Done():
						// don't set or print error here, that happens in the goroutine below
						return
					}
				}
			}

			if e := res.Error(); e != nil {
				close(outChan)
				return e
			}

			wait := make(chan struct{})
			go progressBar(wait)

			defer func() { <-wait }()
			defer close(outChan)

			for {
				v, err := res.Next()
				if err != nil {
					if err == io.EOF {
						return nil
					}

					return err
				}

				select {
				case outChan <- v:
				case <-req.Context.Done():
					return req.Context.Err()
				}
			}
		},
	},
	Type: coreiface.AddEvent{},
}