diff --git a/blocks/blocks.go b/blocks/blocks.go
index d4a385f2961..4d5b64422fd 100644
--- a/blocks/blocks.go
+++ b/blocks/blocks.go
@@ -14,7 +14,6 @@ import (
var ErrWrongHash = errors.New("data did not match given hash!")
type Block interface {
- Multihash() mh.Multihash
RawData() []byte
Cid() *cid.Cid
String() string
diff --git a/blockservice/test/blocks_test.go b/blockservice/test/blocks_test.go
index bda0427b030..956420da283 100644
--- a/blockservice/test/blocks_test.go
+++ b/blockservice/test/blocks_test.go
@@ -18,18 +18,8 @@ import (
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
-func newObject(data []byte) *testObject {
- return &testObject{
- Block: blocks.NewBlock(data),
- }
-}
-
-type testObject struct {
- blocks.Block
-}
-
-func (o *testObject) Cid() *cid.Cid {
- return cid.NewCidV0(o.Block.Multihash())
+func newObject(data []byte) blocks.Block {
+ return blocks.NewBlock(data)
}
func TestBlocks(t *testing.T) {
@@ -38,12 +28,8 @@ func TestBlocks(t *testing.T) {
defer bs.Close()
o := newObject([]byte("beep boop"))
- h := u.Hash([]byte("beep boop"))
- if !bytes.Equal(o.Multihash(), h) {
- t.Error("Block Multihash and data multihash not equal")
- }
-
- if !o.Cid().Equals(cid.NewCidV0(h)) {
+ h := cid.NewCidV0(u.Hash([]byte("beep boop")))
+ if !o.Cid().Equals(h) {
t.Error("Block key and data multihash key not equal")
}
@@ -74,8 +60,8 @@ func TestBlocks(t *testing.T) {
}
}
-func makeObjects(n int) []*testObject {
- var out []*testObject
+func makeObjects(n int) []blocks.Block {
+ var out []blocks.Block
for i := 0; i < n; i++ {
out = append(out, newObject([]byte(fmt.Sprintf("object %d", i))))
}
diff --git a/core/commands/files/files.go b/core/commands/files/files.go
index 4809f3d7d46..eff6b96e649 100644
--- a/core/commands/files/files.go
+++ b/core/commands/files/files.go
@@ -182,7 +182,7 @@ func statNode(ds dag.DAGService, fsn mfs.FSNode) (*Object, error) {
return &Object{
Hash: c.String(),
- Blocks: len(nd.Links),
+ Blocks: len(nd.Links()),
Size: d.GetFilesize(),
CumulativeSize: cumulsize,
Type: ndtype,
@@ -245,7 +245,7 @@ var FilesCpCmd = &cmds.Command{
},
}
-func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.Node, error) {
+func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.ProtoNode, error) {
switch {
case strings.HasPrefix(p, "/ipfs/"):
np, err := path.ParsePath(p)
@@ -253,7 +253,17 @@ func getNodeFromPath(ctx context.Context, node *core.IpfsNode, p string) (*dag.N
return nil, err
}
- return core.Resolve(ctx, node, np)
+ nd, err := core.Resolve(ctx, node, np)
+ if err != nil {
+ return nil, err
+ }
+
+ pbnd, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ return pbnd, nil
default:
fsn, err := mfs.Lookup(node.FilesRoot, p)
if err != nil {
diff --git a/core/commands/get.go b/core/commands/get.go
index 784f4ced759..c299336ef9c 100644
--- a/core/commands/get.go
+++ b/core/commands/get.go
@@ -13,6 +13,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
+ dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
tar "github.com/ipfs/go-ipfs/thirdparty/tar"
uarchive "github.com/ipfs/go-ipfs/unixfs/archive"
@@ -69,6 +70,12 @@ may also specify the level of compression by specifying '-l=<1-9>'.
return
}
+ pbnd, ok := dn.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(err, cmds.ErrNormal)
+ return
+ }
+
size, err := dn.Size()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@@ -78,7 +85,7 @@ may also specify the level of compression by specifying '-l=<1-9>'.
res.SetLength(size)
archive, _, _ := req.Option("archive").Bool()
- reader, err := uarchive.DagArchive(ctx, dn, p.String(), node.DAG, archive, cmplvl)
+ reader, err := uarchive.DagArchive(ctx, pbnd, p.String(), node.DAG, archive, cmplvl)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
diff --git a/core/commands/ls.go b/core/commands/ls.go
index 47c470ab0e4..e2546699fa7 100644
--- a/core/commands/ls.go
+++ b/core/commands/ls.go
@@ -13,7 +13,7 @@ import (
unixfs "github.com/ipfs/go-ipfs/unixfs"
unixfspb "github.com/ipfs/go-ipfs/unixfs/pb"
- cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
type LsLink struct {
@@ -52,7 +52,7 @@ The JSON output contains type information.
cmds.BoolOption("resolve-type", "Resolve linked objects to find out their types.").Default(true),
},
Run: func(req cmds.Request, res cmds.Response) {
- node, err := req.InvocContext().GetNode()
+ nd, err := req.InvocContext().GetNode()
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -72,9 +72,9 @@ The JSON output contains type information.
paths := req.Arguments()
- var dagnodes []*merkledag.Node
+ var dagnodes []node.Node
for _, fpath := range paths {
- dagnode, err := core.Resolve(req.Context(), node, path.Path(fpath))
+ dagnode, err := core.Resolve(req.Context(), nd, path.Path(fpath))
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -86,14 +86,14 @@ The JSON output contains type information.
for i, dagnode := range dagnodes {
output[i] = LsObject{
Hash: paths[i],
- Links: make([]LsLink, len(dagnode.Links)),
+ Links: make([]LsLink, len(dagnode.Links())),
}
- for j, link := range dagnode.Links {
- var linkNode *merkledag.Node
+ for j, link := range dagnode.Links() {
+ var linkNode *merkledag.ProtoNode
t := unixfspb.Data_DataType(-1)
- linkKey := cid.NewCidV0(link.Hash)
- if ok, err := node.Blockstore.Has(linkKey); ok && err == nil {
- b, err := node.Blockstore.Get(linkKey)
+ linkKey := link.Cid
+ if ok, err := nd.Blockstore.Has(linkKey); ok && err == nil {
+ b, err := nd.Blockstore.Get(linkKey)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -106,11 +106,19 @@ The JSON output contains type information.
}
if linkNode == nil && resolve {
- linkNode, err = link.GetNode(req.Context(), node.DAG)
+ nd, err := link.GetNode(req.Context(), nd.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
+
+ pbnd, ok := nd.(*merkledag.ProtoNode)
+ if !ok {
+ res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ linkNode = pbnd
}
if linkNode != nil {
d, err := unixfs.FromBytes(linkNode.Data())
@@ -123,7 +131,7 @@ The JSON output contains type information.
}
output[i].Links[j] = LsLink{
Name: link.Name,
- Hash: link.Hash.B58String(),
+ Hash: link.Cid.String(),
Size: link.Size,
Type: t,
}
diff --git a/core/commands/object/diff.go b/core/commands/object/diff.go
index 1e62c0528ae..69697ef3edd 100644
--- a/core/commands/object/diff.go
+++ b/core/commands/object/diff.go
@@ -7,6 +7,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
+ dag "github.com/ipfs/go-ipfs/merkledag"
dagutils "github.com/ipfs/go-ipfs/merkledag/utils"
path "github.com/ipfs/go-ipfs/path"
)
@@ -85,7 +86,19 @@ Example:
return
}
- changes, err := dagutils.Diff(ctx, node.DAG, obj_a, obj_b)
+ pbobj_a, ok := obj_a.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ pbobj_b, ok := obj_b.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ changes, err := dagutils.Diff(ctx, node.DAG, pbobj_a, pbobj_b)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
diff --git a/core/commands/object/object.go b/core/commands/object/object.go
index 47e5f1d4d33..6f2f2d29122 100644
--- a/core/commands/object/object.go
+++ b/core/commands/object/object.go
@@ -12,13 +12,14 @@ import (
"strings"
"text/tabwriter"
- mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
-
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
ft "github.com/ipfs/go-ipfs/unixfs"
+
+ cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
// ErrObjectTooLarge is returned when too much data was read from stdin. current limit 2m
@@ -98,7 +99,14 @@ is the raw data of the object.
res.SetError(err, cmds.ErrNormal)
return
}
- res.SetOutput(bytes.NewReader(node.Data()))
+
+ pbnode, ok := node.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ res.SetOutput(bytes.NewReader(pbnode.Data()))
},
}
@@ -137,6 +145,7 @@ multihash.
res.SetError(err, cmds.ErrNormal)
return
}
+
output, err := getOutput(node)
if err != nil {
res.SetError(err, cmds.ErrNormal)
@@ -201,14 +210,20 @@ This command outputs data in the following encodings:
return
}
+ pbo, ok := object.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
node := &Node{
- Links: make([]Link, len(object.Links)),
- Data: string(object.Data()),
+ Links: make([]Link, len(object.Links())),
+ Data: string(pbo.Data()),
}
- for i, link := range object.Links {
+ for i, link := range object.Links() {
node.Links[i] = Link{
- Hash: link.Hash.B58String(),
+ Hash: link.Cid.String(),
Name: link.Name,
Size: link.Size,
}
@@ -276,10 +291,10 @@ var ObjectStatCmd = &cmds.Command{
res.SetOutput(ns)
},
- Type: dag.NodeStat{},
+ Type: node.NodeStat{},
Marshalers: cmds.MarshalerMap{
cmds.Text: func(res cmds.Response) (io.Reader, error) {
- ns := res.Output().(*dag.NodeStat)
+ ns := res.Output().(*node.NodeStat)
buf := new(bytes.Buffer)
w := func(s string, n int) {
@@ -413,7 +428,7 @@ Available templates:
return
}
- node := new(dag.Node)
+ node := new(dag.ProtoNode)
if len(req.Arguments()) == 1 {
template := req.Arguments()[0]
var err error
@@ -440,7 +455,7 @@ Available templates:
Type: Object{},
}
-func nodeFromTemplate(template string) (*dag.Node, error) {
+func nodeFromTemplate(template string) (*dag.ProtoNode, error) {
switch template {
case "unixfs-dir":
return ft.EmptyDirNode(), nil
@@ -464,7 +479,7 @@ func objectPut(n *core.IpfsNode, input io.Reader, encoding string, dataFieldEnco
return nil, ErrObjectTooLarge
}
- var dagnode *dag.Node
+ var dagnode *dag.ProtoNode
switch getObjectEnc(encoding) {
case objectEncodingJSON:
node := new(Node)
@@ -542,17 +557,17 @@ func getObjectEnc(o interface{}) objectEncoding {
return objectEncoding(v)
}
-func getOutput(dagnode *dag.Node) (*Object, error) {
+func getOutput(dagnode node.Node) (*Object, error) {
c := dagnode.Cid()
output := &Object{
Hash: c.String(),
- Links: make([]Link, len(dagnode.Links)),
+ Links: make([]Link, len(dagnode.Links())),
}
- for i, link := range dagnode.Links {
+ for i, link := range dagnode.Links() {
output.Links[i] = Link{
Name: link.Name,
- Hash: link.Hash.B58String(),
+ Hash: link.Cid.String(),
Size: link.Size,
}
}
@@ -560,29 +575,29 @@ func getOutput(dagnode *dag.Node) (*Object, error) {
return output, nil
}
-// converts the Node object into a real dag.Node
-func deserializeNode(node *Node, dataFieldEncoding string) (*dag.Node, error) {
- dagnode := new(dag.Node)
+// converts the Node object into a real dag.ProtoNode
+func deserializeNode(nd *Node, dataFieldEncoding string) (*dag.ProtoNode, error) {
+ dagnode := new(dag.ProtoNode)
switch dataFieldEncoding {
case "text":
- dagnode.SetData([]byte(node.Data))
+ dagnode.SetData([]byte(nd.Data))
case "base64":
- data, _ := base64.StdEncoding.DecodeString(node.Data)
+ data, _ := base64.StdEncoding.DecodeString(nd.Data)
dagnode.SetData(data)
default:
return nil, fmt.Errorf("Unkown data field encoding")
}
- dagnode.Links = make([]*dag.Link, len(node.Links))
- for i, link := range node.Links {
- hash, err := mh.FromB58String(link.Hash)
+ dagnode.SetLinks(make([]*node.Link, len(nd.Links)))
+ for i, link := range nd.Links {
+ c, err := cid.Decode(link.Hash)
if err != nil {
return nil, err
}
- dagnode.Links[i] = &dag.Link{
+ dagnode.Links()[i] = &node.Link{
Name: link.Name,
Size: link.Size,
- Hash: hash,
+ Cid: c,
}
}
diff --git a/core/commands/object/patch.go b/core/commands/object/patch.go
index 35b677ab4c5..5b933e9346c 100644
--- a/core/commands/object/patch.go
+++ b/core/commands/object/patch.go
@@ -79,6 +79,12 @@ the limit will not be respected by the network.
return
}
+ rtpb, ok := rootnd.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
fi, err := req.Files().NextFile()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@@ -91,9 +97,9 @@ the limit will not be respected by the network.
return
}
- rootnd.SetData(append(rootnd.Data(), data...))
+ rtpb.SetData(append(rtpb.Data(), data...))
- newkey, err := nd.DAG.Add(rootnd)
+ newkey, err := nd.DAG.Add(rtpb)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -141,6 +147,12 @@ Example:
return
}
+ rtpb, ok := root.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
fi, err := req.Files().NextFile()
if err != nil {
res.SetError(err, cmds.ErrNormal)
@@ -153,9 +165,9 @@ Example:
return
}
- root.SetData(data)
+ rtpb.SetData(data)
- newkey, err := nd.DAG.Add(root)
+ newkey, err := nd.DAG.Add(rtpb)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -199,9 +211,15 @@ Removes a link by the given name from root.
return
}
+ rtpb, ok := root.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
path := req.Arguments()[1]
- e := dagutils.NewDagEditor(root, nd.DAG)
+ e := dagutils.NewDagEditor(rtpb, nd.DAG)
err = e.RmLink(req.Context(), path)
if err != nil {
@@ -268,6 +286,12 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
+ rtpb, ok := root.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
npath := req.Arguments()[1]
childp, err := path.ParsePath(req.Arguments()[2])
if err != nil {
@@ -281,12 +305,12 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
- var createfunc func() *dag.Node
+ var createfunc func() *dag.ProtoNode
if create {
createfunc = ft.EmptyDirNode
}
- e := dagutils.NewDagEditor(root, nd.DAG)
+ e := dagutils.NewDagEditor(rtpb, nd.DAG)
childnd, err := core.Resolve(req.Context(), nd, childp)
if err != nil {
@@ -294,7 +318,13 @@ to a file containing 'bar', and returns the hash of the new object.
return
}
- err = e.InsertNodeAtPath(req.Context(), npath, childnd, createfunc)
+ chpb, ok := childnd.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ err = e.InsertNodeAtPath(req.Context(), npath, chpb, createfunc)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
diff --git a/core/commands/refs.go b/core/commands/refs.go
index 8059aeb870a..3cd23291596 100644
--- a/core/commands/refs.go
+++ b/core/commands/refs.go
@@ -13,6 +13,7 @@ import (
path "github.com/ipfs/go-ipfs/path"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
@@ -195,8 +196,8 @@ var refsMarshallerMap = cmds.MarshalerMap{
},
}
-func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]*dag.Node, error) {
- objects := make([]*dag.Node, len(paths))
+func objectsForPaths(ctx context.Context, n *core.IpfsNode, paths []string) ([]node.Node, error) {
+ objects := make([]node.Node, len(paths))
for i, p := range paths {
o, err := core.Resolve(ctx, n, path.Path(p))
if err != nil {
@@ -225,24 +226,24 @@ type RefWriter struct {
}
// WriteRefs writes refs of the given object to the underlying writer.
-func (rw *RefWriter) WriteRefs(n *dag.Node) (int, error) {
+func (rw *RefWriter) WriteRefs(n node.Node) (int, error) {
if rw.Recursive {
return rw.writeRefsRecursive(n)
}
return rw.writeRefsSingle(n)
}
-func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
+func (rw *RefWriter) writeRefsRecursive(n node.Node) (int, error) {
nc := n.Cid()
var count int
for i, ng := range dag.GetDAG(rw.Ctx, rw.DAG, n) {
- lc := cid.NewCidV0(n.Links[i].Hash)
+ lc := n.Links()[i].Cid
if rw.skip(lc) {
continue
}
- if err := rw.WriteEdge(nc, lc, n.Links[i].Name); err != nil {
+ if err := rw.WriteEdge(nc, lc, n.Links()[i].Name); err != nil {
return count, err
}
@@ -260,7 +261,7 @@ func (rw *RefWriter) writeRefsRecursive(n *dag.Node) (int, error) {
return count, nil
}
-func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
+func (rw *RefWriter) writeRefsSingle(n node.Node) (int, error) {
c := n.Cid()
if rw.skip(c) {
@@ -268,9 +269,8 @@ func (rw *RefWriter) writeRefsSingle(n *dag.Node) (int, error) {
}
count := 0
- for _, l := range n.Links {
- lc := cid.NewCidV0(l.Hash)
-
+ for _, l := range n.Links() {
+ lc := l.Cid
if rw.skip(lc) {
continue
}
diff --git a/core/commands/tar.go b/core/commands/tar.go
index 306ac1a1686..00b757549ca 100644
--- a/core/commands/tar.go
+++ b/core/commands/tar.go
@@ -7,6 +7,7 @@ import (
cmds "github.com/ipfs/go-ipfs/commands"
core "github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/coreunix"
+ dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
tar "github.com/ipfs/go-ipfs/tar"
)
@@ -100,7 +101,13 @@ var tarCatCmd = &cmds.Command{
return
}
- r, err := tar.ExportTar(req.Context(), root, nd.DAG)
+ rootpb, ok := root.(*dag.ProtoNode)
+ if !ok {
+ res.SetError(dag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ r, err := tar.ExportTar(req.Context(), rootpb, nd.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
diff --git a/core/commands/unixfs/ls.go b/core/commands/unixfs/ls.go
index 36aeb061065..939edf2f589 100644
--- a/core/commands/unixfs/ls.go
+++ b/core/commands/unixfs/ls.go
@@ -103,7 +103,13 @@ possible, please use 'ipfs ls' instead.
continue
}
- unixFSNode, err := unixfs.FromBytes(merkleNode.Data())
+ ndpb, ok := merkleNode.(*merkledag.ProtoNode)
+ if !ok {
+ res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ unixFSNode, err := unixfs.FromBytes(ndpb.Data())
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -121,16 +127,21 @@ possible, please use 'ipfs ls' instead.
case unixfspb.Data_File:
break
case unixfspb.Data_Directory:
- links := make([]LsLink, len(merkleNode.Links))
+ links := make([]LsLink, len(merkleNode.Links()))
output.Objects[hash].Links = links
- for i, link := range merkleNode.Links {
- var linkNode *merkledag.Node
- linkNode, err = link.GetNode(ctx, node.DAG)
+ for i, link := range merkleNode.Links() {
+ linkNode, err := link.GetNode(ctx, node.DAG)
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
}
- d, err := unixfs.FromBytes(linkNode.Data())
+ lnpb, ok := linkNode.(*merkledag.ProtoNode)
+ if !ok {
+ res.SetError(merkledag.ErrNotProtobuf, cmds.ErrNormal)
+ return
+ }
+
+ d, err := unixfs.FromBytes(lnpb.Data())
if err != nil {
res.SetError(err, cmds.ErrNormal)
return
@@ -138,7 +149,7 @@ possible, please use 'ipfs ls' instead.
t := d.GetType()
lsLink := LsLink{
Name: link.Name,
- Hash: link.Hash.B58String(),
+ Hash: link.Cid.String(),
Type: t.String(),
}
if t == unixfspb.Data_File {
diff --git a/core/core.go b/core/core.go
index 1f4e59bbadd..46e0d954bd1 100644
--- a/core/core.go
+++ b/core/core.go
@@ -499,7 +499,7 @@ func (n *IpfsNode) loadFilesRoot() error {
return n.Repo.Datastore().Put(dsk, c.Bytes())
}
- var nd *merkledag.Node
+ var nd *merkledag.ProtoNode
val, err := n.Repo.Datastore().Get(dsk)
switch {
@@ -515,10 +515,17 @@ func (n *IpfsNode) loadFilesRoot() error {
return err
}
- nd, err = n.DAG.Get(n.Context(), c)
+ rnd, err := n.DAG.Get(n.Context(), c)
if err != nil {
return fmt.Errorf("error loading filesroot from DAG: %s", err)
}
+
+ pbnd, ok := rnd.(*merkledag.ProtoNode)
+ if !ok {
+ return merkledag.ErrNotProtobuf
+ }
+
+ nd = pbnd
default:
return err
}
diff --git a/core/corehttp/gateway_handler.go b/core/corehttp/gateway_handler.go
index 62cfa7269f2..9b7572d8821 100644
--- a/core/corehttp/gateway_handler.go
+++ b/core/corehttp/gateway_handler.go
@@ -45,7 +45,7 @@ func newGatewayHandler(node *core.IpfsNode, conf GatewayConfig) *gatewayHandler
}
// TODO(cryptix): find these helpers somewhere else
-func (i *gatewayHandler) newDagFromReader(r io.Reader) (*dag.Node, error) {
+func (i *gatewayHandler) newDagFromReader(r io.Reader) (*dag.ProtoNode, error) {
// TODO(cryptix): change and remove this helper once PR1136 is merged
// return ufs.AddFromReader(i.node, r.Body)
return importer.BuildDagFromReader(
@@ -163,6 +163,12 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
return
}
+ pbnd, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
+ return
+ }
+
etag := gopath.Base(urlPath)
if r.Header.Get("If-None-Match") == etag {
w.WriteHeader(http.StatusNotModified)
@@ -190,7 +196,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
w.Header().Set("Suborigin", pathRoot)
}
- dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
+ dr, err := uio.NewDagReader(ctx, pbnd, i.node.DAG)
if err != nil && err != uio.ErrIsDir {
// not a directory and still an error
internalWebError(w, err)
@@ -221,7 +227,7 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
var dirListing []directoryItem
// loop through files
foundIndex := false
- for _, link := range nd.Links {
+ for _, link := range nd.Links() {
if link.Name == "index.html" {
log.Debugf("found index.html link for %s", urlPath)
foundIndex = true
@@ -239,7 +245,14 @@ func (i *gatewayHandler) getOrHeadHandler(w http.ResponseWriter, r *http.Request
internalWebError(w, err)
return
}
- dr, err := uio.NewDagReader(ctx, nd, i.node.DAG)
+
+ pbnd, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ internalWebError(w, dag.ErrNotProtobuf)
+ return
+ }
+
+ dr, err := uio.NewDagReader(ctx, pbnd, i.node.DAG)
if err != nil {
internalWebError(w, err)
return
@@ -340,7 +353,7 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
return
}
- var newnode *dag.Node
+ var newnode *dag.ProtoNode
if rsegs[len(rsegs)-1] == "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn" {
newnode = uio.NewEmptyDirectory()
} else {
@@ -376,7 +389,13 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
return
}
- e := dagutils.NewDagEditor(rnode, i.node.DAG)
+ pbnd, ok := rnode.(*dag.ProtoNode)
+ if !ok {
+ webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
+ return
+ }
+
+ e := dagutils.NewDagEditor(pbnd, i.node.DAG)
err = e.InsertNodeAtPath(ctx, newPath, newnode, uio.NewEmptyDirectory)
if err != nil {
webError(w, "putHandler: InsertNodeAtPath failed", err, http.StatusInternalServerError)
@@ -392,13 +411,19 @@ func (i *gatewayHandler) putHandler(w http.ResponseWriter, r *http.Request) {
newcid = nnode.Cid()
case nil:
+ pbnd, ok := rnode.(*dag.ProtoNode)
+ if !ok {
+ webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
+ return
+ }
+
// object set-data case
- rnode.SetData(newnode.Data())
+ pbnd.SetData(newnode.Data())
- newcid, err = i.node.DAG.Add(rnode)
+ newcid, err = i.node.DAG.Add(pbnd)
if err != nil {
nnk := newnode.Cid()
- rk := rnode.Cid()
+ rk := pbnd.Cid()
webError(w, fmt.Sprintf("putHandler: Could not add newnode(%q) to root(%q)", nnk.String(), rk.String()), err, http.StatusInternalServerError)
return
}
@@ -444,20 +469,33 @@ func (i *gatewayHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
+ pbnd, ok := pathNodes[len(pathNodes)-1].(*dag.ProtoNode)
+ if !ok {
+ webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
+ return
+ }
+
// TODO(cryptix): assumes len(pathNodes) > 1 - not found is an error above?
- err = pathNodes[len(pathNodes)-1].RemoveNodeLink(components[len(components)-1])
+ err = pbnd.RemoveNodeLink(components[len(components)-1])
if err != nil {
webError(w, "Could not delete link", err, http.StatusBadRequest)
return
}
- newnode := pathNodes[len(pathNodes)-1]
+ var newnode *dag.ProtoNode = pbnd
for j := len(pathNodes) - 2; j >= 0; j-- {
if _, err := i.node.DAG.Add(newnode); err != nil {
webError(w, "Could not add node", err, http.StatusInternalServerError)
return
}
- newnode, err = pathNodes[j].UpdateNodeLink(components[j], newnode)
+
+ pathpb, ok := pathNodes[j].(*dag.ProtoNode)
+ if !ok {
+ webError(w, "Cannot read non protobuf nodes through gateway", dag.ErrNotProtobuf, http.StatusBadRequest)
+ return
+ }
+
+ newnode, err = pathpb.UpdateNodeLink(components[j], newnode)
if err != nil {
webError(w, "Could not update node links", err, http.StatusInternalServerError)
return
diff --git a/core/corerepo/pinning.go b/core/corerepo/pinning.go
index 60732148909..41ade74e94a 100644
--- a/core/corerepo/pinning.go
+++ b/core/corerepo/pinning.go
@@ -14,18 +14,18 @@ objects.
package corerepo
import (
+ "context"
"fmt"
"github.com/ipfs/go-ipfs/core"
- "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
- context "context"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
func Pin(n *core.IpfsNode, ctx context.Context, paths []string, recursive bool) ([]*cid.Cid, error) {
- dagnodes := make([]*merkledag.Node, 0)
+ dagnodes := make([]node.Node, 0)
for _, fpath := range paths {
dagnode, err := core.Resolve(ctx, n, path.Path(fpath))
if err != nil {
diff --git a/core/coreunix/add.go b/core/coreunix/add.go
index a2ee0b7b663..ff8303ce14d 100644
--- a/core/coreunix/add.go
+++ b/core/coreunix/add.go
@@ -100,7 +100,7 @@ type Adder struct {
Silent bool
Wrap bool
Chunker string
- root *dag.Node
+ root *dag.ProtoNode
mr *mfs.Root
unlocker bs.Unlocker
tempRoot *cid.Cid
@@ -111,7 +111,7 @@ func (adder *Adder) SetMfsRoot(r *mfs.Root) {
}
// Perform the actual add & pin locally, outputting results to reader
-func (adder Adder) add(reader io.Reader) (*dag.Node, error) {
+func (adder Adder) add(reader io.Reader) (*dag.ProtoNode, error) {
chnk, err := chunk.FromString(reader, adder.Chunker)
if err != nil {
return nil, err
@@ -129,7 +129,7 @@ func (adder Adder) add(reader io.Reader) (*dag.Node, error) {
)
}
-func (adder *Adder) RootNode() (*dag.Node, error) {
+func (adder *Adder) RootNode() (*dag.ProtoNode, error) {
// for memoizing
if adder.root != nil {
return adder.root, nil
@@ -141,11 +141,18 @@ func (adder *Adder) RootNode() (*dag.Node, error) {
}
// if not wrapping, AND one root file, use that hash as root.
- if !adder.Wrap && len(root.Links) == 1 {
- root, err = root.Links[0].GetNode(adder.ctx, adder.dagService)
+ if !adder.Wrap && len(root.Links()) == 1 {
+ nd, err := root.Links()[0].GetNode(adder.ctx, adder.dagService)
if err != nil {
return nil, err
}
+
+ pbnd, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ root = pbnd
}
adder.root = root
@@ -178,7 +185,7 @@ func (adder *Adder) PinRoot() error {
return adder.pinning.Flush()
}
-func (adder *Adder) Finalize() (*dag.Node, error) {
+func (adder *Adder) Finalize() (*dag.ProtoNode, error) {
root := adder.mr.GetValue()
// cant just call adder.RootNode() here as we need the name for printing
@@ -189,7 +196,7 @@ func (adder *Adder) Finalize() (*dag.Node, error) {
var name string
if !adder.Wrap {
- name = rootNode.Links[0].Name
+ name = rootNode.Links()[0].Name
dir, ok := adder.mr.GetValue().(*mfs.Directory)
if !ok {
@@ -300,7 +307,7 @@ func AddR(n *core.IpfsNode, root string) (key string, err error) {
// to preserve the filename.
// Returns the path of the added file ("/filename"), the DAG node of
// the directory, and an error if any.
-func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.Node, error) {
+func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.ProtoNode, error) {
file := files.NewReaderFile(filename, filename, ioutil.NopCloser(r), nil)
fileAdder, err := NewAdder(n.Context(), n.Pinning, n.Blockstore, n.DAG)
if err != nil {
@@ -324,7 +331,7 @@ func AddWrapped(n *core.IpfsNode, r io.Reader, filename string) (string, *dag.No
return gopath.Join(c.String(), filename), dagnode, nil
}
-func (adder *Adder) addNode(node *dag.Node, path string) error {
+func (adder *Adder) addNode(node *dag.ProtoNode, path string) error {
// patch it into the root
if path == "" {
path = node.Cid().String()
@@ -449,7 +456,7 @@ func (adder *Adder) maybePauseForGC() error {
}
// outputDagnode sends dagnode info over the output channel
-func outputDagnode(out chan interface{}, name string, dn *dag.Node) error {
+func outputDagnode(out chan interface{}, name string, dn *dag.ProtoNode) error {
if out == nil {
return nil
}
@@ -475,18 +482,17 @@ func NewMemoryDagService() dag.DAGService {
}
// from core/commands/object.go
-func getOutput(dagnode *dag.Node) (*Object, error) {
+func getOutput(dagnode *dag.ProtoNode) (*Object, error) {
c := dagnode.Cid()
output := &Object{
Hash: c.String(),
- Links: make([]Link, len(dagnode.Links)),
+ Links: make([]Link, len(dagnode.Links())),
}
- for i, link := range dagnode.Links {
+ for i, link := range dagnode.Links() {
output.Links[i] = Link{
Name: link.Name,
- //Hash: link.Hash.B58String(),
Size: link.Size,
}
}
diff --git a/core/coreunix/cat.go b/core/coreunix/cat.go
index af3f43952c3..1d27ca13a43 100644
--- a/core/coreunix/cat.go
+++ b/core/coreunix/cat.go
@@ -1,8 +1,10 @@
package coreunix
import (
- context "context"
+ "context"
+
core "github.com/ipfs/go-ipfs/core"
+ dag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
uio "github.com/ipfs/go-ipfs/unixfs/io"
)
@@ -12,5 +14,11 @@ func Cat(ctx context.Context, n *core.IpfsNode, pstr string) (*uio.DagReader, er
if err != nil {
return nil, err
}
- return uio.NewDagReader(ctx, dagNode, n.DAG)
+
+ dnpb, ok := dagNode.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ return uio.NewDagReader(ctx, dnpb, n.DAG)
}
diff --git a/core/coreunix/metadata.go b/core/coreunix/metadata.go
index 1c832a1b506..19a1886bea0 100644
--- a/core/coreunix/metadata.go
+++ b/core/coreunix/metadata.go
@@ -18,7 +18,7 @@ func AddMetadataTo(n *core.IpfsNode, skey string, m *ft.Metadata) (string, error
return "", err
}
- mdnode := new(dag.Node)
+ mdnode := new(dag.ProtoNode)
mdata, err := ft.BytesForMetadata(m)
if err != nil {
return "", err
@@ -48,5 +48,10 @@ func Metadata(n *core.IpfsNode, skey string) (*ft.Metadata, error) {
return nil, err
}
- return ft.MetadataFromBytes(nd.Data())
+ pbnd, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ return ft.MetadataFromBytes(pbnd.Data())
}
diff --git a/core/coreunix/metadata_test.go b/core/coreunix/metadata_test.go
index 1f5fdf440d1..c13a820758b 100644
--- a/core/coreunix/metadata_test.go
+++ b/core/coreunix/metadata_test.go
@@ -72,7 +72,12 @@ func TestMetadata(t *testing.T) {
t.Fatal(err)
}
- ndr, err := uio.NewDagReader(ctx, retnode, ds)
+ rtnpb, ok := retnode.(*merkledag.ProtoNode)
+ if !ok {
+ t.Fatal("expected protobuf node")
+ }
+
+ ndr, err := uio.NewDagReader(ctx, rtnpb, ds)
if err != nil {
t.Fatal(err)
}
diff --git a/core/pathresolver.go b/core/pathresolver.go
index dc123297482..8bcf011695f 100644
--- a/core/pathresolver.go
+++ b/core/pathresolver.go
@@ -1,14 +1,14 @@
package core
import (
+ "context"
"errors"
"strings"
- context "context"
-
- merkledag "github.com/ipfs/go-ipfs/merkledag"
path "github.com/ipfs/go-ipfs/path"
+
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
// ErrNoNamesys is an explicit error for when an IPFS node doesn't
@@ -19,7 +19,7 @@ var ErrNoNamesys = errors.New(
// Resolve resolves the given path by parsing out protocol-specific
// entries (e.g. /ipns/) and then going through the /ipfs/
// entries and returning the final merkledag node.
-func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (*merkledag.Node, error) {
+func Resolve(ctx context.Context, n *IpfsNode, p path.Path) (node.Node, error) {
if strings.HasPrefix(p.String(), "/ipns/") {
// resolve ipns paths
@@ -82,10 +82,10 @@ func ResolveToCid(ctx context.Context, n *IpfsNode, p path.Path) (*cid.Cid, erro
}
// Extract and return the key of the link to the target dag node.
- link, err := dagnode.GetNodeLink(tail)
+ link, _, err := dagnode.Resolve([]string{tail})
if err != nil {
return nil, err
}
- return cid.NewCidV0(link.Hash), nil
+ return link.Cid, nil
}
diff --git a/exchange/bitswap/workers.go b/exchange/bitswap/workers.go
index 9fba1b0c36f..3a5184e7474 100644
--- a/exchange/bitswap/workers.go
+++ b/exchange/bitswap/workers.go
@@ -60,7 +60,7 @@ func (bs *Bitswap) taskWorker(ctx context.Context, id int) {
log.Event(ctx, "Bitswap.TaskWorker.Work", logging.LoggableMap{
"ID": id,
"Target": envelope.Peer.Pretty(),
- "Block": envelope.Block.Multihash().B58String(),
+ "Block": envelope.Block.Cid().String(),
})
bs.wm.SendBlock(ctx, envelope)
diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go
index 507179a0281..aea9d88eb54 100644
--- a/fuse/ipns/ipns_unix.go
+++ b/fuse/ipns/ipns_unix.go
@@ -100,7 +100,12 @@ func loadRoot(ctx context.Context, rt *keyRoot, ipfs *core.IpfsNode, name string
return nil, err
}
- root, err := mfs.NewRoot(ctx, ipfs.DAG, node, ipnsPubFunc(ipfs, rt.k))
+ pbnode, ok := node.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ root, err := mfs.NewRoot(ctx, ipfs.DAG, pbnode, ipnsPubFunc(ipfs, rt.k))
if err != nil {
return nil, err
}
diff --git a/fuse/readonly/ipfs_test.go b/fuse/readonly/ipfs_test.go
index 2ebd3ed9600..53a642b7b70 100644
--- a/fuse/readonly/ipfs_test.go
+++ b/fuse/readonly/ipfs_test.go
@@ -33,7 +33,7 @@ func maybeSkipFuseTests(t *testing.T) {
}
}
-func randObj(t *testing.T, nd *core.IpfsNode, size int64) (*dag.Node, []byte) {
+func randObj(t *testing.T, nd *core.IpfsNode, size int64) (*dag.ProtoNode, []byte) {
buf := make([]byte, size)
u.NewTimeSeededRand().Read(buf)
read := bytes.NewReader(buf)
@@ -86,17 +86,23 @@ func TestIpfsBasicRead(t *testing.T) {
}
}
-func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.Node) []string {
- if len(n.Links) == 0 {
+func getPaths(t *testing.T, ipfs *core.IpfsNode, name string, n *dag.ProtoNode) []string {
+ if len(n.Links()) == 0 {
return []string{name}
}
var out []string
- for _, lnk := range n.Links {
+ for _, lnk := range n.Links() {
child, err := lnk.GetNode(ipfs.Context(), ipfs.DAG)
if err != nil {
t.Fatal(err)
}
- sub := getPaths(t, ipfs, path.Join(name, lnk.Name), child)
+
+ childpb, ok := child.(*dag.ProtoNode)
+ if !ok {
+ t.Fatal(dag.ErrNotProtobuf)
+ }
+
+ sub := getPaths(t, ipfs, path.Join(name, lnk.Name), childpb)
out = append(out, sub...)
}
return out
diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go
index d8106ef7985..a6a3cd82d7b 100644
--- a/fuse/readonly/readonly_unix.go
+++ b/fuse/readonly/readonly_unix.go
@@ -66,7 +66,13 @@ func (s *Root) Lookup(ctx context.Context, name string) (fs.Node, error) {
return nil, fuse.ENOENT
}
- return &Node{Ipfs: s.Ipfs, Nd: nd}, nil
+ pbnd, ok := nd.(*mdag.ProtoNode)
+ if !ok {
+ log.Error("fuse node was not a protobuf node")
+ return nil, fuse.ENOTSUP
+ }
+
+ return &Node{Ipfs: s.Ipfs, Nd: pbnd}, nil
}
// ReadDirAll reads a particular directory. Disallowed for root.
@@ -78,7 +84,7 @@ func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
// Node is the core object representing a filesystem tree node.
type Node struct {
Ipfs *core.IpfsNode
- Nd *mdag.Node
+ Nd *mdag.ProtoNode
fd *uio.DagReader
cached *ftpb.Data
}
@@ -105,13 +111,13 @@ func (s *Node) Attr(ctx context.Context, a *fuse.Attr) error {
size := s.cached.GetFilesize()
a.Mode = 0444
a.Size = uint64(size)
- a.Blocks = uint64(len(s.Nd.Links))
+ a.Blocks = uint64(len(s.Nd.Links()))
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
case ftpb.Data_Raw:
a.Mode = 0444
a.Size = uint64(len(s.cached.GetData()))
- a.Blocks = uint64(len(s.Nd.Links))
+ a.Blocks = uint64(len(s.Nd.Links()))
a.Uid = uint32(os.Getuid())
a.Gid = uint32(os.Getgid())
case ftpb.Data_Symlink:
@@ -134,17 +140,23 @@ func (s *Node) Lookup(ctx context.Context, name string) (fs.Node, error) {
return nil, fuse.ENOENT
}
- return &Node{Ipfs: s.Ipfs, Nd: nodes[len(nodes)-1]}, nil
+ pbnd, ok := nodes[len(nodes)-1].(*mdag.ProtoNode)
+ if !ok {
+ log.Error("fuse lookup got non-protobuf node")
+ return nil, fuse.ENOTSUP
+ }
+
+ return &Node{Ipfs: s.Ipfs, Nd: pbnd}, nil
}
// ReadDirAll reads the link structure as directory entries
func (s *Node) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
log.Debug("Node ReadDir")
- entries := make([]fuse.Dirent, len(s.Nd.Links))
- for i, link := range s.Nd.Links {
+ entries := make([]fuse.Dirent, len(s.Nd.Links()))
+ for i, link := range s.Nd.Links() {
n := link.Name
if len(n) == 0 {
- n = link.Hash.B58String()
+ n = link.Cid.String()
}
entries[i] = fuse.Dirent{Name: n, Type: fuse.DT_File}
}
diff --git a/importer/balanced/balanced_test.go b/importer/balanced/balanced_test.go
index cb704a4a1b1..4a896cca780 100644
--- a/importer/balanced/balanced_test.go
+++ b/importer/balanced/balanced_test.go
@@ -22,7 +22,7 @@ import (
// TODO: extract these tests and more as a generic layout test suite
-func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
+func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@@ -31,7 +31,7 @@ func buildTestDag(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
return BalancedLayout(dbp.New(spl))
}
-func getTestDag(t *testing.T, ds dag.DAGService, size int64, blksize int64) (*dag.Node, []byte) {
+func getTestDag(t *testing.T, ds dag.DAGService, size int64, blksize int64) (*dag.ProtoNode, []byte) {
data := make([]byte, size)
u.NewTimeSeededRand().Read(data)
r := bytes.NewReader(data)
diff --git a/importer/balanced/builder.go b/importer/balanced/builder.go
index 3e448e3b9e2..4250e7f81f9 100644
--- a/importer/balanced/builder.go
+++ b/importer/balanced/builder.go
@@ -7,7 +7,7 @@ import (
dag "github.com/ipfs/go-ipfs/merkledag"
)
-func BalancedLayout(db *h.DagBuilderHelper) (*dag.Node, error) {
+func BalancedLayout(db *h.DagBuilderHelper) (*dag.ProtoNode, error) {
var root *h.UnixfsNode
for level := 0; !db.Done(); level++ {
diff --git a/importer/helpers/dagbuilder.go b/importer/helpers/dagbuilder.go
index 4f2875a4c22..497bf036eaf 100644
--- a/importer/helpers/dagbuilder.go
+++ b/importer/helpers/dagbuilder.go
@@ -106,7 +106,7 @@ func (db *DagBuilderHelper) FillNodeWithData(node *UnixfsNode) error {
return nil
}
-func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.Node, error) {
+func (db *DagBuilderHelper) Add(node *UnixfsNode) (*dag.ProtoNode, error) {
dn, err := node.GetDagNode()
if err != nil {
return nil, err
diff --git a/importer/helpers/helpers.go b/importer/helpers/helpers.go
index bfde68214a6..07fe3f99f37 100644
--- a/importer/helpers/helpers.go
+++ b/importer/helpers/helpers.go
@@ -37,14 +37,14 @@ var ErrSizeLimitExceeded = fmt.Errorf("object size limit exceeded")
// UnixfsNode is a struct created to aid in the generation
// of unixfs DAG trees
type UnixfsNode struct {
- node *dag.Node
+ node *dag.ProtoNode
ufmt *ft.FSNode
}
// NewUnixfsNode creates a new Unixfs node to represent a file
func NewUnixfsNode() *UnixfsNode {
return &UnixfsNode{
- node: new(dag.Node),
+ node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TFile},
}
}
@@ -52,13 +52,13 @@ func NewUnixfsNode() *UnixfsNode {
// NewUnixfsBlock creates a new Unixfs node to represent a raw data block
func NewUnixfsBlock() *UnixfsNode {
return &UnixfsNode{
- node: new(dag.Node),
+ node: new(dag.ProtoNode),
ufmt: &ft.FSNode{Type: ft.TRaw},
}
}
// NewUnixfsNodeFromDag reconstructs a Unixfs node from a given dag node
-func NewUnixfsNodeFromDag(nd *dag.Node) (*UnixfsNode, error) {
+func NewUnixfsNodeFromDag(nd *dag.ProtoNode) (*UnixfsNode, error) {
mb, err := ft.FSNodeFromBytes(nd.Data())
if err != nil {
return nil, err
@@ -75,12 +75,17 @@ func (n *UnixfsNode) NumChildren() int {
}
func (n *UnixfsNode) GetChild(ctx context.Context, i int, ds dag.DAGService) (*UnixfsNode, error) {
- nd, err := n.node.Links[i].GetNode(ctx, ds)
+ nd, err := n.node.Links()[i].GetNode(ctx, ds)
if err != nil {
return nil, err
}
- return NewUnixfsNodeFromDag(nd)
+ pbn, ok := nd.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ return NewUnixfsNodeFromDag(pbn)
}
// addChild will add the given UnixfsNode as a child of the receiver.
@@ -112,7 +117,7 @@ func (n *UnixfsNode) AddChild(child *UnixfsNode, db *DagBuilderHelper) error {
// Removes the child node at the given index
func (n *UnixfsNode) RemoveChild(index int, dbh *DagBuilderHelper) {
n.ufmt.RemoveBlockSize(index)
- n.node.Links = append(n.node.Links[:index], n.node.Links[index+1:]...)
+ n.node.SetLinks(append(n.node.Links()[:index], n.node.Links()[index+1:]...))
}
func (n *UnixfsNode) SetData(data []byte) {
@@ -121,7 +126,7 @@ func (n *UnixfsNode) SetData(data []byte) {
// getDagNode fills out the proper formatting for the unixfs node
// inside of a DAG node and returns the dag node
-func (n *UnixfsNode) GetDagNode() (*dag.Node, error) {
+func (n *UnixfsNode) GetDagNode() (*dag.ProtoNode, error) {
data, err := n.ufmt.GetBytes()
if err != nil {
return nil, err
diff --git a/importer/importer.go b/importer/importer.go
index 190500ba903..f5cddf3ff3c 100644
--- a/importer/importer.go
+++ b/importer/importer.go
@@ -19,7 +19,7 @@ var log = logging.Logger("importer")
// Builds a DAG from the given file, writing created blocks to disk as they are
// created
-func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
+func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.ProtoNode, error) {
stat, err := os.Lstat(fpath)
if err != nil {
return nil, err
@@ -38,7 +38,7 @@ func BuildDagFromFile(fpath string, ds dag.DAGService) (*dag.Node, error) {
return BuildDagFromReader(ds, chunk.NewSizeSplitter(f, chunk.DefaultBlockSize))
}
-func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
+func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@@ -47,7 +47,7 @@ func BuildDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error
return bal.BalancedLayout(dbp.New(spl))
}
-func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.Node, error) {
+func BuildTrickleDagFromReader(ds dag.DAGService, spl chunk.Splitter) (*dag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
diff --git a/importer/importer_test.go b/importer/importer_test.go
index 2650ea850db..611a62d393c 100644
--- a/importer/importer_test.go
+++ b/importer/importer_test.go
@@ -14,7 +14,7 @@ import (
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
-func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
+func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.ProtoNode, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
@@ -24,7 +24,7 @@ func getBalancedDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAG
return nd, ds
}
-func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.Node, dag.DAGService) {
+func getTrickleDag(t testing.TB, size int64, blksize int64) (*dag.ProtoNode, dag.DAGService) {
ds := mdtest.Mock()
r := io.LimitReader(u.NewTimeSeededRand(), size)
nd, err := BuildTrickleDagFromReader(ds, chunk.NewSizeSplitter(r, blksize))
@@ -100,7 +100,7 @@ func BenchmarkTrickleReadFull(b *testing.B) {
runReadBench(b, nd, ds)
}
-func runReadBench(b *testing.B, nd *dag.Node, ds dag.DAGService) {
+func runReadBench(b *testing.B, nd *dag.ProtoNode, ds dag.DAGService) {
for i := 0; i < b.N; i++ {
ctx, cancel := context.WithCancel(context.Background())
read, err := uio.NewDagReader(ctx, nd, ds)
diff --git a/importer/trickle/trickle_test.go b/importer/trickle/trickle_test.go
index 7d730c6be6f..bad45a7d9c3 100644
--- a/importer/trickle/trickle_test.go
+++ b/importer/trickle/trickle_test.go
@@ -20,7 +20,7 @@ import (
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
-func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.Node, error) {
+func buildTestDag(ds merkledag.DAGService, spl chunk.Splitter) (*merkledag.ProtoNode, error) {
dbp := h.DagBuilderParams{
Dagserv: ds,
Maxlinks: h.DefaultLinksPerBlock,
@@ -523,7 +523,7 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
data := []byte("AB")
- nd := new(merkledag.Node)
+ nd := new(merkledag.ProtoNode)
nd.SetData(ft.FilePBData(nil, 0))
dbp := &h.DagBuilderParams{
@@ -561,7 +561,7 @@ func TestAppendSingleBytesToEmpty(t *testing.T) {
}
}
-func printDag(nd *merkledag.Node, ds merkledag.DAGService, indent int) {
+func printDag(nd *merkledag.ProtoNode, ds merkledag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
@@ -571,17 +571,17 @@ func printDag(nd *merkledag.Node, ds merkledag.DAGService, indent int) {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, nc = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
- if len(nd.Links) > 0 {
+ if len(nd.Links()) > 0 {
fmt.Println()
}
- for _, lnk := range nd.Links {
+ for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
- printDag(child, ds, indent+1)
+ printDag(child.(*merkledag.ProtoNode), ds, indent+1)
}
- if len(nd.Links) > 0 {
+ if len(nd.Links()) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}
diff --git a/importer/trickle/trickledag.go b/importer/trickle/trickledag.go
index 466b9703768..9341407e14e 100644
--- a/importer/trickle/trickledag.go
+++ b/importer/trickle/trickledag.go
@@ -1,9 +1,9 @@
package trickle
import (
+ "context"
"errors"
-
- context "context"
+ "fmt"
h "github.com/ipfs/go-ipfs/importer/helpers"
dag "github.com/ipfs/go-ipfs/merkledag"
@@ -15,7 +15,7 @@ import (
// improves seek speeds.
const layerRepeat = 4
-func TrickleLayout(db *h.DagBuilderHelper) (*dag.Node, error) {
+func TrickleLayout(db *h.DagBuilderHelper) (*dag.ProtoNode, error) {
root := h.NewUnixfsNode()
if err := db.FillNodeLayer(root); err != nil {
return nil, err
@@ -66,7 +66,7 @@ func fillTrickleRec(db *h.DagBuilderHelper, node *h.UnixfsNode, depth int) error
}
// TrickleAppend appends the data in `db` to the dag, using the Trickledag format
-func TrickleAppend(ctx context.Context, base *dag.Node, db *h.DagBuilderHelper) (out *dag.Node, err_out error) {
+func TrickleAppend(ctx context.Context, base *dag.ProtoNode, db *h.DagBuilderHelper) (out *dag.ProtoNode, err_out error) {
defer func() {
if err_out == nil {
if err := db.Close(); err != nil {
@@ -229,15 +229,15 @@ func trickleDepthInfo(node *h.UnixfsNode, maxlinks int) (int, int) {
// VerifyTrickleDagStructure checks that the given dag matches exactly the trickle dag datastructure
// layout
-func VerifyTrickleDagStructure(nd *dag.Node, ds dag.DAGService, direct int, layerRepeat int) error {
+func VerifyTrickleDagStructure(nd *dag.ProtoNode, ds dag.DAGService, direct int, layerRepeat int) error {
return verifyTDagRec(nd, -1, direct, layerRepeat, ds)
}
// Recursive call for verifying the structure of a trickledag
-func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGService) error {
+func verifyTDagRec(nd *dag.ProtoNode, depth, direct, layerRepeat int, ds dag.DAGService) error {
if depth == 0 {
// zero depth dag is raw data block
- if len(nd.Links) > 0 {
+ if len(nd.Links()) > 0 {
return errors.New("expected direct block")
}
@@ -259,22 +259,27 @@ func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGServi
}
if pbn.GetType() != ft.TFile {
- return errors.New("expected file as branch node")
+ return fmt.Errorf("expected file as branch node, got: %s", pbn.GetType())
}
if len(pbn.Data) > 0 {
return errors.New("branch node should not have data")
}
- for i := 0; i < len(nd.Links); i++ {
- child, err := nd.Links[i].GetNode(context.TODO(), ds)
+ for i := 0; i < len(nd.Links()); i++ {
+ childi, err := nd.Links()[i].GetNode(context.TODO(), ds)
if err != nil {
return err
}
+ childpb, ok := childi.(*dag.ProtoNode)
+ if !ok {
+ return fmt.Errorf("cannot operate on non-protobuf nodes")
+ }
+
if i < direct {
// Direct blocks
- err := verifyTDagRec(child, 0, direct, layerRepeat, ds)
+ err := verifyTDagRec(childpb, 0, direct, layerRepeat, ds)
if err != nil {
return err
}
@@ -284,7 +289,7 @@ func verifyTDagRec(nd *dag.Node, depth, direct, layerRepeat int, ds dag.DAGServi
if rdepth >= depth && depth > 0 {
return errors.New("Child dag was too deep!")
}
- err := verifyTDagRec(child, rdepth, direct, layerRepeat, ds)
+ err := verifyTDagRec(childpb, rdepth, direct, layerRepeat, ds)
if err != nil {
return err
}
diff --git a/merkledag/coding.go b/merkledag/coding.go
index 3f68dd7f10a..c37a63db530 100644
--- a/merkledag/coding.go
+++ b/merkledag/coding.go
@@ -7,7 +7,7 @@ import (
pb "github.com/ipfs/go-ipfs/merkledag/pb"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
- mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
@@ -16,23 +16,23 @@ import (
// unmarshal decodes raw data into a *Node instance.
// The conversion uses an intermediate PBNode.
-func (n *Node) unmarshal(encoded []byte) error {
+func (n *ProtoNode) unmarshal(encoded []byte) error {
var pbn pb.PBNode
if err := pbn.Unmarshal(encoded); err != nil {
return fmt.Errorf("Unmarshal failed. %v", err)
}
pbnl := pbn.GetLinks()
- n.Links = make([]*Link, len(pbnl))
+ n.links = make([]*node.Link, len(pbnl))
for i, l := range pbnl {
- n.Links[i] = &Link{Name: l.GetName(), Size: l.GetTsize()}
- h, err := mh.Cast(l.GetHash())
+ n.links[i] = &node.Link{Name: l.GetName(), Size: l.GetTsize()}
+ c, err := cid.Cast(l.GetHash())
if err != nil {
return fmt.Errorf("Link hash #%d is not valid multihash. %v", i, err)
}
- n.Links[i].Hash = h
+ n.links[i].Cid = c
}
- sort.Stable(LinkSlice(n.Links)) // keep links sorted
+ sort.Stable(LinkSlice(n.links)) // keep links sorted
n.data = pbn.GetData()
n.encoded = encoded
@@ -41,7 +41,7 @@ func (n *Node) unmarshal(encoded []byte) error {
// Marshal encodes a *Node instance into a new byte slice.
// The conversion uses an intermediate PBNode.
-func (n *Node) Marshal() ([]byte, error) {
+func (n *ProtoNode) Marshal() ([]byte, error) {
pbn := n.getPBNode()
data, err := pbn.Marshal()
if err != nil {
@@ -50,18 +50,18 @@ func (n *Node) Marshal() ([]byte, error) {
return data, nil
}
-func (n *Node) getPBNode() *pb.PBNode {
+func (n *ProtoNode) getPBNode() *pb.PBNode {
pbn := &pb.PBNode{}
- if len(n.Links) > 0 {
- pbn.Links = make([]*pb.PBLink, len(n.Links))
+ if len(n.links) > 0 {
+ pbn.Links = make([]*pb.PBLink, len(n.links))
}
- sort.Stable(LinkSlice(n.Links)) // keep links sorted
- for i, l := range n.Links {
+ sort.Stable(LinkSlice(n.links)) // keep links sorted
+ for i, l := range n.links {
pbn.Links[i] = &pb.PBLink{}
pbn.Links[i].Name = &l.Name
pbn.Links[i].Tsize = &l.Size
- pbn.Links[i].Hash = []byte(l.Hash)
+ pbn.Links[i].Hash = l.Cid.Bytes()
}
if len(n.data) > 0 {
@@ -72,8 +72,8 @@ func (n *Node) getPBNode() *pb.PBNode {
// EncodeProtobuf returns the encoded raw data version of a Node instance.
// It may use a cached encoded version, unless the force flag is given.
-func (n *Node) EncodeProtobuf(force bool) ([]byte, error) {
- sort.Stable(LinkSlice(n.Links)) // keep links sorted
+func (n *ProtoNode) EncodeProtobuf(force bool) ([]byte, error) {
+ sort.Stable(LinkSlice(n.links)) // keep links sorted
if n.encoded == nil || force {
n.cached = nil
var err error
@@ -91,8 +91,8 @@ func (n *Node) EncodeProtobuf(force bool) ([]byte, error) {
}
// Decoded decodes raw data and returns a new Node instance.
-func DecodeProtobuf(encoded []byte) (*Node, error) {
- n := new(Node)
+func DecodeProtobuf(encoded []byte) (*ProtoNode, error) {
+ n := new(ProtoNode)
err := n.unmarshal(encoded)
if err != nil {
return nil, fmt.Errorf("incorrectly formatted merkledag node: %s", err)
diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go
index fa462db0aac..22392789297 100644
--- a/merkledag/merkledag.go
+++ b/merkledag/merkledag.go
@@ -13,6 +13,7 @@ import (
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
var log = logging.Logger("merkledag")
@@ -20,9 +21,9 @@ var ErrNotFound = fmt.Errorf("merkledag: not found")
// DAGService is an IPFS Merkle DAG service.
type DAGService interface {
- Add(*Node) (*cid.Cid, error)
- Get(context.Context, *cid.Cid) (*Node, error)
- Remove(*Node) error
+ Add(node.Node) (*cid.Cid, error)
+ Get(context.Context, *cid.Cid) (node.Node, error)
+ Remove(node.Node) error
// GetDAG returns, in order, all the single leve child
// nodes of the passed in node.
@@ -36,7 +37,7 @@ type DAGService interface {
type LinkService interface {
// Return all links for a node, may be more effect than
// calling Get in DAGService
- GetLinks(context.Context, *cid.Cid) ([]*Link, error)
+ GetLinks(context.Context, *cid.Cid) ([]*node.Link, error)
GetOfflineLinkService() LinkService
}
@@ -55,7 +56,7 @@ type dagService struct {
}
// Add adds a node to the dagService, storing the block in the BlockService
-func (n *dagService) Add(nd *Node) (*cid.Cid, error) {
+func (n *dagService) Add(nd node.Node) (*cid.Cid, error) {
if n == nil { // FIXME remove this assertion. protect with constructor invariant
return nil, fmt.Errorf("dagService is nil")
}
@@ -68,7 +69,7 @@ func (n *dagService) Batch() *Batch {
}
// Get retrieves a node from the dagService, fetching the block in the BlockService
-func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
+func (n *dagService) Get(ctx context.Context, c *cid.Cid) (node.Node, error) {
if n == nil {
return nil, fmt.Errorf("dagService is nil")
}
@@ -84,7 +85,7 @@ func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
return nil, fmt.Errorf("Failed to get block for %s: %v", c, err)
}
- var res *Node
+ var res node.Node
switch c.Type() {
case cid.Protobuf:
out, err := DecodeProtobuf(b.RawData())
@@ -94,22 +95,21 @@ func (n *dagService) Get(ctx context.Context, c *cid.Cid) (*Node, error) {
}
return nil, fmt.Errorf("Failed to decode Protocol Buffers: %v", err)
}
+ out.cached = c
res = out
default:
return nil, fmt.Errorf("unrecognized formatting type")
}
- res.cached = c
-
return res, nil
}
-func (n *dagService) GetLinks(ctx context.Context, c *cid.Cid) ([]*Link, error) {
+func (n *dagService) GetLinks(ctx context.Context, c *cid.Cid) ([]*node.Link, error) {
node, err := n.Get(ctx, c)
if err != nil {
return nil, err
}
- return node.Links, nil
+ return node.Links(), nil
}
func (n *dagService) GetOfflineLinkService() LinkService {
@@ -121,7 +121,7 @@ func (n *dagService) GetOfflineLinkService() LinkService {
}
}
-func (n *dagService) Remove(nd *Node) error {
+func (n *dagService) Remove(nd node.Node) error {
return n.Blocks.DeleteBlock(nd)
}
@@ -143,7 +143,7 @@ func FindLinks(links []*cid.Cid, c *cid.Cid, start int) []int {
}
type NodeOption struct {
- Node *Node
+ Node node.Node
Err error
}
@@ -166,7 +166,7 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
c := b.Cid()
- var nd *Node
+ var nd node.Node
switch c.Type() {
case cid.Protobuf:
decnd, err := DecodeProtobuf(b.RawData())
@@ -174,7 +174,7 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
out <- &NodeOption{Err: err}
return
}
- decnd.cached = cid.NewCidV0(b.Multihash())
+ decnd.cached = b.Cid()
nd = decnd
default:
out <- &NodeOption{Err: fmt.Errorf("unrecognized object type: %s", c.Type())}
@@ -197,10 +197,10 @@ func (ds *dagService) GetMany(ctx context.Context, keys []*cid.Cid) <-chan *Node
// GetDAG will fill out all of the links of the given Node.
// It returns a channel of nodes, which the caller can receive
// all the child nodes of 'root' on, in proper order.
-func GetDAG(ctx context.Context, ds DAGService, root *Node) []NodeGetter {
+func GetDAG(ctx context.Context, ds DAGService, root node.Node) []NodeGetter {
var cids []*cid.Cid
- for _, lnk := range root.Links {
- cids = append(cids, cid.NewCidV0(lnk.Hash))
+ for _, lnk := range root.Links() {
+ cids = append(cids, lnk.Cid)
}
return GetNodes(ctx, ds, cids)
@@ -269,16 +269,16 @@ func dedupeKeys(cids []*cid.Cid) []*cid.Cid {
func newNodePromise(ctx context.Context) NodeGetter {
return &nodePromise{
- recv: make(chan *Node, 1),
+ recv: make(chan node.Node, 1),
ctx: ctx,
err: make(chan error, 1),
}
}
type nodePromise struct {
- cache *Node
+ cache node.Node
clk sync.Mutex
- recv chan *Node
+ recv chan node.Node
ctx context.Context
err chan error
}
@@ -288,9 +288,9 @@ type nodePromise struct {
// from its internal channels, subsequent calls will return the
// cached node.
type NodeGetter interface {
- Get(context.Context) (*Node, error)
+ Get(context.Context) (node.Node, error)
Fail(err error)
- Send(*Node)
+ Send(node.Node)
}
func (np *nodePromise) Fail(err error) {
@@ -306,7 +306,7 @@ func (np *nodePromise) Fail(err error) {
np.err <- err
}
-func (np *nodePromise) Send(nd *Node) {
+func (np *nodePromise) Send(nd node.Node) {
var already bool
np.clk.Lock()
if np.cache != nil {
@@ -322,7 +322,7 @@ func (np *nodePromise) Send(nd *Node) {
np.recv <- nd
}
-func (np *nodePromise) Get(ctx context.Context) (*Node, error) {
+func (np *nodePromise) Get(ctx context.Context) (node.Node, error) {
np.clk.Lock()
c := np.cache
np.clk.Unlock()
@@ -350,14 +350,9 @@ type Batch struct {
MaxSize int
}
-func (t *Batch) Add(nd *Node) (*cid.Cid, error) {
- d, err := nd.EncodeProtobuf(false)
- if err != nil {
- return nil, err
- }
-
+func (t *Batch) Add(nd node.Node) (*cid.Cid, error) {
t.blocks = append(t.blocks, nd)
- t.size += len(d)
+ t.size += len(nd.RawData())
if t.size > t.MaxSize {
return nd.Cid(), t.Commit()
}
@@ -371,10 +366,6 @@ func (t *Batch) Commit() error {
return err
}
-func legacyCidFromLink(lnk *Link) *cid.Cid {
- return cid.NewCidV0(lnk.Hash)
-}
-
// EnumerateChildren will walk the dag below the given root node and add all
// unseen children to the passed in set.
// TODO: parallelize to avoid disk latency perf hits?
@@ -386,7 +377,7 @@ func EnumerateChildren(ctx context.Context, ds LinkService, root *cid.Cid, visit
return err
}
for _, lnk := range links {
- c := legacyCidFromLink(lnk)
+ c := lnk.Cid
if visit(c) {
err = EnumerateChildren(ctx, ds, c, visit, bestEffort)
if err != nil {
@@ -432,8 +423,8 @@ func EnumerateChildrenAsync(ctx context.Context, ds DAGService, c *cid.Cid, visi
live--
var cids []*cid.Cid
- for _, lnk := range nd.Links {
- c := legacyCidFromLink(lnk)
+ for _, lnk := range nd.Links() {
+ c := lnk.Cid
if visit(c) {
live++
cids = append(cids, c)
diff --git a/merkledag/merkledag_test.go b/merkledag/merkledag_test.go
index e65f4e4172a..310134fa099 100644
--- a/merkledag/merkledag_test.go
+++ b/merkledag/merkledag_test.go
@@ -2,6 +2,7 @@ package merkledag_test
import (
"bytes"
+ "context"
"errors"
"fmt"
"io"
@@ -19,10 +20,10 @@ import (
mdpb "github.com/ipfs/go-ipfs/merkledag/pb"
dstest "github.com/ipfs/go-ipfs/merkledag/test"
uio "github.com/ipfs/go-ipfs/unixfs/io"
- key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
- "context"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
@@ -38,13 +39,13 @@ func TestNode(t *testing.T) {
t.Error(err)
}
- printn := func(name string, n *Node) {
+ printn := func(name string, n *ProtoNode) {
fmt.Println(">", name)
fmt.Println("data:", string(n.Data()))
fmt.Println("links:")
- for _, l := range n.Links {
- fmt.Println("-", l.Name, l.Size, l.Hash)
+ for _, l := range n.Links() {
+ fmt.Println("-", l.Name, l.Size, l.Cid)
}
e, err := n.EncodeProtobuf(false)
@@ -70,7 +71,7 @@ func TestNode(t *testing.T) {
printn("beep boop", n3)
}
-func SubtestNodeStat(t *testing.T, n *Node) {
+func SubtestNodeStat(t *testing.T, n *ProtoNode) {
enc, err := n.EncodeProtobuf(true)
if err != nil {
t.Error("n.EncodeProtobuf(true) failed")
@@ -85,8 +86,8 @@ func SubtestNodeStat(t *testing.T, n *Node) {
k := n.Key()
- expected := NodeStat{
- NumLinks: len(n.Links),
+ expected := node.NodeStat{
+ NumLinks: len(n.Links()),
BlockSize: len(enc),
LinksSize: len(enc) - len(n.Data()), // includes framing.
DataSize: len(n.Data()),
@@ -174,7 +175,12 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
}
fmt.Println("Got first node back.")
- read, err := uio.NewDagReader(ctx, first, dagservs[i])
+ firstpb, ok := first.(*ProtoNode)
+ if !ok {
+ errs <- ErrNotProtobuf // NOTE(review): missing return/continue — firstpb is nil here and is passed to NewDagReader below; TODO confirm and bail out of the goroutine
+ }
+
+ read, err := uio.NewDagReader(ctx, firstpb, dagservs[i])
if err != nil {
errs <- err
}
@@ -201,7 +207,7 @@ func runBatchFetchTest(t *testing.T, read io.Reader) {
}
}
-func assertCanGet(t *testing.T, ds DAGService, n *Node) {
+func assertCanGet(t *testing.T, ds DAGService, n node.Node) {
if _, err := ds.Get(context.Background(), n.Cid()); err != nil {
t.Fatal(err)
}
@@ -263,13 +269,13 @@ func TestEnumerateChildren(t *testing.T) {
t.Fatal(err)
}
- var traverse func(n *Node)
- traverse = func(n *Node) {
+ var traverse func(n node.Node)
+ traverse = func(n node.Node) {
// traverse dag and check
- for _, lnk := range n.Links {
- c := cid.NewCidV0(lnk.Hash)
+ for _, lnk := range n.Links() {
+ c := lnk.Cid
if !set.Has(c) {
- t.Fatal("missing key in set! ", lnk.Hash.B58String())
+ t.Fatal("missing key in set! ", lnk.Cid.String())
}
child, err := ds.Get(context.Background(), c)
if err != nil {
@@ -286,7 +292,7 @@ func TestFetchFailure(t *testing.T) {
ds := dstest.Mock()
ds_bad := dstest.Mock()
- top := new(Node)
+ top := new(ProtoNode)
for i := 0; i < 10; i++ {
nd := NodeWithData([]byte{byte('a' + i)})
_, err := ds.Add(nd)
@@ -345,13 +351,13 @@ func TestUnmarshalFailure(t *testing.T) {
t.Fatal("should have failed to parse node with bad link")
}
- n := &Node{}
+ n := &ProtoNode{}
n.Marshal()
}
func TestBasicAddGet(t *testing.T) {
ds := dstest.Mock()
- nd := new(Node)
+ nd := new(ProtoNode)
c, err := ds.Add(nd)
if err != nil {
diff --git a/merkledag/node.go b/merkledag/node.go
index 91b9be64114..4c01c9c9c1b 100644
--- a/merkledag/node.go
+++ b/merkledag/node.go
@@ -1,21 +1,22 @@
package merkledag
import (
- "fmt"
-
"context"
+ "fmt"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
+var ErrNotProtobuf = fmt.Errorf("expected protobuf dag node")
var ErrLinkNotFound = fmt.Errorf("no link by that name")
// Node represents a node in the IPFS Merkle DAG.
// nodes have opaque data and a set of navigable links.
-type Node struct {
- Links []*Link
+type ProtoNode struct {
+ links []*node.Link
data []byte
// cache encoded/marshaled value
@@ -24,74 +25,27 @@ type Node struct {
cached *cid.Cid
}
-// NodeStat is a statistics object for a Node. Mostly sizes.
-type NodeStat struct {
- Hash string
- NumLinks int // number of links in link table
- BlockSize int // size of the raw, encoded data
- LinksSize int // size of the links segment
- DataSize int // size of the data segment
- CumulativeSize int // cumulative size of object and its references
-}
-
-func (ns NodeStat) String() string {
- f := "NodeStat{NumLinks: %d, BlockSize: %d, LinksSize: %d, DataSize: %d, CumulativeSize: %d}"
- return fmt.Sprintf(f, ns.NumLinks, ns.BlockSize, ns.LinksSize, ns.DataSize, ns.CumulativeSize)
-}
-
-// Link represents an IPFS Merkle DAG Link between Nodes.
-type Link struct {
- // utf string name. should be unique per object
- Name string // utf8
-
- // cumulative size of target object
- Size uint64
-
- // multihash of the target object
- Hash mh.Multihash
-}
-
-type LinkSlice []*Link
+type LinkSlice []*node.Link
func (ls LinkSlice) Len() int { return len(ls) }
func (ls LinkSlice) Swap(a, b int) { ls[a], ls[b] = ls[b], ls[a] }
func (ls LinkSlice) Less(a, b int) bool { return ls[a].Name < ls[b].Name }
-// MakeLink creates a link to the given node
-func MakeLink(n *Node) (*Link, error) {
- s, err := n.Size()
- if err != nil {
- return nil, err
- }
-
- h := n.Multihash()
-
- return &Link{
- Size: s,
- Hash: h,
- }, nil
-}
-
-// GetNode returns the MDAG Node that this link points to
-func (l *Link) GetNode(ctx context.Context, serv DAGService) (*Node, error) {
- return serv.Get(ctx, legacyCidFromLink(l))
-}
-
-func NodeWithData(d []byte) *Node {
- return &Node{data: d}
+func NodeWithData(d []byte) *ProtoNode {
+ return &ProtoNode{data: d}
}
// AddNodeLink adds a link to another node.
-func (n *Node) AddNodeLink(name string, that *Node) error {
+func (n *ProtoNode) AddNodeLink(name string, that *ProtoNode) error {
n.encoded = nil
- lnk, err := MakeLink(that)
-
- lnk.Name = name
+ lnk, err := node.MakeLink(that)
if err != nil {
return err
}
+ lnk.Name = name
+
n.AddRawLink(name, lnk)
return nil
@@ -99,9 +53,9 @@ func (n *Node) AddNodeLink(name string, that *Node) error {
// AddNodeLinkClean adds a link to another node. without keeping a reference to
// the child node
-func (n *Node) AddNodeLinkClean(name string, that *Node) error {
+func (n *ProtoNode) AddNodeLinkClean(name string, that node.Node) error {
n.encoded = nil
- lnk, err := MakeLink(that)
+ lnk, err := node.MakeLink(that)
if err != nil {
return err
}
@@ -111,31 +65,31 @@ func (n *Node) AddNodeLinkClean(name string, that *Node) error {
}
// AddRawLink adds a copy of a link to this node
-func (n *Node) AddRawLink(name string, l *Link) error {
+func (n *ProtoNode) AddRawLink(name string, l *node.Link) error {
n.encoded = nil
- n.Links = append(n.Links, &Link{
+ n.links = append(n.links, &node.Link{
Name: name,
Size: l.Size,
- Hash: l.Hash,
+ Cid: l.Cid,
})
return nil
}
// Remove a link on this node by the given name
-func (n *Node) RemoveNodeLink(name string) error {
+func (n *ProtoNode) RemoveNodeLink(name string) error {
n.encoded = nil
- good := make([]*Link, 0, len(n.Links))
+ good := make([]*node.Link, 0, len(n.links))
var found bool
- for _, l := range n.Links {
+ for _, l := range n.links {
if l.Name != name {
good = append(good, l)
} else {
found = true
}
}
- n.Links = good
+ n.links = good
if !found {
return ErrNotFound
@@ -145,20 +99,34 @@ func (n *Node) RemoveNodeLink(name string) error {
}
// Return a copy of the link with given name
-func (n *Node) GetNodeLink(name string) (*Link, error) {
- for _, l := range n.Links {
+func (n *ProtoNode) GetNodeLink(name string) (*node.Link, error) {
+ for _, l := range n.links {
if l.Name == name {
- return &Link{
+ return &node.Link{
Name: l.Name,
Size: l.Size,
- Hash: l.Hash,
+ Cid: l.Cid,
}, nil
}
}
return nil, ErrLinkNotFound
}
-func (n *Node) GetLinkedNode(ctx context.Context, ds DAGService, name string) (*Node, error) {
+func (n *ProtoNode) GetLinkedProtoNode(ctx context.Context, ds DAGService, name string) (*ProtoNode, error) {
+ nd, err := n.GetLinkedNode(ctx, ds, name)
+ if err != nil {
+ return nil, err
+ }
+
+ pbnd, ok := nd.(*ProtoNode)
+ if !ok {
+ return nil, ErrNotProtobuf
+ }
+
+ return pbnd, nil
+}
+
+func (n *ProtoNode) GetLinkedNode(ctx context.Context, ds DAGService, name string) (node.Node, error) {
lnk, err := n.GetNodeLink(name)
if err != nil {
return nil, err
@@ -169,30 +137,30 @@ func (n *Node) GetLinkedNode(ctx context.Context, ds DAGService, name string) (*
// Copy returns a copy of the node.
// NOTE: Does not make copies of Node objects in the links.
-func (n *Node) Copy() *Node {
- nnode := new(Node)
+func (n *ProtoNode) Copy() *ProtoNode {
+ nnode := new(ProtoNode)
if len(n.data) > 0 {
nnode.data = make([]byte, len(n.data))
copy(nnode.data, n.data)
}
- if len(n.Links) > 0 {
- nnode.Links = make([]*Link, len(n.Links))
- copy(nnode.Links, n.Links)
+ if len(n.links) > 0 {
+ nnode.links = make([]*node.Link, len(n.links))
+ copy(nnode.links, n.links)
}
return nnode
}
-func (n *Node) RawData() []byte {
+func (n *ProtoNode) RawData() []byte {
out, _ := n.EncodeProtobuf(false)
return out
}
-func (n *Node) Data() []byte {
+func (n *ProtoNode) Data() []byte {
return n.data
}
-func (n *Node) SetData(d []byte) {
+func (n *ProtoNode) SetData(d []byte) {
n.encoded = nil
n.cached = nil
n.data = d
@@ -200,7 +168,7 @@ func (n *Node) SetData(d []byte) {
// UpdateNodeLink return a copy of the node with the link name set to point to
// that. If a link of the same name existed, it is removed.
-func (n *Node) UpdateNodeLink(name string, that *Node) (*Node, error) {
+func (n *ProtoNode) UpdateNodeLink(name string, that *ProtoNode) (*ProtoNode, error) {
newnode := n.Copy()
err := newnode.RemoveNodeLink(name)
err = nil // ignore error
@@ -210,21 +178,21 @@ func (n *Node) UpdateNodeLink(name string, that *Node) (*Node, error) {
// Size returns the total size of the data addressed by node,
// including the total sizes of references.
-func (n *Node) Size() (uint64, error) {
+func (n *ProtoNode) Size() (uint64, error) {
b, err := n.EncodeProtobuf(false)
if err != nil {
return 0, err
}
s := uint64(len(b))
- for _, l := range n.Links {
+ for _, l := range n.links {
s += l.Size
}
return s, nil
}
// Stat returns statistics on the node.
-func (n *Node) Stat() (*NodeStat, error) {
+func (n *ProtoNode) Stat() (*node.NodeStat, error) {
enc, err := n.EncodeProtobuf(false)
if err != nil {
return nil, err
@@ -235,9 +203,9 @@ func (n *Node) Stat() (*NodeStat, error) {
return nil, err
}
- return &NodeStat{
+ return &node.NodeStat{
Hash: n.Key().B58String(),
- NumLinks: len(n.Links),
+ NumLinks: len(n.links),
BlockSize: len(enc),
LinksSize: len(enc) - len(n.data), // includes framing.
DataSize: len(n.data),
@@ -245,28 +213,28 @@ func (n *Node) Stat() (*NodeStat, error) {
}, nil
}
-func (n *Node) Key() key.Key {
+func (n *ProtoNode) Key() key.Key {
return key.Key(n.Multihash())
}
-func (n *Node) Loggable() map[string]interface{} {
+func (n *ProtoNode) Loggable() map[string]interface{} {
return map[string]interface{}{
"node": n.String(),
}
}
-func (n *Node) Cid() *cid.Cid {
+func (n *ProtoNode) Cid() *cid.Cid {
h := n.Multihash()
return cid.NewCidV0(h)
}
-func (n *Node) String() string {
+func (n *ProtoNode) String() string {
return n.Cid().String()
}
// Multihash hashes the encoded data of this node.
-func (n *Node) Multihash() mh.Multihash {
+func (n *ProtoNode) Multihash() mh.Multihash {
// NOTE: EncodeProtobuf generates the hash and puts it in n.cached.
_, err := n.EncodeProtobuf(false)
if err != nil {
@@ -276,3 +244,32 @@ func (n *Node) Multihash() mh.Multihash {
return n.cached.Hash()
}
+
+func (n *ProtoNode) Links() []*node.Link {
+ return n.links
+}
+
+func (n *ProtoNode) SetLinks(links []*node.Link) {
+ n.links = links
+}
+
+func (n *ProtoNode) Resolve(path []string) (*node.Link, []string, error) {
+ if len(path) == 0 {
+ return nil, nil, fmt.Errorf("end of path, no more links to resolve")
+ }
+
+ lnk, err := n.GetNodeLink(path[0])
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return lnk, path[1:], nil
+}
+
+func (n *ProtoNode) Tree() []string {
+ out := make([]string, 0, len(n.links))
+ for _, lnk := range n.links {
+ out = append(out, lnk.Name)
+ }
+ return out
+}
diff --git a/merkledag/node_test.go b/merkledag/node_test.go
index f123346140b..392a51ea282 100644
--- a/merkledag/node_test.go
+++ b/merkledag/node_test.go
@@ -1,40 +1,40 @@
package merkledag_test
import (
+ "context"
"testing"
. "github.com/ipfs/go-ipfs/merkledag"
mdtest "github.com/ipfs/go-ipfs/merkledag/test"
- "context"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
func TestRemoveLink(t *testing.T) {
- nd := &Node{
- Links: []*Link{
- &Link{Name: "a"},
- &Link{Name: "b"},
- &Link{Name: "a"},
- &Link{Name: "a"},
- &Link{Name: "c"},
- &Link{Name: "a"},
- },
- }
+ nd := &ProtoNode{}
+ nd.SetLinks([]*node.Link{
+ {Name: "a"},
+ {Name: "b"},
+ {Name: "a"},
+ {Name: "a"},
+ {Name: "c"},
+ {Name: "a"},
+ })
err := nd.RemoveNodeLink("a")
if err != nil {
t.Fatal(err)
}
- if len(nd.Links) != 2 {
+ if len(nd.Links()) != 2 {
t.Fatal("number of links incorrect")
}
- if nd.Links[0].Name != "b" {
+ if nd.Links()[0].Name != "b" {
t.Fatal("link order wrong")
}
- if nd.Links[1].Name != "c" {
+ if nd.Links()[1].Name != "c" {
t.Fatal("link order wrong")
}
@@ -45,33 +45,32 @@ func TestRemoveLink(t *testing.T) {
}
// ensure nothing else got touched
- if len(nd.Links) != 2 {
+ if len(nd.Links()) != 2 {
t.Fatal("number of links incorrect")
}
- if nd.Links[0].Name != "b" {
+ if nd.Links()[0].Name != "b" {
t.Fatal("link order wrong")
}
- if nd.Links[1].Name != "c" {
+ if nd.Links()[1].Name != "c" {
t.Fatal("link order wrong")
}
}
func TestFindLink(t *testing.T) {
ds := mdtest.Mock()
- k, err := ds.Add(new(Node))
+ k, err := ds.Add(new(ProtoNode))
if err != nil {
t.Fatal(err)
}
- nd := &Node{
- Links: []*Link{
- &Link{Name: "a", Hash: k.Hash()},
- &Link{Name: "c", Hash: k.Hash()},
- &Link{Name: "b", Hash: k.Hash()},
- },
- }
+ nd := &ProtoNode{}
+ nd.SetLinks([]*node.Link{
+ {Name: "a", Cid: k},
+ {Name: "c", Cid: k},
+ {Name: "b", Cid: k},
+ })
_, err = ds.Add(nd)
if err != nil {
@@ -107,19 +106,19 @@ func TestFindLink(t *testing.T) {
t.Fatal(err)
}
- if olnk.Hash.B58String() == k.String() {
+ if olnk.Cid.String() == k.String() {
t.Fatal("new link should have different hash")
}
}
func TestNodeCopy(t *testing.T) {
- nd := &Node{
- Links: []*Link{
- &Link{Name: "a"},
- &Link{Name: "c"},
- &Link{Name: "b"},
- },
- }
+ nd := &ProtoNode{}
+ nd.SetLinks([]*node.Link{
+ {Name: "a"},
+ {Name: "c"},
+ {Name: "b"},
+ })
+
nd.SetData([]byte("testing"))
ond := nd.Copy()
diff --git a/merkledag/traverse/traverse.go b/merkledag/traverse/traverse.go
index 37c05042663..17e1b666c83 100644
--- a/merkledag/traverse/traverse.go
+++ b/merkledag/traverse/traverse.go
@@ -2,11 +2,10 @@
package traverse
import (
- "errors"
-
"context"
+ "errors"
- mdag "github.com/ipfs/go-ipfs/merkledag"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
// Order is an identifier for traversal algorithm orders
@@ -20,7 +19,7 @@ const (
// Options specifies a series of traversal options
type Options struct {
- DAG mdag.DAGService // the dagservice to fetch nodes
+ DAG node.NodeGetter // the dagservice to fetch nodes
Order Order // what order to traverse in
Func Func // the function to perform at each step
ErrFunc ErrFunc // see ErrFunc. Optional
@@ -30,7 +29,7 @@ type Options struct {
// State is a current traversal state
type State struct {
- Node *mdag.Node
+ Node node.Node
Depth int
}
@@ -39,13 +38,13 @@ type traversal struct {
seen map[string]struct{}
}
-func (t *traversal) shouldSkip(n *mdag.Node) (bool, error) {
+func (t *traversal) shouldSkip(n node.Node) (bool, error) {
if t.opts.SkipDuplicates {
- k := n.Key()
- if _, found := t.seen[string(k)]; found {
+ k := n.Cid()
+ if _, found := t.seen[k.KeyString()]; found {
return true, nil
}
- t.seen[string(k)] = struct{}{}
+ t.seen[k.KeyString()] = struct{}{}
}
return false, nil
@@ -59,9 +58,9 @@ func (t *traversal) callFunc(next State) error {
// stop processing. if it returns a nil node, just skip it.
//
// the error handling is a little complicated.
-func (t *traversal) getNode(link *mdag.Link) (*mdag.Node, error) {
+func (t *traversal) getNode(link *node.Link) (node.Node, error) {
- getNode := func(l *mdag.Link) (*mdag.Node, error) {
+ getNode := func(l *node.Link) (node.Node, error) {
next, err := l.GetNode(context.TODO(), t.opts.DAG)
if err != nil {
return nil, err
@@ -99,7 +98,7 @@ type Func func(current State) error
//
type ErrFunc func(err error) error
-func Traverse(root *mdag.Node, o Options) error {
+func Traverse(root node.Node, o Options) error {
t := traversal{
opts: o,
seen: map[string]struct{}{},
@@ -145,7 +144,7 @@ func dfsPostTraverse(state State, t *traversal) error {
}
func dfsDescend(df dfsFunc, curr State, t *traversal) error {
- for _, l := range curr.Node.Links {
+ for _, l := range curr.Node.Links() {
node, err := t.getNode(l)
if err != nil {
return err
@@ -184,7 +183,7 @@ func bfsTraverse(root State, t *traversal) error {
return err
}
- for _, l := range curr.Node.Links {
+ for _, l := range curr.Node.Links() {
node, err := t.getNode(l)
if err != nil {
return err
diff --git a/merkledag/traverse/traverse_test.go b/merkledag/traverse/traverse_test.go
index 2bd3444116e..fc8d053fa1d 100644
--- a/merkledag/traverse/traverse_test.go
+++ b/merkledag/traverse/traverse_test.go
@@ -7,6 +7,8 @@ import (
mdag "github.com/ipfs/go-ipfs/merkledag"
mdagtest "github.com/ipfs/go-ipfs/merkledag/test"
+
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
func TestDFSPreNoSkip(t *testing.T) {
@@ -321,12 +323,12 @@ func TestBFSSkip(t *testing.T) {
`))
}
-func testWalkOutputs(t *testing.T, root *mdag.Node, opts Options, expect []byte) {
+func testWalkOutputs(t *testing.T, root node.Node, opts Options, expect []byte) {
expect = bytes.TrimLeft(expect, "\n")
buf := new(bytes.Buffer)
walk := func(current State) error {
- s := fmt.Sprintf("%d %s\n", current.Depth, current.Node.Data())
+ s := fmt.Sprintf("%d %s\n", current.Depth, current.Node.(*mdag.ProtoNode).Data())
t.Logf("walk: %s", s)
buf.Write([]byte(s))
return nil
@@ -348,7 +350,7 @@ func testWalkOutputs(t *testing.T, root *mdag.Node, opts Options, expect []byte)
}
}
-func newFan(t *testing.T, ds mdag.DAGService) *mdag.Node {
+func newFan(t *testing.T, ds mdag.DAGService) node.Node {
a := mdag.NodeWithData([]byte("/a"))
addLink(t, ds, a, child(t, ds, a, "aa"))
addLink(t, ds, a, child(t, ds, a, "ab"))
@@ -357,7 +359,7 @@ func newFan(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
-func newLinkedList(t *testing.T, ds mdag.DAGService) *mdag.Node {
+func newLinkedList(t *testing.T, ds mdag.DAGService) node.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
aaa := child(t, ds, aa, "aaa")
@@ -370,7 +372,7 @@ func newLinkedList(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
-func newBinaryTree(t *testing.T, ds mdag.DAGService) *mdag.Node {
+func newBinaryTree(t *testing.T, ds mdag.DAGService) node.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
ab := child(t, ds, a, "ab")
@@ -383,7 +385,7 @@ func newBinaryTree(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
-func newBinaryDAG(t *testing.T, ds mdag.DAGService) *mdag.Node {
+func newBinaryDAG(t *testing.T, ds mdag.DAGService) node.Node {
a := mdag.NodeWithData([]byte("/a"))
aa := child(t, ds, a, "aa")
aaa := child(t, ds, aa, "aaa")
@@ -400,16 +402,16 @@ func newBinaryDAG(t *testing.T, ds mdag.DAGService) *mdag.Node {
return a
}
-func addLink(t *testing.T, ds mdag.DAGService, a, b *mdag.Node) {
- to := string(a.Data()) + "2" + string(b.Data())
+func addLink(t *testing.T, ds mdag.DAGService, a, b node.Node) {
+ to := string(a.(*mdag.ProtoNode).Data()) + "2" + string(b.(*mdag.ProtoNode).Data())
if _, err := ds.Add(b); err != nil {
t.Error(err)
}
- if err := a.AddNodeLink(to, b); err != nil {
+ if err := a.(*mdag.ProtoNode).AddNodeLink(to, b.(*mdag.ProtoNode)); err != nil {
t.Error(err)
}
}
-func child(t *testing.T, ds mdag.DAGService, a *mdag.Node, name string) *mdag.Node {
- return mdag.NodeWithData([]byte(string(a.Data()) + "/" + name))
+func child(t *testing.T, ds mdag.DAGService, a node.Node, name string) node.Node {
+ return mdag.NodeWithData([]byte(string(a.(*mdag.ProtoNode).Data()) + "/" + name))
}
diff --git a/merkledag/utils/diff.go b/merkledag/utils/diff.go
index 2756004435d..2b5ddb72b2e 100644
--- a/merkledag/utils/diff.go
+++ b/merkledag/utils/diff.go
@@ -1,7 +1,6 @@
package dagutils
import (
- "bytes"
"fmt"
"path"
@@ -37,7 +36,7 @@ func (c *Change) String() string {
}
}
-func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Change) (*dag.Node, error) {
+func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.ProtoNode, cs []*Change) (*dag.ProtoNode, error) {
e := NewDagEditor(nd, ds)
for _, c := range cs {
switch c.Type {
@@ -46,7 +45,13 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
if err != nil {
return nil, err
}
- err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
+
+ childpb, ok := child.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
if err != nil {
return nil, err
}
@@ -66,7 +71,13 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
if err != nil {
return nil, err
}
- err = e.InsertNodeAtPath(ctx, c.Path, child, nil)
+
+ childpb, ok := child.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ err = e.InsertNodeAtPath(ctx, c.Path, childpb, nil)
if err != nil {
return nil, err
}
@@ -76,8 +87,8 @@ func ApplyChange(ctx context.Context, ds dag.DAGService, nd *dag.Node, cs []*Cha
return e.Finalize(ds)
}
-func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, error) {
- if len(a.Links) == 0 && len(b.Links) == 0 {
+func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.ProtoNode) ([]*Change, error) {
+ if len(a.Links()) == 0 && len(b.Links()) == 0 {
return []*Change{
&Change{
Type: Mod,
@@ -92,10 +103,10 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
clean_b := b.Copy()
// strip out unchanged stuff
- for _, lnk := range a.Links {
+ for _, lnk := range a.Links() {
l, err := b.GetNodeLink(lnk.Name)
if err == nil {
- if bytes.Equal(l.Hash, lnk.Hash) {
+ if l.Cid.Equals(lnk.Cid) {
// no change... ignore it
} else {
anode, err := lnk.GetNode(ctx, ds)
@@ -108,7 +119,17 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
return nil, err
}
- sub, err := Diff(ctx, ds, anode, bnode)
+ anodepb, ok := anode.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ bnodepb, ok := bnode.(*dag.ProtoNode)
+ if !ok {
+ return nil, dag.ErrNotProtobuf
+ }
+
+ sub, err := Diff(ctx, ds, anodepb, bnodepb)
if err != nil {
return nil, err
}
@@ -123,18 +144,18 @@ func Diff(ctx context.Context, ds dag.DAGService, a, b *dag.Node) ([]*Change, er
}
}
- for _, lnk := range clean_a.Links {
+ for _, lnk := range clean_a.Links() {
out = append(out, &Change{
Type: Remove,
Path: lnk.Name,
- Before: cid.NewCidV0(lnk.Hash),
+ Before: lnk.Cid,
})
}
- for _, lnk := range clean_b.Links {
+ for _, lnk := range clean_b.Links() {
out = append(out, &Change{
Type: Add,
Path: lnk.Name,
- After: cid.NewCidV0(lnk.Hash),
+ After: lnk.Cid,
})
}
diff --git a/merkledag/utils/utils.go b/merkledag/utils/utils.go
index a6f117ba4d4..a44d94621bc 100644
--- a/merkledag/utils/utils.go
+++ b/merkledag/utils/utils.go
@@ -15,7 +15,7 @@ import (
)
type Editor struct {
- root *dag.Node
+ root *dag.ProtoNode
// tmp is a temporary in memory (for now) dagstore for all of the
// intermediary nodes to be stored in
@@ -34,7 +34,7 @@ func NewMemoryDagService() dag.DAGService {
}
// root is the node to be modified, source is the dagstore to pull nodes from (optional)
-func NewDagEditor(root *dag.Node, source dag.DAGService) *Editor {
+func NewDagEditor(root *dag.ProtoNode, source dag.DAGService) *Editor {
return &Editor{
root: root,
tmp: NewMemoryDagService(),
@@ -42,7 +42,7 @@ func NewDagEditor(root *dag.Node, source dag.DAGService) *Editor {
}
}
-func (e *Editor) GetNode() *dag.Node {
+func (e *Editor) GetNode() *dag.ProtoNode {
return e.root.Copy()
}
@@ -50,7 +50,7 @@ func (e *Editor) GetDagService() dag.DAGService {
return e.tmp
}
-func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname string, childnd *dag.Node) (*dag.Node, error) {
+func addLink(ctx context.Context, ds dag.DAGService, root *dag.ProtoNode, childname string, childnd *dag.ProtoNode) (*dag.ProtoNode, error) {
if childname == "" {
return nil, errors.New("cannot create link with no name!")
}
@@ -76,7 +76,7 @@ func addLink(ctx context.Context, ds dag.DAGService, root *dag.Node, childname s
return root, nil
}
-func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag.Node, create func() *dag.Node) error {
+func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag.ProtoNode, create func() *dag.ProtoNode) error {
splpath := path.SplitList(pth)
nd, err := e.insertNodeAtPath(ctx, e.root, splpath, toinsert, create)
if err != nil {
@@ -86,12 +86,12 @@ func (e *Editor) InsertNodeAtPath(ctx context.Context, pth string, toinsert *dag
return nil
}
-func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.Node, path []string, toinsert *dag.Node, create func() *dag.Node) (*dag.Node, error) {
+func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.ProtoNode, path []string, toinsert *dag.ProtoNode, create func() *dag.ProtoNode) (*dag.ProtoNode, error) {
if len(path) == 1 {
return addLink(ctx, e.tmp, root, path[0], toinsert)
}
- nd, err := root.GetLinkedNode(ctx, e.tmp, path[0])
+ nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
if err != nil {
// if 'create' is true, we create directories on the way down as needed
if err == dag.ErrLinkNotFound && create != nil {
@@ -99,7 +99,7 @@ func (e *Editor) insertNodeAtPath(ctx context.Context, root *dag.Node, path []st
err = nil // no longer an error case
} else if err == dag.ErrNotFound {
// try finding it in our source dagstore
- nd, err = root.GetLinkedNode(ctx, e.src, path[0])
+ nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
}
// if we receive an ErrNotFound, then our second 'GetLinkedNode' call
@@ -140,7 +140,7 @@ func (e *Editor) RmLink(ctx context.Context, pth string) error {
return nil
}
-func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*dag.Node, error) {
+func (e *Editor) rmLink(ctx context.Context, root *dag.ProtoNode, path []string) (*dag.ProtoNode, error) {
if len(path) == 1 {
// base case, remove node in question
err := root.RemoveNodeLink(path[0])
@@ -157,9 +157,9 @@ func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*da
}
// search for node in both tmp dagstore and source dagstore
- nd, err := root.GetLinkedNode(ctx, e.tmp, path[0])
+ nd, err := root.GetLinkedProtoNode(ctx, e.tmp, path[0])
if err == dag.ErrNotFound {
- nd, err = root.GetLinkedNode(ctx, e.src, path[0])
+ nd, err = root.GetLinkedProtoNode(ctx, e.src, path[0])
}
if err != nil {
@@ -187,19 +187,19 @@ func (e *Editor) rmLink(ctx context.Context, root *dag.Node, path []string) (*da
return root, nil
}
-func (e *Editor) Finalize(ds dag.DAGService) (*dag.Node, error) {
+func (e *Editor) Finalize(ds dag.DAGService) (*dag.ProtoNode, error) {
nd := e.GetNode()
err := copyDag(nd, e.tmp, ds)
return nd, err
}
-func copyDag(nd *dag.Node, from, to dag.DAGService) error {
+func copyDag(nd *dag.ProtoNode, from, to dag.DAGService) error {
_, err := to.Add(nd)
if err != nil {
return err
}
- for _, lnk := range nd.Links {
+ for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), from)
if err != nil {
if err == dag.ErrNotFound {
@@ -210,7 +210,12 @@ func copyDag(nd *dag.Node, from, to dag.DAGService) error {
return err
}
- err = copyDag(child, from, to)
+ childpb, ok := child.(*dag.ProtoNode)
+ if !ok {
+ return dag.ErrNotProtobuf
+ }
+
+ err = copyDag(childpb, from, to)
if err != nil {
return err
}
diff --git a/merkledag/utils/utils_test.go b/merkledag/utils/utils_test.go
index bf11cb8ee97..4f822e5cd4a 100644
--- a/merkledag/utils/utils_test.go
+++ b/merkledag/utils/utils_test.go
@@ -20,7 +20,7 @@ func TestAddLink(t *testing.T) {
t.Fatal(err)
}
- nd := new(dag.Node)
+ nd := new(dag.ProtoNode)
nnode, err := addLink(context.Background(), ds, nd, "fish", fishnode)
if err != nil {
t.Fatal(err)
@@ -37,11 +37,11 @@ func TestAddLink(t *testing.T) {
}
}
-func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth string, exp *cid.Cid) {
+func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.ProtoNode, pth string, exp *cid.Cid) {
parts := path.SplitList(pth)
cur := root
for _, e := range parts {
- nxt, err := cur.GetLinkedNode(context.Background(), ds, e)
+ nxt, err := cur.GetLinkedProtoNode(context.Background(), ds, e)
if err != nil {
t.Fatal(err)
}
@@ -56,7 +56,7 @@ func assertNodeAtPath(t *testing.T, ds dag.DAGService, root *dag.Node, pth strin
}
func TestInsertNode(t *testing.T) {
- root := new(dag.Node)
+ root := new(dag.ProtoNode)
e := NewDagEditor(root, nil)
testInsert(t, e, "a", "anodefortesting", false, "")
@@ -83,10 +83,10 @@ func testInsert(t *testing.T, e *Editor, path, data string, create bool, experr
t.Fatal(err)
}
- var c func() *dag.Node
+ var c func() *dag.ProtoNode
if create {
- c = func() *dag.Node {
- return &dag.Node{}
+ c = func() *dag.ProtoNode {
+ return &dag.ProtoNode{}
}
}
diff --git a/mfs/dir.go b/mfs/dir.go
index 8bc486cb7a6..3a1c7be8ef3 100644
--- a/mfs/dir.go
+++ b/mfs/dir.go
@@ -28,7 +28,7 @@ type Directory struct {
files map[string]*File
lock sync.Mutex
- node *dag.Node
+ node *dag.ProtoNode
ctx context.Context
modTime time.Time
@@ -36,7 +36,7 @@ type Directory struct {
name string
}
-func NewDirectory(ctx context.Context, name string, node *dag.Node, parent childCloser, dserv dag.DAGService) *Directory {
+func NewDirectory(ctx context.Context, name string, node *dag.ProtoNode, parent childCloser, dserv dag.DAGService) *Directory {
return &Directory{
dserv: dserv,
ctx: ctx,
@@ -51,7 +51,7 @@ func NewDirectory(ctx context.Context, name string, node *dag.Node, parent child
// closeChild updates the child by the given name to the dag node 'nd'
// and changes its own dag node
-func (d *Directory) closeChild(name string, nd *dag.Node, sync bool) error {
+func (d *Directory) closeChild(name string, nd *dag.ProtoNode, sync bool) error {
mynd, err := d.closeChildUpdate(name, nd, sync)
if err != nil {
return err
@@ -64,7 +64,7 @@ func (d *Directory) closeChild(name string, nd *dag.Node, sync bool) error {
}
// closeChildUpdate is the portion of closeChild that needs to be locked around
-func (d *Directory) closeChildUpdate(name string, nd *dag.Node, sync bool) (*dag.Node, error) {
+func (d *Directory) closeChildUpdate(name string, nd *dag.ProtoNode, sync bool) (*dag.ProtoNode, error) {
d.lock.Lock()
defer d.lock.Unlock()
@@ -79,7 +79,7 @@ func (d *Directory) closeChildUpdate(name string, nd *dag.Node, sync bool) (*dag
return nil, nil
}
-func (d *Directory) flushCurrentNode() (*dag.Node, error) {
+func (d *Directory) flushCurrentNode() (*dag.ProtoNode, error) {
_, err := d.dserv.Add(d.node)
if err != nil {
return nil, err
@@ -88,7 +88,7 @@ func (d *Directory) flushCurrentNode() (*dag.Node, error) {
return d.node.Copy(), nil
}
-func (d *Directory) updateChild(name string, nd *dag.Node) error {
+func (d *Directory) updateChild(name string, nd *dag.ProtoNode) error {
err := d.node.RemoveNodeLink(name)
if err != nil && err != dag.ErrNotFound {
return err
@@ -120,7 +120,7 @@ func (d *Directory) childNode(name string) (FSNode, error) {
}
// cacheNode caches a node into d.childDirs or d.files and returns the FSNode.
-func (d *Directory) cacheNode(name string, nd *dag.Node) (FSNode, error) {
+func (d *Directory) cacheNode(name string, nd *dag.ProtoNode) (FSNode, error) {
i, err := ft.FromBytes(nd.Data())
if err != nil {
return nil, err
@@ -161,14 +161,16 @@ func (d *Directory) Uncache(name string) {
// childFromDag searches through this directories dag node for a child link
// with the given name
-func (d *Directory) childFromDag(name string) (*dag.Node, error) {
- for _, lnk := range d.node.Links {
- if lnk.Name == name {
- return lnk.GetNode(d.ctx, d.dserv)
- }
+func (d *Directory) childFromDag(name string) (*dag.ProtoNode, error) {
+ pbn, err := d.node.GetLinkedProtoNode(d.ctx, d.dserv, name)
+ switch err {
+ case nil:
+ return pbn, nil
+ case dag.ErrLinkNotFound:
+ return nil, os.ErrNotExist
+ default:
+ return nil, err
}
-
- return nil, os.ErrNotExist
}
// childUnsync returns the child under this directory by the given name
@@ -206,7 +208,7 @@ func (d *Directory) ListNames() []string {
names[n] = struct{}{}
}
- for _, l := range d.node.Links {
+ for _, l := range d.node.Links() {
names[l.Name] = struct{}{}
}
@@ -224,7 +226,7 @@ func (d *Directory) List() ([]NodeListing, error) {
defer d.lock.Unlock()
var out []NodeListing
- for _, l := range d.node.Links {
+ for _, l := range d.node.Links() {
child := NodeListing{}
child.Name = l.Name
@@ -270,7 +272,7 @@ func (d *Directory) Mkdir(name string) (*Directory, error) {
}
}
- ndir := new(dag.Node)
+ ndir := new(dag.ProtoNode)
ndir.SetData(ft.FolderPBData())
_, err = d.dserv.Add(ndir)
@@ -321,7 +323,7 @@ func (d *Directory) Flush() error {
}
// AddChild adds the node 'nd' under this directory giving it the name 'name'
-func (d *Directory) AddChild(name string, nd *dag.Node) error {
+func (d *Directory) AddChild(name string, nd *dag.ProtoNode) error {
d.lock.Lock()
defer d.lock.Unlock()
@@ -382,7 +384,7 @@ func (d *Directory) Path() string {
return out
}
-func (d *Directory) GetNode() (*dag.Node, error) {
+func (d *Directory) GetNode() (*dag.ProtoNode, error) {
d.lock.Lock()
defer d.lock.Unlock()
diff --git a/mfs/file.go b/mfs/file.go
index bbd7b48c212..373a9dd1dca 100644
--- a/mfs/file.go
+++ b/mfs/file.go
@@ -19,12 +19,12 @@ type File struct {
desclock sync.RWMutex
dserv dag.DAGService
- node *dag.Node
+ node *dag.ProtoNode
nodelk sync.Mutex
}
// NewFile returns a NewFile object with the given parameters
-func NewFile(name string, node *dag.Node, parent childCloser, dserv dag.DAGService) (*File, error) {
+func NewFile(name string, node *dag.ProtoNode, parent childCloser, dserv dag.DAGService) (*File, error) {
return &File{
dserv: dserv,
parent: parent,
@@ -94,7 +94,7 @@ func (fi *File) Size() (int64, error) {
}
// GetNode returns the dag node associated with this file
-func (fi *File) GetNode() (*dag.Node, error) {
+func (fi *File) GetNode() (*dag.ProtoNode, error) {
fi.nodelk.Lock()
defer fi.nodelk.Unlock()
return fi.node, nil
diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go
index 70e96c20042..dcec37356c1 100644
--- a/mfs/mfs_test.go
+++ b/mfs/mfs_test.go
@@ -2,6 +2,7 @@ package mfs
import (
"bytes"
+ "context"
"errors"
"fmt"
"io"
@@ -23,14 +24,14 @@ import (
ft "github.com/ipfs/go-ipfs/unixfs"
uio "github.com/ipfs/go-ipfs/unixfs/io"
- "context"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
u "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore"
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
-func emptyDirNode() *dag.Node {
+func emptyDirNode() *dag.ProtoNode {
return dag.NodeWithData(ft.FolderPBData())
}
@@ -41,12 +42,12 @@ func getDagserv(t *testing.T) dag.DAGService {
return dag.NewDAGService(blockserv)
}
-func getRandFile(t *testing.T, ds dag.DAGService, size int64) *dag.Node {
+func getRandFile(t *testing.T, ds dag.DAGService, size int64) *dag.ProtoNode {
r := io.LimitReader(u.NewTimeSeededRand(), size)
return fileNodeFromReader(t, ds, r)
}
-func fileNodeFromReader(t *testing.T, ds dag.DAGService, r io.Reader) *dag.Node {
+func fileNodeFromReader(t *testing.T, ds dag.DAGService, r io.Reader) *dag.ProtoNode {
nd, err := importer.BuildDagFromReader(ds, chunk.DefaultSplitter(r))
if err != nil {
t.Fatal(err)
@@ -124,7 +125,7 @@ func compStrArrs(a, b []string) bool {
return true
}
-func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.Node, pth string) error {
+func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.ProtoNode, pth string) error {
parts := path.SplitList(pth)
cur := root
for i, d := range parts[:len(parts)-1] {
@@ -173,7 +174,7 @@ func assertFileAtPath(ds dag.DAGService, root *Directory, exp *dag.Node, pth str
return nil
}
-func catNode(ds dag.DAGService, nd *dag.Node) ([]byte, error) {
+func catNode(ds dag.DAGService, nd *dag.ProtoNode) ([]byte, error) {
r, err := uio.NewDagReader(context.TODO(), nd, ds)
if err != nil {
return nil, err
@@ -280,7 +281,7 @@ func TestDirectoryLoadFromDag(t *testing.T) {
t.Fatal(err)
}
- fihash := nd.Multihash()
+ fihash := nd.Cid()
dir := emptyDirNode()
_, err = ds.Add(dir)
@@ -288,19 +289,19 @@ func TestDirectoryLoadFromDag(t *testing.T) {
t.Fatal(err)
}
- dirhash := dir.Multihash()
+ dirhash := dir.Cid()
top := emptyDirNode()
- top.Links = []*dag.Link{
- &dag.Link{
+ top.SetLinks([]*node.Link{
+ {
Name: "a",
- Hash: fihash,
+ Cid: fihash,
},
- &dag.Link{
+ {
Name: "b",
- Hash: dirhash,
+ Cid: dirhash,
},
- }
+ })
err = rootdir.AddChild("foo", top)
if err != nil {
diff --git a/mfs/ops.go b/mfs/ops.go
index 94c6c30df8b..6464d840411 100644
--- a/mfs/ops.go
+++ b/mfs/ops.go
@@ -87,7 +87,7 @@ func lookupDir(r *Root, path string) (*Directory, error) {
}
// PutNode inserts 'nd' at 'path' in the given mfs
-func PutNode(r *Root, path string, nd *dag.Node) error {
+func PutNode(r *Root, path string, nd *dag.ProtoNode) error {
dirp, filename := gopath.Split(path)
if filename == "" {
return fmt.Errorf("cannot create file with empty name")
diff --git a/mfs/system.go b/mfs/system.go
index 8f10a93c7ee..2a69a187883 100644
--- a/mfs/system.go
+++ b/mfs/system.go
@@ -29,7 +29,7 @@ var log = logging.Logger("mfs")
var ErrIsDirectory = errors.New("error: is a directory")
type childCloser interface {
- closeChild(string, *dag.Node, bool) error
+ closeChild(string, *dag.ProtoNode, bool) error
}
type NodeType int
@@ -41,7 +41,7 @@ const (
// FSNode represents any node (directory, root, or file) in the mfs filesystem
type FSNode interface {
- GetNode() (*dag.Node, error)
+ GetNode() (*dag.ProtoNode, error)
Flush() error
Type() NodeType
}
@@ -49,7 +49,7 @@ type FSNode interface {
// Root represents the root of a filesystem tree
type Root struct {
// node is the merkledag root
- node *dag.Node
+ node *dag.ProtoNode
// val represents the node. It can either be a File or a Directory
val FSNode
@@ -64,7 +64,7 @@ type Root struct {
type PubFunc func(context.Context, *cid.Cid) error
// newRoot creates a new Root and starts up a republisher routine for it
-func NewRoot(parent context.Context, ds dag.DAGService, node *dag.Node, pf PubFunc) (*Root, error) {
+func NewRoot(parent context.Context, ds dag.DAGService, node *dag.ProtoNode, pf PubFunc) (*Root, error) {
var repub *Republisher
if pf != nil {
@@ -118,7 +118,7 @@ func (kr *Root) Flush() error {
// closeChild implements the childCloser interface, and signals to the publisher that
// there are changes ready to be published
-func (kr *Root) closeChild(name string, nd *dag.Node, sync bool) error {
+func (kr *Root) closeChild(name string, nd *dag.ProtoNode, sync bool) error {
c, err := kr.dserv.Add(nd)
if err != nil {
return err
diff --git a/package.json b/package.json
index 69764805b4d..e411881fe34 100644
--- a/package.json
+++ b/package.json
@@ -269,6 +269,12 @@
"hash": "QmTgcWwxttM74AY7UYA6qMP9WpzfBEjbZntx7ZWLttRMJJ",
"name": "floodsub",
"version": "0.7.0"
+ },
+ {
+ "author": "whyrusleeping",
+ "hash": "QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB",
+ "name": "go-ipld-node",
+ "version": "0.1.0"
}
],
"gxVersion": "0.4.0",
diff --git a/path/resolver.go b/path/resolver.go
index fb9cbf37e0d..e4bfe8f796c 100644
--- a/path/resolver.go
+++ b/path/resolver.go
@@ -2,16 +2,16 @@
package path
import (
+ "context"
"errors"
"fmt"
"time"
- "context"
- mh "gx/ipfs/QmYDds3421prZgqKbLpEK7T9Aa2eVdQ7o3YarX1LVLdP2J/go-multihash"
-
merkledag "github.com/ipfs/go-ipfs/merkledag"
+
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
var log = logging.Logger("path")
@@ -23,11 +23,11 @@ var ErrNoComponents = errors.New(
// ErrNoLink is returned when a link is not found in a path
type ErrNoLink struct {
Name string
- Node mh.Multihash
+ Node *cid.Cid
}
func (e ErrNoLink) Error() string {
- return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.B58String())
+ return fmt.Sprintf("no link named %q under %s", e.Name, e.Node.String())
}
// Resolver provides path resolution to IPFS
@@ -62,7 +62,7 @@ func SplitAbsPath(fpath Path) (*cid.Cid, []string, error) {
// ResolvePath fetches the node for given path. It returns the last item
// returned by ResolvePathComponents.
-func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (*merkledag.Node, error) {
+func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (node.Node, error) {
// validate path
if err := fpath.IsValid(); err != nil {
return nil, err
@@ -78,7 +78,7 @@ func (s *Resolver) ResolvePath(ctx context.Context, fpath Path) (*merkledag.Node
// ResolvePathComponents fetches the nodes for each segment of the given path.
// It uses the first path component as a hash (key) of the first node, then
// resolves all other components walking the links, with ResolveLinks.
-func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*merkledag.Node, error) {
+func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]node.Node, error) {
h, parts, err := SplitAbsPath(fpath)
if err != nil {
return nil, err
@@ -100,28 +100,33 @@ func (s *Resolver) ResolvePathComponents(ctx context.Context, fpath Path) ([]*me
//
// ResolveLinks(nd, []string{"foo", "bar", "baz"})
// would retrieve "baz" in ("bar" in ("foo" in nd.Links).Links).Links
-func (s *Resolver) ResolveLinks(ctx context.Context, ndd *merkledag.Node, names []string) ([]*merkledag.Node, error) {
+func (s *Resolver) ResolveLinks(ctx context.Context, ndd node.Node, names []string) ([]node.Node, error) {
- result := make([]*merkledag.Node, 0, len(names)+1)
+ result := make([]node.Node, 0, len(names)+1)
result = append(result, ndd)
nd := ndd // dup arg workaround
// for each of the path components
- for _, name := range names {
-
+ for len(names) > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Minute)
defer cancel()
- nextnode, err := nd.GetLinkedNode(ctx, s.DAG, name)
+ lnk, rest, err := nd.Resolve(names)
if err == merkledag.ErrLinkNotFound {
- n := nd.Multihash()
- return result, ErrNoLink{Name: name, Node: n}
+ n := nd.Cid()
+ return result, ErrNoLink{Name: names[0], Node: n}
} else if err != nil {
- return append(result, nextnode), err
+ return result, err
+ }
+
+ nextnode, err := s.DAG.Get(ctx, lnk.Cid)
+ if err != nil {
+ return result, err
}
nd = nextnode
+ names = rest
result = append(result, nextnode)
}
return result, nil
diff --git a/path/resolver_test.go b/path/resolver_test.go
index 77e7a27e10c..652f3879653 100644
--- a/path/resolver_test.go
+++ b/path/resolver_test.go
@@ -1,20 +1,21 @@
package path_test
import (
+ "context"
"fmt"
"testing"
- context "context"
-
merkledag "github.com/ipfs/go-ipfs/merkledag"
dagmock "github.com/ipfs/go-ipfs/merkledag/test"
path "github.com/ipfs/go-ipfs/path"
+
key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
util "gx/ipfs/Qmb912gdngC1UWwTkhuW8knyRbcWeu5kqkxBpveLmW8bSr/go-ipfs-util"
)
-func randNode() (*merkledag.Node, key.Key) {
- node := new(merkledag.Node)
+func randNode() (*merkledag.ProtoNode, key.Key) {
+ node := new(merkledag.ProtoNode)
node.SetData(make([]byte, 32))
util.NewTimeSeededRand().Read(node.Data())
k := node.Key()
@@ -39,7 +40,7 @@ func TestRecurivePathResolution(t *testing.T) {
t.Fatal(err)
}
- for _, n := range []*merkledag.Node{a, b, c} {
+ for _, n := range []node.Node{a, b, c} {
_, err = dagService.Add(n)
if err != nil {
t.Fatal(err)
@@ -60,7 +61,7 @@ func TestRecurivePathResolution(t *testing.T) {
t.Fatal(err)
}
- key := node.Key()
+ key := node.Cid()
if key.String() != cKey.String() {
t.Fatal(fmt.Errorf(
"recursive path resolution failed for %s: %s != %s",
diff --git a/pin/pin.go b/pin/pin.go
index ca36a558889..10c60c25698 100644
--- a/pin/pin.go
+++ b/pin/pin.go
@@ -3,17 +3,17 @@
package pin
import (
+ "context"
"fmt"
"os"
"sync"
"time"
mdag "github.com/ipfs/go-ipfs/merkledag"
- key "gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
- context "context"
logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
ds "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore"
)
@@ -83,7 +83,7 @@ func StringToPinMode(s string) (PinMode, bool) {
type Pinner interface {
IsPinned(*cid.Cid) (string, bool, error)
IsPinnedWithType(*cid.Cid, PinMode) (string, bool, error)
- Pin(context.Context, *mdag.Node, bool) error
+ Pin(context.Context, node.Node, bool) error
Unpin(context.Context, *cid.Cid, bool) error
// Check if a set of keys are pinned, more efficient than
@@ -162,11 +162,10 @@ func NewPinner(dstore ds.Datastore, serv, internal mdag.DAGService) Pinner {
}
// Pin the given node, optionally recursive
-func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error {
+func (p *pinner) Pin(ctx context.Context, node node.Node, recurse bool) error {
p.lock.Lock()
defer p.lock.Unlock()
c := node.Cid()
- k := key.Key(c.Hash())
if recurse {
if p.recursePin.Has(c) {
@@ -190,7 +189,7 @@ func (p *pinner) Pin(ctx context.Context, node *mdag.Node, recurse bool) error {
}
if p.recursePin.Has(c) {
- return fmt.Errorf("%s already pinned recursively", k.B58String())
+ return fmt.Errorf("%s already pinned recursively", c.String())
}
p.directPin.Add(c)
@@ -248,7 +247,6 @@ func (p *pinner) IsPinnedWithType(c *cid.Cid, mode PinMode) (string, bool, error
// isPinnedWithType is the implementation of IsPinnedWithType that does not lock.
// intended for use by other pinned methods that already take locks
func (p *pinner) isPinnedWithType(c *cid.Cid, mode PinMode) (string, bool, error) {
- k := key.Key(c.Hash())
switch mode {
case Any, Direct, Indirect, Recursive, Internal:
default:
@@ -279,7 +277,7 @@ func (p *pinner) isPinnedWithType(c *cid.Cid, mode PinMode) (string, bool, error
// Default is Indirect
for _, rc := range p.recursePin.Keys() {
- has, err := hasChild(p.dserv, rc, k)
+ has, err := hasChild(p.dserv, rc, c)
if err != nil {
return "", false, err
}
@@ -317,7 +315,7 @@ func (p *pinner) CheckIfPinned(cids ...*cid.Cid) ([]Pinned, error) {
return err
}
for _, lnk := range links {
- c := cid.NewCidV0(lnk.Hash)
+ c := lnk.Cid
if toCheck.Has(c) {
pinned = append(pinned,
@@ -403,12 +401,17 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error)
return nil, fmt.Errorf("cannot find pinning root object: %v", err)
}
+ rootpb, ok := root.(*mdag.ProtoNode)
+ if !ok {
+ return nil, mdag.ErrNotProtobuf
+ }
+
internalset := cid.NewSet()
internalset.Add(rootCid)
recordInternal := internalset.Add
{ // load recursive set
- recurseKeys, err := loadSet(ctx, internal, root, linkRecursive, recordInternal)
+ recurseKeys, err := loadSet(ctx, internal, rootpb, linkRecursive, recordInternal)
if err != nil {
return nil, fmt.Errorf("cannot load recursive pins: %v", err)
}
@@ -416,7 +419,7 @@ func LoadPinner(d ds.Datastore, dserv, internal mdag.DAGService) (Pinner, error)
}
{ // load direct set
- directKeys, err := loadSet(ctx, internal, root, linkDirect, recordInternal)
+ directKeys, err := loadSet(ctx, internal, rootpb, linkDirect, recordInternal)
if err != nil {
return nil, fmt.Errorf("cannot load direct pins: %v", err)
}
@@ -453,7 +456,7 @@ func (p *pinner) Flush() error {
internalset := cid.NewSet()
recordInternal := internalset.Add
- root := &mdag.Node{}
+ root := &mdag.ProtoNode{}
{
n, err := storeSet(ctx, p.internal, p.directPin.Keys(), recordInternal)
if err != nil {
@@ -475,7 +478,7 @@ func (p *pinner) Flush() error {
}
// add the empty node, its referenced by the pin sets but never created
- _, err := p.internal.Add(new(mdag.Node))
+ _, err := p.internal.Add(new(mdag.ProtoNode))
if err != nil {
return err
}
@@ -516,14 +519,14 @@ func (p *pinner) PinWithMode(c *cid.Cid, mode PinMode) {
}
}
-func hasChild(ds mdag.LinkService, root *cid.Cid, child key.Key) (bool, error) {
+func hasChild(ds mdag.LinkService, root *cid.Cid, child *cid.Cid) (bool, error) {
links, err := ds.GetLinks(context.Background(), root)
if err != nil {
return false, err
}
for _, lnk := range links {
- c := cid.NewCidV0(lnk.Hash)
- if key.Key(c.Hash()) == child {
+ c := lnk.Cid
+ if lnk.Cid.Equals(child) {
return true, nil
}
diff --git a/pin/pin_test.go b/pin/pin_test.go
index 911d0e88a45..787b8226a5a 100644
--- a/pin/pin_test.go
+++ b/pin/pin_test.go
@@ -16,8 +16,8 @@ import (
dssync "gx/ipfs/QmbzuUusHqaLLoNTDEVLcSF6vZDHZDLPC7p4bztRvvkXxU/go-datastore/sync"
)
-func randNode() (*mdag.Node, *cid.Cid) {
- nd := new(mdag.Node)
+func randNode() (*mdag.ProtoNode, *cid.Cid) {
+ nd := new(mdag.ProtoNode)
nd.SetData(make([]byte, 32))
util.NewTimeSeededRand().Read(nd.Data())
k := nd.Cid()
diff --git a/pin/set.go b/pin/set.go
index 11d56188d09..eaaba7884c9 100644
--- a/pin/set.go
+++ b/pin/set.go
@@ -12,9 +12,11 @@ import (
"github.com/ipfs/go-ipfs/merkledag"
"github.com/ipfs/go-ipfs/pin/internal/pb"
+
cid "gx/ipfs/QmXUuRadqDq5BuFWzVU6VuKaSjTcNm1gNCtLvvP1TJCW4z/go-cid"
"gx/ipfs/QmYEoKZXHoAToWfhGF3vryhMn3WWhE1o2MasQ8uzY5iDi9/go-key"
"gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
const (
@@ -47,7 +49,7 @@ type itemIterator func() (c *cid.Cid, ok bool)
type keyObserver func(*cid.Cid)
type sortByHash struct {
- links []*merkledag.Link
+ links []*node.Link
}
func (s sortByHash) Len() int {
@@ -55,25 +57,27 @@ func (s sortByHash) Len() int {
}
func (s sortByHash) Less(a, b int) bool {
- return bytes.Compare(s.links[a].Hash, s.links[b].Hash) == -1
+ return bytes.Compare(s.links[a].Cid.Bytes(), s.links[b].Cid.Bytes()) == -1
}
func (s sortByHash) Swap(a, b int) {
s.links[a], s.links[b] = s.links[b], s.links[a]
}
-func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.Node, error) {
+func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint64, iter itemIterator, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
seed, err := randomSeed()
if err != nil {
return nil, err
}
-
- n := &merkledag.Node{Links: make([]*merkledag.Link, 0, defaultFanout+maxItems)}
+ links := make([]*node.Link, 0, defaultFanout+maxItems)
for i := 0; i < defaultFanout; i++ {
- n.Links = append(n.Links, &merkledag.Link{Hash: emptyKey.Hash()})
+ links = append(links, &node.Link{Cid: emptyKey})
}
// add emptyKey to our set of internal pinset objects
+ n := &merkledag.ProtoNode{}
+ n.SetLinks(links)
+
internalKeys(emptyKey)
hdr := &pb.Set{
@@ -87,17 +91,22 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint
if estimatedLen < maxItems {
// it'll probably fit
+ links := n.Links()
for i := 0; i < maxItems; i++ {
k, ok := iter()
if !ok {
// all done
break
}
- n.Links = append(n.Links, &merkledag.Link{Hash: k.Hash()})
+
+ links = append(links, &node.Link{Cid: k})
}
+
+ n.SetLinks(links)
+
// sort by hash, also swap item Data
s := sortByHash{
- links: n.Links[defaultFanout:],
+ links: n.Links()[defaultFanout:],
}
sort.Stable(s)
}
@@ -152,15 +161,15 @@ func storeItems(ctx context.Context, dag merkledag.DAGService, estimatedLen uint
internalKeys(childKey)
// overwrite the 'empty key' in the existing links array
- n.Links[h] = &merkledag.Link{
- Hash: childKey.Hash(),
+ n.Links()[h] = &node.Link{
+ Cid: childKey,
Size: size,
}
}
return n, nil
}
-func readHdr(n *merkledag.Node) (*pb.Set, error) {
+func readHdr(n *merkledag.ProtoNode) (*pb.Set, error) {
hdrLenRaw, consumed := binary.Uvarint(n.Data())
if consumed <= 0 {
return nil, errors.New("invalid Set header length")
@@ -180,13 +189,13 @@ func readHdr(n *merkledag.Node) (*pb.Set, error) {
if v := hdr.GetVersion(); v != 1 {
return nil, fmt.Errorf("unsupported Set version: %d", v)
}
- if uint64(hdr.GetFanout()) > uint64(len(n.Links)) {
+ if uint64(hdr.GetFanout()) > uint64(len(n.Links())) {
return nil, errors.New("impossibly large Fanout")
}
return &hdr, nil
}
-func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
+func writeHdr(n *merkledag.ProtoNode, hdr *pb.Set) error {
hdrData, err := proto.Marshal(hdr)
if err != nil {
return err
@@ -205,22 +214,22 @@ func writeHdr(n *merkledag.Node, hdr *pb.Set) error {
return nil
}
-type walkerFunc func(idx int, link *merkledag.Link) error
+type walkerFunc func(idx int, link *node.Link) error
-func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node, fn walkerFunc, children keyObserver) error {
+func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.ProtoNode, fn walkerFunc, children keyObserver) error {
hdr, err := readHdr(n)
if err != nil {
return err
}
// readHdr guarantees fanout is a safe value
fanout := hdr.GetFanout()
- for i, l := range n.Links[fanout:] {
+ for i, l := range n.Links()[fanout:] {
if err := fn(i, l); err != nil {
return err
}
}
- for _, l := range n.Links[:fanout] {
- c := cid.NewCidV0(l.Hash)
+ for _, l := range n.Links()[:fanout] {
+ c := l.Cid
children(c)
if c.Equals(emptyKey) {
continue
@@ -229,20 +238,26 @@ func walkItems(ctx context.Context, dag merkledag.DAGService, n *merkledag.Node,
if err != nil {
return err
}
- if err := walkItems(ctx, dag, subtree, fn, children); err != nil {
+
+ stpb, ok := subtree.(*merkledag.ProtoNode)
+ if !ok {
+ return merkledag.ErrNotProtobuf
+ }
+
+ if err := walkItems(ctx, dag, stpb, fn, children); err != nil {
return err
}
}
return nil
}
-func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node, name string, internalKeys keyObserver) ([]*cid.Cid, error) {
+func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.ProtoNode, name string, internalKeys keyObserver) ([]*cid.Cid, error) {
l, err := root.GetNodeLink(name)
if err != nil {
return nil, err
}
- lnkc := cid.NewCidV0(l.Hash)
+ lnkc := l.Cid
internalKeys(lnkc)
n, err := l.GetNode(ctx, dag)
@@ -250,12 +265,18 @@ func loadSet(ctx context.Context, dag merkledag.DAGService, root *merkledag.Node
return nil, err
}
+ pbn, ok := n.(*merkledag.ProtoNode)
+ if !ok {
+ return nil, merkledag.ErrNotProtobuf
+ }
+
var res []*cid.Cid
- walk := func(idx int, link *merkledag.Link) error {
- res = append(res, cid.NewCidV0(link.Hash))
+ walk := func(idx int, link *node.Link) error {
+ res = append(res, link.Cid)
return nil
}
- if err := walkItems(ctx, dag, n, walk, internalKeys); err != nil {
+
+ if err := walkItems(ctx, dag, pbn, walk, internalKeys); err != nil {
return nil, err
}
return res, nil
@@ -273,7 +294,7 @@ func getCidListIterator(cids []*cid.Cid) itemIterator {
}
}
-func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.Node, error) {
+func storeSet(ctx context.Context, dag merkledag.DAGService, cids []*cid.Cid, internalKeys keyObserver) (*merkledag.ProtoNode, error) {
iter := getCidListIterator(cids)
n, err := storeItems(ctx, dag, uint64(len(cids)), iter, internalKeys)
diff --git a/pin/set_test.go b/pin/set_test.go
index e17906f6f02..335b59e99ae 100644
--- a/pin/set_test.go
+++ b/pin/set_test.go
@@ -38,7 +38,7 @@ func TestSet(t *testing.T) {
// weird wrapper node because loadSet expects us to pass an
// object pointing to multiple named sets
- setroot := &dag.Node{}
+ setroot := &dag.ProtoNode{}
err = setroot.AddNodeLinkClean("foo", out)
if err != nil {
t.Fatal(err)
diff --git a/tar/format.go b/tar/format.go
index 337c53f85b2..052ff22142f 100644
--- a/tar/format.go
+++ b/tar/format.go
@@ -3,6 +3,7 @@ package tarfmt
import (
"archive/tar"
"bytes"
+ "context"
"errors"
"io"
"io/ioutil"
@@ -14,9 +15,9 @@ import (
dagutil "github.com/ipfs/go-ipfs/merkledag/utils"
path "github.com/ipfs/go-ipfs/path"
uio "github.com/ipfs/go-ipfs/unixfs/io"
- logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
- context "context"
+ logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log"
+ node "gx/ipfs/QmZx42H5khbVQhV5odp66TApShV4XCujYazcvYduZ4TroB/go-ipld-node"
)
var log = logging.Logger("tarfmt")
@@ -34,7 +35,7 @@ func marshalHeader(h *tar.Header) ([]byte, error) {
return buf.Bytes(), nil
}
-func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
+func ImportTar(r io.Reader, ds dag.DAGService) (*dag.ProtoNode, error) {
rall, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
@@ -44,7 +45,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
tr := tar.NewReader(r)
- root := new(dag.Node)
+ root := new(dag.ProtoNode)
root.SetData([]byte("ipfs/tar"))
e := dagutil.NewDagEditor(root, ds)
@@ -58,7 +59,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
return nil, err
}
- header := new(dag.Node)
+ header := new(dag.ProtoNode)
headerBytes, err := marshalHeader(h)
if err != nil {
@@ -86,7 +87,7 @@ func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
}
path := escapePath(h.Name)
- err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
+ err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.ProtoNode { return new(dag.ProtoNode) })
if err != nil {
return nil, err
}
@@ -106,7 +107,7 @@ func escapePath(pth string) string {
}
type tarReader struct {
- links []*dag.Link
+ links []*node.Link
ds dag.DAGService
childRead *tarReader
@@ -170,9 +171,14 @@ func (tr *tarReader) Read(b []byte) (int, error) {
return 0, err
}
- tr.hdrBuf = bytes.NewReader(headerNd.Data())
+ hndpb, ok := headerNd.(*dag.ProtoNode)
+ if !ok {
+ return 0, dag.ErrNotProtobuf
+ }
+
+ tr.hdrBuf = bytes.NewReader(hndpb.Data())
- dataNd, err := headerNd.GetLinkedNode(tr.ctx, tr.ds, "data")
+ dataNd, err := hndpb.GetLinkedProtoNode(tr.ctx, tr.ds, "data")
if err != nil && err != dag.ErrLinkNotFound {
return 0, err
}
@@ -185,9 +191,9 @@ func (tr *tarReader) Read(b []byte) (int, error) {
}
tr.fileRead = &countReader{r: dr}
- } else if len(headerNd.Links) > 0 {
+ } else if len(headerNd.Links()) > 0 {
tr.childRead = &tarReader{
- links: headerNd.Links,
+ links: headerNd.Links(),
ds: tr.ds,
ctx: tr.ctx,
}
@@ -196,12 +202,12 @@ func (tr *tarReader) Read(b []byte) (int, error) {
return tr.Read(b)
}
-func ExportTar(ctx context.Context, root *dag.Node, ds dag.DAGService) (io.Reader, error) {
+func ExportTar(ctx context.Context, root *dag.ProtoNode, ds dag.DAGService) (io.Reader, error) {
if string(root.Data()) != "ipfs/tar" {
return nil, errors.New("not an ipfs tarchive")
}
return &tarReader{
- links: root.Links,
+ links: root.Links(),
ds: ds,
ctx: ctx,
}, nil
diff --git a/unixfs/archive/archive.go b/unixfs/archive/archive.go
index 8cc1ec2e113..a94c9f7afcc 100644
--- a/unixfs/archive/archive.go
+++ b/unixfs/archive/archive.go
@@ -30,7 +30,7 @@ func (i *identityWriteCloser) Close() error {
}
// DagArchive is equivalent to `ipfs getdag $hash | maybe_tar | maybe_gzip`
-func DagArchive(ctx cxt.Context, nd *mdag.Node, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
+func DagArchive(ctx cxt.Context, nd *mdag.ProtoNode, name string, dag mdag.DAGService, archive bool, compression int) (io.Reader, error) {
_, filename := path.Split(name)
diff --git a/unixfs/archive/tar/writer.go b/unixfs/archive/tar/writer.go
index 475b318a49b..f710d4063c4 100644
--- a/unixfs/archive/tar/writer.go
+++ b/unixfs/archive/tar/writer.go
@@ -34,7 +34,7 @@ func NewWriter(ctx cxt.Context, dag mdag.DAGService, archive bool, compression i
}, nil
}
-func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
+func (w *Writer) writeDir(nd *mdag.ProtoNode, fpath string) error {
if err := writeDirHeader(w.TarW, fpath); err != nil {
return err
}
@@ -45,8 +45,13 @@ func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
return err
}
- npath := path.Join(fpath, nd.Links[i].Name)
- if err := w.WriteNode(child, npath); err != nil {
+ childpb, ok := child.(*mdag.ProtoNode)
+ if !ok {
+ return mdag.ErrNotProtobuf
+ }
+
+ npath := path.Join(fpath, nd.Links()[i].Name)
+ if err := w.WriteNode(childpb, npath); err != nil {
return err
}
}
@@ -54,7 +59,7 @@ func (w *Writer) writeDir(nd *mdag.Node, fpath string) error {
return nil
}
-func (w *Writer) writeFile(nd *mdag.Node, pb *upb.Data, fpath string) error {
+func (w *Writer) writeFile(nd *mdag.ProtoNode, pb *upb.Data, fpath string) error {
if err := writeFileHeader(w.TarW, fpath, pb.GetFilesize()); err != nil {
return err
}
@@ -67,7 +72,7 @@ func (w *Writer) writeFile(nd *mdag.Node, pb *upb.Data, fpath string) error {
return nil
}
-func (w *Writer) WriteNode(nd *mdag.Node, fpath string) error {
+func (w *Writer) WriteNode(nd *mdag.ProtoNode, fpath string) error {
pb := new(upb.Data)
if err := proto.Unmarshal(nd.Data(), pb); err != nil {
return err
diff --git a/unixfs/format.go b/unixfs/format.go
index 0235f0c7c73..a8ade430c9b 100644
--- a/unixfs/format.go
+++ b/unixfs/format.go
@@ -224,6 +224,6 @@ func BytesForMetadata(m *Metadata) ([]byte, error) {
return proto.Marshal(pbd)
}
-func EmptyDirNode() *dag.Node {
+func EmptyDirNode() *dag.ProtoNode {
return dag.NodeWithData(FolderPBData())
}
diff --git a/unixfs/io/dagreader.go b/unixfs/io/dagreader.go
index f78fbbf77e7..086e5038ce7 100644
--- a/unixfs/io/dagreader.go
+++ b/unixfs/io/dagreader.go
@@ -24,7 +24,7 @@ type DagReader struct {
serv mdag.DAGService
// the node being read
- node *mdag.Node
+ node *mdag.ProtoNode
// cached protobuf structure from node.Data
pbdata *ftpb.Data
@@ -58,7 +58,7 @@ type ReadSeekCloser interface {
// NewDagReader creates a new reader object that reads the data represented by
// the given node, using the passed in DAGService for data retreival
-func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*DagReader, error) {
+func NewDagReader(ctx context.Context, n *mdag.ProtoNode, serv mdag.DAGService) (*DagReader, error) {
pb := new(ftpb.Data)
if err := proto.Unmarshal(n.Data(), pb); err != nil {
return nil, err
@@ -71,14 +71,19 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
case ftpb.Data_File, ftpb.Data_Raw:
return NewDataFileReader(ctx, n, pb, serv), nil
case ftpb.Data_Metadata:
- if len(n.Links) == 0 {
+ if len(n.Links()) == 0 {
return nil, errors.New("incorrectly formatted metadata object")
}
- child, err := n.Links[0].GetNode(ctx, serv)
+ child, err := n.Links()[0].GetNode(ctx, serv)
if err != nil {
return nil, err
}
- return NewDagReader(ctx, child, serv)
+
+ childpb, ok := child.(*mdag.ProtoNode)
+ if !ok {
+ return nil, mdag.ErrNotProtobuf
+ }
+ return NewDagReader(ctx, childpb, serv)
case ftpb.Data_Symlink:
return nil, ErrCantReadSymlinks
default:
@@ -86,7 +91,7 @@ func NewDagReader(ctx context.Context, n *mdag.Node, serv mdag.DAGService) (*Dag
}
}
-func NewDataFileReader(ctx context.Context, n *mdag.Node, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
+func NewDataFileReader(ctx context.Context, n *mdag.ProtoNode, pb *ftpb.Data, serv mdag.DAGService) *DagReader {
fctx, cancel := context.WithCancel(ctx)
promises := mdag.GetDAG(fctx, serv, n)
return &DagReader{
@@ -114,8 +119,13 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
}
dr.linkPosition++
+ nxtpb, ok := nxt.(*mdag.ProtoNode)
+ if !ok {
+ return mdag.ErrNotProtobuf
+ }
+
pb := new(ftpb.Data)
- err = proto.Unmarshal(nxt.Data(), pb)
+ err = proto.Unmarshal(nxtpb.Data(), pb)
if err != nil {
return fmt.Errorf("incorrectly formatted protobuf: %s", err)
}
@@ -125,7 +135,7 @@ func (dr *DagReader) precalcNextBuf(ctx context.Context) error {
// A directory should not exist within a file
return ft.ErrInvalidDirLocation
case ftpb.Data_File:
- dr.buf = NewDataFileReader(dr.ctx, nxt, pb, dr.serv)
+ dr.buf = NewDataFileReader(dr.ctx, nxtpb, pb, dr.serv)
return nil
case ftpb.Data_Raw:
dr.buf = NewRSNCFromBytes(pb.GetData())
diff --git a/unixfs/io/dirbuilder.go b/unixfs/io/dirbuilder.go
index 967e22c4bbd..ac316f8a275 100644
--- a/unixfs/io/dirbuilder.go
+++ b/unixfs/io/dirbuilder.go
@@ -10,12 +10,12 @@ import (
type directoryBuilder struct {
dserv mdag.DAGService
- dirnode *mdag.Node
+ dirnode *mdag.ProtoNode
}
// NewEmptyDirectory returns an empty merkledag Node with a folder Data chunk
-func NewEmptyDirectory() *mdag.Node {
- nd := new(mdag.Node)
+func NewEmptyDirectory() *mdag.ProtoNode {
+ nd := new(mdag.ProtoNode)
nd.SetData(format.FolderPBData())
return nd
}
@@ -35,10 +35,15 @@ func (d *directoryBuilder) AddChild(ctx context.Context, name string, c *cid.Cid
return err
}
- return d.dirnode.AddNodeLinkClean(name, cnode)
+ cnpb, ok := cnode.(*mdag.ProtoNode)
+ if !ok {
+ return mdag.ErrNotProtobuf
+ }
+
+ return d.dirnode.AddNodeLinkClean(name, cnpb)
}
// GetNode returns the root of this directoryBuilder
-func (d *directoryBuilder) GetNode() *mdag.Node {
+func (d *directoryBuilder) GetNode() *mdag.ProtoNode {
return d.dirnode
}
diff --git a/unixfs/io/dirbuilder_test.go b/unixfs/io/dirbuilder_test.go
index 80a01d3251d..e7539a8bc26 100644
--- a/unixfs/io/dirbuilder_test.go
+++ b/unixfs/io/dirbuilder_test.go
@@ -10,7 +10,7 @@ import (
func TestEmptyNode(t *testing.T) {
n := NewEmptyDirectory()
- if len(n.Links) != 0 {
+ if len(n.Links()) != 0 {
t.Fatal("empty node should have 0 links")
}
}
@@ -27,7 +27,7 @@ func TestDirBuilder(t *testing.T) {
b.AddChild(ctx, "random", key)
dir := b.GetNode()
- outn, err := dir.GetLinkedNode(ctx, dserv, "random")
+ outn, err := dir.GetLinkedProtoNode(ctx, dserv, "random")
if err != nil {
t.Fatal(err)
}
diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go
index 8e3cae16a04..3479ab4a488 100644
--- a/unixfs/mod/dagmodifier.go
+++ b/unixfs/mod/dagmodifier.go
@@ -32,7 +32,7 @@ var log = logging.Logger("dagio")
// Dear god, please rename this to something more pleasant
type DagModifier struct {
dagserv mdag.DAGService
- curNode *mdag.Node
+ curNode *mdag.ProtoNode
splitter chunk.SplitterGen
ctx context.Context
@@ -45,7 +45,7 @@ type DagModifier struct {
read *uio.DagReader
}
-func NewDagModifier(ctx context.Context, from *mdag.Node, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
+func NewDagModifier(ctx context.Context, from *mdag.ProtoNode, serv mdag.DAGService, spl chunk.SplitterGen) (*DagModifier, error) {
return &DagModifier{
curNode: from.Copy(),
dagserv: serv,
@@ -178,11 +178,16 @@ func (dm *DagModifier) Sync() error {
return err
}
- dm.curNode = nd
+ pbnd, ok := nd.(*mdag.ProtoNode)
+ if !ok {
+ return mdag.ErrNotProtobuf
+ }
+
+ dm.curNode = pbnd
// need to write past end of current dag
if !done {
- nd, err = dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
+ nd, err := dm.appendData(dm.curNode, dm.splitter(dm.wrBuf))
if err != nil {
return err
}
@@ -204,14 +209,14 @@ func (dm *DagModifier) Sync() error {
// modifyDag writes the data in 'data' over the data in 'node' starting at 'offset'
// returns the new key of the passed in node and whether or not all the data in the reader
// has been consumed.
-func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
+func (dm *DagModifier) modifyDag(node *mdag.ProtoNode, offset uint64, data io.Reader) (*cid.Cid, bool, error) {
f, err := ft.FromBytes(node.Data())
if err != nil {
return nil, false, err
}
// If we've reached a leaf node.
- if len(node.Links) == 0 {
+ if len(node.Links()) == 0 {
n, err := data.Read(f.Data[offset:])
if err != nil && err != io.EOF {
return nil, false, err
@@ -223,7 +228,7 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
return nil, false, err
}
- nd := new(mdag.Node)
+ nd := new(mdag.ProtoNode)
nd.SetData(b)
k, err := dm.dagserv.Add(nd)
if err != nil {
@@ -244,17 +249,23 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
for i, bs := range f.GetBlocksizes() {
// We found the correct child to write into
if cur+bs > offset {
- child, err := node.Links[i].GetNode(dm.ctx, dm.dagserv)
+ child, err := node.Links()[i].GetNode(dm.ctx, dm.dagserv)
if err != nil {
return nil, false, err
}
- k, sdone, err := dm.modifyDag(child, offset-cur, data)
+
+ childpb, ok := child.(*mdag.ProtoNode)
+ if !ok {
+ return nil, false, mdag.ErrNotProtobuf
+ }
+
+ k, sdone, err := dm.modifyDag(childpb, offset-cur, data)
if err != nil {
return nil, false, err
}
offset += bs
- node.Links[i].Hash = k.Hash()
+ node.Links()[i].Cid = k
// Recache serialized node
_, err = node.EncodeProtobuf(true)
@@ -277,7 +288,7 @@ func (dm *DagModifier) modifyDag(node *mdag.Node, offset uint64, data io.Reader)
}
// appendData appends the blocks from the given chan to the end of this dag
-func (dm *DagModifier) appendData(node *mdag.Node, spl chunk.Splitter) (*mdag.Node, error) {
+func (dm *DagModifier) appendData(node *mdag.ProtoNode, spl chunk.Splitter) (*mdag.ProtoNode, error) {
dbp := &help.DagBuilderParams{
Dagserv: dm.dagserv,
Maxlinks: help.DefaultLinksPerBlock,
@@ -340,7 +351,7 @@ func (dm *DagModifier) CtxReadFull(ctx context.Context, b []byte) (int, error) {
}
// GetNode gets the modified DAG Node
-func (dm *DagModifier) GetNode() (*mdag.Node, error) {
+func (dm *DagModifier) GetNode() (*mdag.ProtoNode, error) {
err := dm.Sync()
if err != nil {
return nil, err
@@ -425,8 +436,8 @@ func (dm *DagModifier) Truncate(size int64) error {
}
// dagTruncate truncates the given node to 'size' and returns the modified Node
-func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGService) (*mdag.Node, error) {
- if len(nd.Links) == 0 {
+func dagTruncate(ctx context.Context, nd *mdag.ProtoNode, size uint64, ds mdag.DAGService) (*mdag.ProtoNode, error) {
+ if len(nd.Links()) == 0 {
// TODO: this can likely be done without marshaling and remarshaling
pbn, err := ft.FromBytes(nd.Data())
if err != nil {
@@ -439,22 +450,27 @@ func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGSer
var cur uint64
end := 0
- var modified *mdag.Node
+ var modified *mdag.ProtoNode
ndata := new(ft.FSNode)
- for i, lnk := range nd.Links {
+ for i, lnk := range nd.Links() {
child, err := lnk.GetNode(ctx, ds)
if err != nil {
return nil, err
}
- childsize, err := ft.DataSize(child.Data())
+ childpb, ok := child.(*mdag.ProtoNode)
+ if !ok {
+ return nil, mdag.ErrNotProtobuf
+ }
+
+ childsize, err := ft.DataSize(childpb.Data())
if err != nil {
return nil, err
}
// found the child we want to cut
if size < cur+childsize {
- nchild, err := dagTruncate(ctx, child, size-cur, ds)
+ nchild, err := dagTruncate(ctx, childpb, size-cur, ds)
if err != nil {
return nil, err
}
@@ -474,7 +490,7 @@ func dagTruncate(ctx context.Context, nd *mdag.Node, size uint64, ds mdag.DAGSer
return nil, err
}
- nd.Links = nd.Links[:end]
+ nd.SetLinks(nd.Links()[:end])
err = nd.AddNodeLinkClean("", modified)
if err != nil {
return nil, err
diff --git a/unixfs/test/utils.go b/unixfs/test/utils.go
index b997a11a8e7..26755cec57d 100644
--- a/unixfs/test/utils.go
+++ b/unixfs/test/utils.go
@@ -27,7 +27,7 @@ func GetDAGServ() mdag.DAGService {
return mdagmock.Mock()
}
-func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.Node {
+func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.ProtoNode {
in := bytes.NewReader(data)
node, err := imp.BuildTrickleDagFromReader(dserv, SizeSplitterGen(500)(in))
if err != nil {
@@ -37,11 +37,11 @@ func GetNode(t testing.TB, dserv mdag.DAGService, data []byte) *mdag.Node {
return node
}
-func GetEmptyNode(t testing.TB, dserv mdag.DAGService) *mdag.Node {
+func GetEmptyNode(t testing.TB, dserv mdag.DAGService) *mdag.ProtoNode {
return GetNode(t, dserv, []byte{})
}
-func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.Node) {
+func GetRandomNode(t testing.TB, dserv mdag.DAGService, size int64) ([]byte, *mdag.ProtoNode) {
in := io.LimitReader(u.NewTimeSeededRand(), size)
buf, err := ioutil.ReadAll(in)
if err != nil {
@@ -64,7 +64,7 @@ func ArrComp(a, b []byte) error {
return nil
}
-func PrintDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
+func PrintDag(nd *mdag.ProtoNode, ds mdag.DAGService, indent int) {
pbd, err := ft.FromBytes(nd.Data())
if err != nil {
panic(err)
@@ -74,17 +74,17 @@ func PrintDag(nd *mdag.Node, ds mdag.DAGService, indent int) {
fmt.Print(" ")
}
fmt.Printf("{size = %d, type = %s, children = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes()))
- if len(nd.Links) > 0 {
+ if len(nd.Links()) > 0 {
fmt.Println()
}
- for _, lnk := range nd.Links {
+ for _, lnk := range nd.Links() {
child, err := lnk.GetNode(context.Background(), ds)
if err != nil {
panic(err)
}
- PrintDag(child, ds, indent+1)
+ PrintDag(child.(*mdag.ProtoNode), ds, indent+1)
}
- if len(nd.Links) > 0 {
+ if len(nd.Links()) > 0 {
for i := 0; i < indent; i++ {
fmt.Print(" ")
}