Dependency upgrades #2026

Merged: 5 commits, Jan 29, 2024
Changes from all commits
2 changes: 1 addition & 1 deletion allocate.go
@@ -192,7 +192,7 @@ func allocationError(hash api.Cid, needed, wanted int, candidatesValid []peer.ID
logger.Errorf(" Wanted: %d", wanted)
logger.Errorf(" Available candidates: %d:", len(candidatesValid))
for _, c := range candidatesValid {
logger.Errorf(" - %s", c.Pretty())
logger.Errorf(" - %s", c)
}
errorMsg := "not enough peers to allocate CID. "
errorMsg += fmt.Sprintf("Needed at least: %d. ", needed)
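The change above shows a pattern repeated across this PR: peer.ID.Pretty() is deprecated in recent go-libp2p releases (it is equivalent to String()), and peer.ID satisfies fmt.Stringer, so the ID can be handed straight to a %s verb. A minimal sketch of the equivalence; logCandidates is an illustrative helper, not code from this repository:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// logCandidates mirrors the logging pattern above: a peer.ID passed to a %s
// verb prints the same base58 text the deprecated Pretty() method returned.
func logCandidates(candidates []peer.ID) {
	for _, c := range candidates {
		fmt.Printf("    - %s\n", c) // previously: c.Pretty()
	}
}

func main() {
	// With an empty slice this prints nothing; real callers pass the peers
	// they want listed.
	logCandidates(nil)
}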
4 changes: 2 additions & 2 deletions api/common/api.go
@@ -468,7 +468,7 @@ func (api *API) runLibp2pServer(ctx context.Context) {

listenMsg := ""
for _, a := range api.host.Addrs() {
listenMsg += fmt.Sprintf(" %s/p2p/%s\n", a, api.host.ID().Pretty())
listenMsg += fmt.Sprintf(" %s/p2p/%s\n", a, api.host.ID())
}

api.config.Logger.Infof(strings.ToUpper(api.config.ConfigKey)+" (libp2p-http): ENABLED. Listening on:\n%s\n", listenMsg)
@@ -544,7 +544,7 @@ func (api *API) ParsePinPathOrFail(w http.ResponseWriter, r *http.Request) types
vars := mux.Vars(r)
urlpath := "/" + vars["keyType"] + "/" + strings.TrimSuffix(vars["path"], "/")

path, err := gopath.ParsePath(urlpath)
path, err := gopath.NewPath(urlpath)
if err != nil {
api.SendResponse(w, http.StatusBadRequest, errors.New("error parsing path: "+err.Error()), nil)
return types.PinPath{}
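The ParsePinPathOrFail change reflects the path API change in the boxo path package (imported here as gopath), where NewPath replaces the older ParsePath constructor. A minimal sketch of the new call, assuming the current boxo path package; parsePinPath is an illustrative wrapper, not code from this repository:

package main

import (
	"fmt"
	"os"

	gopath "github.com/ipfs/boxo/path"
)

// parsePinPath wraps gopath.NewPath, the replacement for the older
// gopath.ParsePath. NewPath expects a namespaced path such as /ipfs/<cid>
// or /ipns/<name> and returns an error for anything else.
func parsePinPath(urlpath string) (gopath.Path, error) {
	p, err := gopath.NewPath(urlpath)
	if err != nil {
		return nil, fmt.Errorf("error parsing path: %w", err)
	}
	return p, nil
}

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: parsepath /ipfs/<cid>")
		return
	}
	p, err := parsePinPath(os.Args[1])
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("parsed:", p)
}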
22 changes: 19 additions & 3 deletions api/ipfsproxy/ipfsproxy.go
@@ -359,7 +359,7 @@ func (proxy *Server) pinOpHandler(op string, w http.ResponseWriter, r *http.Requ

q := r.URL.Query()
arg := q.Get("arg")
p, err := path.ParsePath(arg)
p, err := pathOrCidPath(arg)
if err != nil {
ipfsErrorResponder(w, "Error parsing IPFS Path: "+err.Error(), -1)
return
@@ -528,13 +528,13 @@ func (proxy *Server) pinUpdateHandler(w http.ResponseWriter, r *http.Request) {
to := args[1]

// Parse paths (we will need to resolve them)
pFrom, err := path.ParsePath(from)
pFrom, err := pathOrCidPath(from)
if err != nil {
ipfsErrorResponder(w, "error parsing \"from-path\" argument: "+err.Error(), -1)
return
}

pTo, err := path.ParsePath(to)
pTo, err := pathOrCidPath(to)
if err != nil {
ipfsErrorResponder(w, "error parsing \"to-path\" argument: "+err.Error(), -1)
return
@@ -967,3 +967,19 @@ func slashHandler(origHandler http.HandlerFunc) http.HandlerFunc {
origHandler(w, r)
}
}

// pathOrCidPath returns a path.Path built from the argument. It keeps the old
// ParsePath behavior of accepting a bare CID string by retrying the argument
// with an /ipfs/ prefix.
func pathOrCidPath(str string) (path.Path, error) {
p, err := path.NewPath(str)
if err == nil {
return p, nil
}

if p, err := path.NewPath("/ipfs/" + str); err == nil {
return p, nil
}

// Send back original err.
return nil, err
}
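The helper keeps the proxy compatible with clients that pass a bare CID as the arg query parameter: NewPath rejects a CID without a namespace, so the string is retried with an /ipfs/ prefix before the original error is returned. Roughly how it behaves, as a self-contained sketch under the current boxo path API (the inputs are placeholders):

package main

import (
	"fmt"

	"github.com/ipfs/boxo/path"
)

// pathOrCidPath is reproduced from the change above so this sketch compiles
// on its own.
func pathOrCidPath(str string) (path.Path, error) {
	p, err := path.NewPath(str)
	if err == nil {
		return p, nil
	}
	if p, err := path.NewPath("/ipfs/" + str); err == nil {
		return p, nil
	}
	return nil, err
}

func main() {
	for _, arg := range []string{
		"/ipns/example.com/some/file", // already namespaced: parsed as-is
		"definitely-not-a-cid",        // both attempts fail: the original error comes back
	} {
		p, err := pathOrCidPath(arg)
		fmt.Printf("%q -> path=%v err=%v\n", arg, p, err)
	}
}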
16 changes: 11 additions & 5 deletions api/rest/client/methods.go
@@ -78,7 +78,7 @@ func (c *defaultClient) PeerRm(ctx context.Context, id peer.ID) error {
ctx, span := trace.StartSpan(ctx, "client/PeerRm")
defer span.End()

return c.do(ctx, "DELETE", fmt.Sprintf("/peers/%s", id.Pretty()), nil, nil, nil)
return c.do(ctx, "DELETE", fmt.Sprintf("/peers/%s", id), nil, nil, nil)
}

// Pin tracks a Cid with the given replication factor and a name for
@@ -122,9 +122,12 @@ func (c *defaultClient) PinPath(ctx context.Context, path string, opts api.PinOp
defer span.End()

var pin api.Pin
ipfspath, err := gopath.ParsePath(path)
ipfspath, err := gopath.NewPath(path)
if err != nil {
return api.Pin{}, err
ipfspath, err = gopath.NewPath("/ipfs/" + path)
if err != nil {
return api.Pin{}, err
}
}
query, err := opts.ToQuery()
if err != nil {
@@ -153,9 +156,12 @@ func (c *defaultClient) UnpinPath(ctx context.Context, p string) (api.Pin, error
defer span.End()

var pin api.Pin
ipfspath, err := gopath.ParsePath(p)
ipfspath, err := gopath.NewPath(p)
if err != nil {
return api.Pin{}, err
ipfspath, err = gopath.NewPath("/ipfs/" + p)
if err != nil {
return api.Pin{}, err
}
}

err = c.do(ctx, "DELETE", fmt.Sprintf("/pins%s", ipfspath.String()), nil, nil, &pin)
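PinPath and UnpinPath now inline the same bare-CID fallback as the proxy's pathOrCidPath, since gopath.NewPath, unlike the old ParsePath, refuses a CID that lacks an /ipfs/ prefix. As an aside, when the caller already holds a cid.Cid rather than a string, boxo also offers path.FromCid, which builds the /ipfs/ path with no parsing at all. A sketch under that assumption, not code from this PR (the CID is constructed on the fly purely for illustration):

package main

import (
	"fmt"

	"github.com/ipfs/boxo/path"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

func main() {
	// Build a throwaway CID (raw codec over the bytes "hello") so the example
	// does not depend on any real pinned content.
	h, err := mh.Sum([]byte("hello"), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	c := cid.NewCidV1(cid.Raw, h)

	// path.FromCid wraps a cid.Cid as an immutable /ipfs/ path directly,
	// without string parsing or fallback logic.
	p := path.FromCid(c)
	fmt.Println(p)
}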
17 changes: 8 additions & 9 deletions api/rest/restapi_test.go
@@ -71,7 +71,7 @@ func TestRestAPIIDEndpoint(t *testing.T) {
tf := func(t *testing.T, url test.URLFunc) {
id := api.ID{}
test.MakeGet(t, rest, url(rest)+"/id", &id)
if id.ID.Pretty() != clustertest.PeerID1.Pretty() {
if id.ID != clustertest.PeerID1 {
t.Error("expected correct id")
}
}
@@ -106,7 +106,7 @@ func TestAPIPeersEndpoint(t *testing.T) {
if len(list) != 1 {
t.Fatal("expected 1 element")
}
if list[0].ID.Pretty() != clustertest.PeerID1.Pretty() {
if list[0].ID != clustertest.PeerID1 {
t.Error("expected a different peer id list: ", list)
}
}
@@ -122,10 +122,10 @@ func TestAPIPeerAddEndpoint(t *testing.T) {
tf := func(t *testing.T, url test.URLFunc) {
id := api.ID{}
// post with valid body
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", clustertest.PeerID1.Pretty())
body := fmt.Sprintf("{\"peer_id\":\"%s\"}", clustertest.PeerID1)
t.Log(body)
test.MakePost(t, rest, url(rest)+"/peers", []byte(body), &id)
if id.ID.Pretty() != clustertest.PeerID1.Pretty() {
if id.ID != clustertest.PeerID1 {
t.Error("expected correct ID")
}
if id.Error != "" {
@@ -272,7 +272,7 @@ func TestAPIPeerRemoveEndpoint(t *testing.T) {
defer rest.Shutdown(ctx)

tf := func(t *testing.T, url test.URLFunc) {
test.MakeDelete(t, rest, url(rest)+"/peers/"+clustertest.PeerID1.Pretty(), &struct{}{})
test.MakeDelete(t, rest, url(rest)+"/peers/"+clustertest.PeerID1.String(), &struct{}{})
}

test.BothEndpoints(t, tf)
@@ -286,7 +286,7 @@ func TestConnectGraphEndpoint(t *testing.T) {
tf := func(t *testing.T, url test.URLFunc) {
var cg api.ConnectGraph
test.MakeGet(t, rest, url(rest)+"/health/graph", &cg)
if cg.ClusterID.Pretty() != clustertest.PeerID1.Pretty() {
if cg.ClusterID != clustertest.PeerID1 {
t.Error("unexpected cluster id")
}
if len(cg.IPFSLinks) != 3 {
@@ -567,7 +567,7 @@ func TestAPIMetricsEndpoint(t *testing.T) {
if m.Name != "test" {
t.Error("Unexpected metric name: ", m.Name)
}
if m.Peer.Pretty() != clustertest.PeerID1.Pretty() {
if m.Peer != clustertest.PeerID1 {
t.Error("Unexpected peer id: ", m.Peer)
}
}
@@ -845,7 +845,6 @@ func TestAPIIPFSGCEndpoint(t *testing.T) {
test.BothEndpoints(t, tf)
}


func TestHealthEndpoint(t *testing.T) {
ctx := context.Background()
rest := testAPI(t)
@@ -860,4 +859,4 @@ func TestHealthEndpoint(t *testing.T) {
}

test.BothEndpoints(t, tf)
}
}
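The test assertions above compare peer.ID values directly instead of comparing their Pretty() strings. peer.ID is a string-backed type in go-libp2p, so == and != work on it without conversion. A minimal sketch; the literals are placeholders, not validly encoded peer IDs:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// peer.ID is defined as a string type, so values compare with == directly.
	// Real IDs come from peer.Decode or a libp2p host's identity.
	var a peer.ID = "placeholder-peer-1"
	var b peer.ID = "placeholder-peer-1"
	var c peer.ID = "placeholder-peer-2"

	fmt.Println(a == b) // true
	fmt.Println(a != c) // true
}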
16 changes: 8 additions & 8 deletions cluster.go
@@ -131,7 +131,7 @@ func NewCluster(

listenAddrs := ""
for _, addr := range host.Addrs() {
listenAddrs += fmt.Sprintf(" %s/p2p/%s\n", addr, host.ID().Pretty())
listenAddrs += fmt.Sprintf(" %s/p2p/%s\n", addr, host.ID())
}

logger.Infof("IPFS Cluster v%s listening on:\n%s\n", version.Version, listenAddrs)
@@ -607,7 +607,7 @@ func (c *Cluster) vacatePeer(ctx context.Context, p peer.ID) {
defer span.End()

if c.config.DisableRepinning {
logger.Warnf("repinning is disabled. Will not re-allocate cids from %s", p.Pretty())
logger.Warnf("repinning is disabled. Will not re-allocate cids from %s", p)
return
}

@@ -645,7 +645,7 @@ func (c *Cluster) repinFromPeer(ctx context.Context, p peer.ID, pin api.Pin) {
// if we are not under the replication-factor min.
_, ok, err := c.pin(ctx, pin, []peer.ID{p})
if ok && err == nil {
logger.Infof("repinned %s out of %s", pin.Cid, p.Pretty())
logger.Infof("repinned %s out of %s", pin.Cid, p)
}
}

@@ -739,7 +739,7 @@ This might be due to one or several causes:

for _, p := range peers {
if p != c.id {
logger.Infof(" - %s", p.Pretty())
logger.Infof(" - %s", p)
}
}

@@ -971,7 +971,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
// seems to help.
c.paMux.Lock()
defer c.paMux.Unlock()
logger.Debugf("peerAdd called with %s", pid.Pretty())
logger.Debugf("peerAdd called with %s", pid)

// Let the consensus layer be aware of this peer
err := c.consensus.AddPeer(ctx, pid)
@@ -981,7 +981,7 @@ func (c *Cluster) PeerAdd(ctx context.Context, pid peer.ID) (*api.ID, error) {
return id, err
}

logger.Info("Peer added ", pid.Pretty())
logger.Infof("Peer added %s", pid)
addedID, err := c.getIDForPeer(ctx, pid)
if err != nil {
return addedID, err
@@ -1010,7 +1010,7 @@ func (c *Cluster) PeerRemove(ctx context.Context, pid peer.ID) error {
logger.Error(err)
return err
}
logger.Info("Peer removed ", pid.Pretty())
logger.Info("Peer removed %s", pid)
return nil
}

@@ -1123,7 +1123,7 @@ func (c *Cluster) Join(ctx context.Context, addr ma.Multiaddr) error {
}()
go c.RecoverAllLocal(c.ctx, out)

logger.Infof("%s: joined %s's cluster", c.id.Pretty(), pid.Pretty())
logger.Infof("%s: joined %s's cluster", c.id, pid)
return nil
}

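One detail in the PeerAdd and PeerRemove hunks: a %s verb only expands in the formatting variants of the logger, so both log calls become Infof. The plain Info call concatenates its arguments the way fmt.Sprint does and would print the verb literally. A small sketch of the difference using fmt directly, since go-log's Info/Infof follow the same Sprint/Sprintf convention:

package main

import "fmt"

func main() {
	pid := "12D3KooW-example-peer" // placeholder peer ID string

	// Sprint-style (logger.Info): arguments are concatenated, the verb stays literal.
	fmt.Println(fmt.Sprint("Peer removed %s", pid)) // Peer removed %s12D3KooW-example-peer

	// Sprintf-style (logger.Infof): the verb expands.
	fmt.Println(fmt.Sprintf("Peer removed %s", pid)) // Peer removed 12D3KooW-example-peer
}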
4 changes: 2 additions & 2 deletions cluster_test.go
@@ -129,7 +129,7 @@ func (ipfs *mockConnector) RepoGC(ctx context.Context) (api.RepoGC, error) {
}

func (ipfs *mockConnector) Resolve(ctx context.Context, path string) (api.Cid, error) {
_, err := gopath.ParsePath(path)
_, err := gopath.NewPath(path)
if err != nil {
return api.CidUndef, err
}
@@ -164,7 +164,7 @@ func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, PinTracke

host, pubsub, dht := createHost(t, ident.PrivateKey, clusterCfg.Secret, clusterCfg.ListenAddr)

folder := filepath.Join(testsFolder, host.ID().Pretty())
folder := filepath.Join(testsFolder, host.ID().String())
cleanState()
clusterCfg.SetBaseDir(folder)
raftCfg.DataFolder = folder
6 changes: 3 additions & 3 deletions cmd/ipfs-cluster-ctl/formatters.go
@@ -127,13 +127,13 @@ func textFormatObject(resp interface{}) {

func textFormatPrintID(obj api.ID) {
if obj.Error != "" {
fmt.Printf("%s | ERROR: %s\n", obj.ID.Pretty(), obj.Error)
fmt.Printf("%s | ERROR: %s\n", obj.ID, obj.Error)
return
}

fmt.Printf(
"%s | %s | Sees %d other peers\n",
obj.ID.Pretty(),
obj.ID,
obj.Peername,
len(obj.ClusterPeers)-1,
)
@@ -157,7 +157,7 @@ func textFormatPrintID(obj api.ID) {
ipfsAddrs = append(ipfsAddrs, a.String())
}
ipfsAddrs.Sort()
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID.Pretty())
fmt.Printf(" > IPFS: %s\n", obj.IPFS.ID)
for _, a := range ipfsAddrs {
fmt.Printf(" - %s\n", a)
}
2 changes: 1 addition & 1 deletion cmd/ipfs-cluster-service/daemon.go
@@ -139,7 +139,7 @@ func createCluster(
checkErr("getting configuration string", err)
logger.Debugf("Configuration:\n%s\n", cfgBytes)

ctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().Pretty()))
ctx, err = tag.New(ctx, tag.Upsert(observations.HostKey, host.ID().String()))
checkErr("tag context with host id", err)

err = observations.SetupMetrics(cfgs.Metrics)
2 changes: 1 addition & 1 deletion cmdutils/configs.go
@@ -342,7 +342,7 @@ func (ch *ConfigHelper) SaveIdentityToDisk() error {
func (ch *ConfigHelper) SetupTracing(forceEnabled bool) {
enabled := forceEnabled || ch.configs.Tracing.EnableTracing

ch.configs.Tracing.ClusterID = ch.Identity().ID.Pretty()
ch.configs.Tracing.ClusterID = ch.Identity().ID.String()
ch.configs.Tracing.ClusterPeername = ch.configs.Cluster.Peername
ch.configs.Tracing.EnableTracing = enabled
ch.configs.Cluster.Tracing = enabled
2 changes: 1 addition & 1 deletion config/identity.go
@@ -102,7 +102,7 @@ func (ident *Identity) toIdentityJSON() (jID *identityJSON, err error) {
pKey := base64.StdEncoding.EncodeToString(pkeyBytes)

// Set all identity fields
jID.ID = ident.ID.Pretty()
jID.ID = ident.ID.String()
jID.PrivateKey = pKey
return
}
6 changes: 3 additions & 3 deletions connect_graph.go
@@ -86,7 +86,7 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []api
var pID api.ID
for _, id := range peers {
if id.Error != "" {
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID.Pretty())
logger.Debugf("Peer %s errored connecting to its peer %s", p, id.ID)
continue
}
if id.ID.String() == p {
@@ -102,15 +102,15 @@ func (c *Cluster) recordClusterLinks(cg *api.ConnectGraph, p string, peers []api
func (c *Cluster) recordIPFSLinks(cg *api.ConnectGraph, pID api.ID) {
ipfsID := pID.IPFS.ID
if pID.IPFS.Error != "" { // Only setting ipfs connections when no error occurs
logger.Warnf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID.Pretty(), pID.IPFS.Error)
logger.Warnf("ipfs id: %s has error: %s. Skipping swarm connections", ipfsID, pID.IPFS.Error)
return
}

pid := pID.ID.String()
ipfsPid := ipfsID.String()

if _, ok := cg.IPFSLinks[pid]; ok {
logger.Warnf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID.Pretty())
logger.Warnf("ipfs id: %s already recorded, one ipfs daemon in use by multiple cluster peers", ipfsID)
}
cg.ClustertoIPFS[pid] = ipfsID
cg.IPFSLinks[ipfsPid] = make([]peer.ID, 0)
10 changes: 5 additions & 5 deletions consensus/raft/consensus.go
@@ -291,7 +291,7 @@ func (cc *Consensus) redirectToLeader(ctx context.Context, method string, arg in
return false, nil
}

logger.Debugf("redirecting %s to leader: %s", method, leader.Pretty())
logger.Debugf("redirecting %s to leader: %s", method, leader)
finalErr = cc.rpcClient.CallContext(
ctx,
leader,
@@ -403,7 +403,7 @@ func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {

var finalErr error
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("attempt #%d: AddPeer %s", i, pid.Pretty())
logger.Debugf("attempt #%d: AddPeer %s", i, pid)
if finalErr != nil {
logger.Errorf("retrying to add peer. Attempt #%d failed: %s", i, finalErr)
}
@@ -420,7 +420,7 @@ func (cc *Consensus) AddPeer(ctx context.Context, pid peer.ID) error {
time.Sleep(cc.config.CommitRetryDelay)
continue
}
logger.Infof("peer added to Raft: %s", pid.Pretty())
logger.Infof("peer added to Raft: %s", pid)
break
}
return finalErr
@@ -434,7 +434,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {

var finalErr error
for i := 0; i <= cc.config.CommitRetries; i++ {
logger.Debugf("attempt #%d: RmPeer %s", i, pid.Pretty())
logger.Debugf("attempt #%d: RmPeer %s", i, pid)
if finalErr != nil {
logger.Errorf("retrying to remove peer. Attempt #%d failed: %s", i, finalErr)
}
@@ -450,7 +450,7 @@ func (cc *Consensus) RmPeer(ctx context.Context, pid peer.ID) error {
time.Sleep(cc.config.CommitRetryDelay)
continue
}
logger.Infof("peer removed from Raft: %s", pid.Pretty())
logger.Infof("peer removed from Raft: %s", pid)
break
}
return finalErr