diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 0b735dab1..6aee77227 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -67,7 +67,7 @@ jobs: uses: n8maninger/action-golang-test@v1 with: package: "./internal/test/e2e/..." - args: "-failfast;-race;-tags=testing;-timeout=60m" + args: "-failfast;-race;-timeout=60m" - name: Test Integration - MySQL if: matrix.os == 'ubuntu-latest' uses: n8maninger/action-golang-test@v1 @@ -77,6 +77,6 @@ jobs: RENTERD_DB_PASSWORD: test with: package: "./internal/test/e2e/..." - args: "-failfast;-race;-tags=testing;-timeout=60m" + args: "-failfast;-race;-timeout=60m" - name: Build run: go build -o bin/ ./cmd/renterd diff --git a/README.md b/README.md index bc01922e1..c20749935 100644 --- a/README.md +++ b/README.md @@ -542,7 +542,7 @@ formed. "hosts": { "allowRedundantIPs": false, "maxDowntimeHours": 1440, - "minRecentScanFailures": 20, + "maxConsecutiveScanFailures": 20, "scoreOverrides": {} }, "contracts": { diff --git a/alerts/alerts_test.go b/alerts/alerts_test.go index 24b299e1b..99b698f7e 100644 --- a/alerts/alerts_test.go +++ b/alerts/alerts_test.go @@ -57,7 +57,7 @@ func TestWebhooks(t *testing.T) { mux := http.NewServeMux() var events []webhooks.Event var mu sync.Mutex - mux.HandleFunc("/events", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/event", func(w http.ResponseWriter, r *http.Request) { var event webhooks.Event if err := json.NewDecoder(r.Body).Decode(&event); err != nil { t.Fatal(err) @@ -72,7 +72,7 @@ func TestWebhooks(t *testing.T) { // register a hook wh := webhooks.Webhook{ Module: webhookModule, - URL: fmt.Sprintf("http://%v/events", srv.Listener.Addr().String()), + URL: fmt.Sprintf("http://%v/event", srv.Listener.Addr().String()), } if hookID := wh.String(); hookID != fmt.Sprintf("%v.%v.%v", wh.URL, wh.Module, "") { t.Fatalf("wrong result for wh.String(): %v != %v", wh.String(), hookID) diff --git a/api/account.go b/api/account.go index 
7cecd3bae..46ed69c00 100644 --- a/api/account.go +++ b/api/account.go @@ -33,6 +33,10 @@ type ( // an account and the balance reported by a host. Drift *big.Int `json:"drift"` + // Owner is the owner of the account which is responsible for funding + // it. + Owner string `json:"owner"` + // RequiresSync indicates whether an account needs to be synced with the // host before it can be used again. RequiresSync bool `json:"requiresSync"` diff --git a/api/autopilot.go b/api/autopilot.go index e81328d88..7f2ee7b08 100644 --- a/api/autopilot.go +++ b/api/autopilot.go @@ -56,11 +56,11 @@ type ( // HostsConfig contains all hosts settings used in the autopilot. HostsConfig struct { - AllowRedundantIPs bool `json:"allowRedundantIPs"` - MaxDowntimeHours uint64 `json:"maxDowntimeHours"` - MinProtocolVersion string `json:"minProtocolVersion"` - MinRecentScanFailures uint64 `json:"minRecentScanFailures"` - ScoreOverrides map[types.PublicKey]float64 `json:"scoreOverrides"` + AllowRedundantIPs bool `json:"allowRedundantIPs"` + MaxDowntimeHours uint64 `json:"maxDowntimeHours"` + MinProtocolVersion string `json:"minProtocolVersion"` + MaxConsecutiveScanFailures uint64 `json:"maxConsecutiveScanFailures"` + ScoreOverrides map[types.PublicKey]float64 `json:"scoreOverrides"` } ) diff --git a/api/bus.go b/api/bus.go index 453af61ca..3b73469e3 100644 --- a/api/bus.go +++ b/api/bus.go @@ -3,6 +3,7 @@ package api import ( "errors" + rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" ) @@ -45,10 +46,29 @@ type ( ) type ( + AccountsFundRequest struct { + AccountID rhpv3.Account `json:"accountID"` + Amount types.Currency `json:"amount"` + ContractID types.FileContractID `json:"contractID"` + } + + AccountsFundResponse struct { + Deposit types.Currency `json:"deposit"` + } + + AccountsSaveRequest struct { + Accounts []Account `json:"accounts"` + } + // BusStateResponse is the response type for the /bus/state endpoint. 
BusStateResponse struct { StartTime TimeRFC3339 `json:"startTime"` Network string `json:"network"` BuildState } + + ContractSetUpdateRequest struct { + ToAdd []types.FileContractID `json:"toAdd"` + ToRemove []types.FileContractID `json:"toRemove"` + } ) diff --git a/api/contract.go b/api/contract.go index b7d43b6a7..19da16a2d 100644 --- a/api/contract.go +++ b/api/contract.go @@ -144,6 +144,16 @@ type ( TotalCost types.Currency `json:"totalCost"` } + // ContractFormRequest is the request type for the POST /contracts endpoint. + ContractFormRequest struct { + EndHeight uint64 `json:"endHeight"` + HostCollateral types.Currency `json:"hostCollateral"` + HostKey types.PublicKey `json:"hostKey"` + HostIP string `json:"hostIP"` + RenterFunds types.Currency `json:"renterFunds"` + RenterAddress types.Address `json:"renterAddress"` + } + // ContractKeepaliveRequest is the request type for the /contract/:id/keepalive // endpoint. ContractKeepaliveRequest struct { @@ -151,12 +161,37 @@ type ( LockID uint64 `json:"lockID"` } + // ContractPruneRequest is the request type for the /contract/:id/prune + // endpoint. + ContractPruneRequest struct { + Timeout DurationMS `json:"timeout"` + } + + // ContractPruneResponse is the response type for the /contract/:id/prune + // endpoint. + ContractPruneResponse struct { + ContractSize uint64 `json:"size"` + Pruned uint64 `json:"pruned"` + Remaining uint64 `json:"remaining"` + Error string `json:"error,omitempty"` + } + // ContractAcquireRequest is the request type for the /contract/:id/release // endpoint. ContractReleaseRequest struct { LockID uint64 `json:"lockID"` } + // ContractRenewRequest is the request type for the /contract/:id/renew + // endpoint. 
+ ContractRenewRequest struct { + EndHeight uint64 `json:"endHeight"` + ExpectedNewStorage uint64 `json:"expectedNewStorage"` + MaxFundAmount types.Currency `json:"maxFundAmount"` + MinNewCollateral types.Currency `json:"minNewCollateral"` + RenterFunds types.Currency `json:"renterFunds"` + } + // ContractRenewedRequest is the request type for the /contract/:id/renewed // endpoint. ContractRenewedRequest struct { @@ -191,6 +226,11 @@ type ( } ) +// Total returns the total cost of the contract spending. +func (x ContractSpending) Total() types.Currency { + return x.Uploads.Add(x.Downloads).Add(x.FundAccount).Add(x.Deletions).Add(x.SectorRoots) +} + // Add returns the sum of the current and given contract spending. func (x ContractSpending) Add(y ContractSpending) (z ContractSpending) { z.Uploads = x.Uploads.Add(y.Uploads) diff --git a/api/events.go b/api/events.go index e9600e53b..7f14ee4c5 100644 --- a/api/events.go +++ b/api/events.go @@ -58,9 +58,10 @@ type ( } EventContractSetUpdate struct { - Name string `json:"name"` - ContractIDs []types.FileContractID `json:"contractIDs"` - Timestamp time.Time `json:"timestamp"` + Name string `json:"name"` + ToAdd []types.FileContractID `json:"toAdd"` + ToRemove []types.FileContractID `json:"toRemove"` + Timestamp time.Time `json:"timestamp"` } EventSettingUpdate struct { diff --git a/api/host.go b/api/host.go index d932229d6..0a6b506ff 100644 --- a/api/host.go +++ b/api/host.go @@ -53,8 +53,8 @@ type ( // HostsRemoveRequest is the request type for the /hosts/remove endpoint. 
HostsRemoveRequest struct { - MaxDowntimeHours DurationH `json:"maxDowntimeHours"` - MinRecentScanFailures uint64 `json:"minRecentScanFailures"` + MaxDowntimeHours DurationH `json:"maxDowntimeHours"` + MaxConsecutiveScanFailures uint64 `json:"maxConsecutiveScanFailures"` } // SearchHostsRequest is the request type for the /api/bus/search/hosts diff --git a/api/metrics.go b/api/metrics.go index 98c6f06b0..763b8c473 100644 --- a/api/metrics.go +++ b/api/metrics.go @@ -50,14 +50,6 @@ type ( Reason string } - PerformanceMetric struct { - Action string `json:"action"` - HostKey types.PublicKey `json:"hostKey"` - Origin string `json:"origin"` - Duration time.Duration `json:"duration"` - Timestamp TimeRFC3339 `json:"timestamp"` - } - PerformanceMetricsQueryOpts struct { Action string HostKey types.PublicKey diff --git a/api/wallet.go b/api/wallet.go index 510e7b95b..ad8acb56d 100644 --- a/api/wallet.go +++ b/api/wallet.go @@ -5,8 +5,6 @@ import ( "net/url" "time" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" ) @@ -45,42 +43,6 @@ type ( DependsOn []types.Transaction `json:"dependsOn"` } - // WalletPrepareFormRequest is the request type for the /wallet/prepare/form - // endpoint. - WalletPrepareFormRequest struct { - EndHeight uint64 `json:"endHeight"` - HostCollateral types.Currency `json:"hostCollateral"` - HostKey types.PublicKey `json:"hostKey"` - HostSettings rhpv2.HostSettings `json:"hostSettings"` - RenterAddress types.Address `json:"renterAddress"` - RenterFunds types.Currency `json:"renterFunds"` - RenterKey types.PublicKey `json:"renterKey"` - } - - // WalletPrepareRenewRequest is the request type for the /wallet/prepare/renew - // endpoint. 
- WalletPrepareRenewRequest struct { - Revision types.FileContractRevision `json:"revision"` - EndHeight uint64 `json:"endHeight"` - ExpectedNewStorage uint64 `json:"expectedNewStorage"` - HostAddress types.Address `json:"hostAddress"` - PriceTable rhpv3.HostPriceTable `json:"priceTable"` - MaxFundAmount types.Currency `json:"maxFundAmount"` - MinNewCollateral types.Currency `json:"minNewCollateral"` - RenterAddress types.Address `json:"renterAddress"` - RenterFunds types.Currency `json:"renterFunds"` - RenterKey types.PrivateKey `json:"renterKey"` - WindowSize uint64 `json:"windowSize"` - } - - // WalletPrepareRenewResponse is the response type for the /wallet/prepare/renew - // endpoint. - WalletPrepareRenewResponse struct { - FundAmount types.Currency `json:"fundAmount"` - ToSign []types.Hash256 `json:"toSign"` - TransactionSet []types.Transaction `json:"transactionSet"` - } - // WalletRedistributeRequest is the request type for the /wallet/redistribute // endpoint. WalletRedistributeRequest struct { diff --git a/api/worker.go b/api/worker.go index 894fd0c60..d1c18b61b 100644 --- a/api/worker.go +++ b/api/worker.go @@ -80,16 +80,6 @@ type ( Error string `json:"error,omitempty"` } - // RHPFormRequest is the request type for the /rhp/form endpoint. - RHPFormRequest struct { - EndHeight uint64 `json:"endHeight"` - HostCollateral types.Currency `json:"hostCollateral"` - HostKey types.PublicKey `json:"hostKey"` - HostIP string `json:"hostIP"` - RenterFunds types.Currency `json:"renterFunds"` - RenterAddress types.Address `json:"renterAddress"` - } - // RHPFormResponse is the response type for the /rhp/form endpoint. RHPFormResponse struct { ContractID types.FileContractID `json:"contractID"` @@ -105,20 +95,6 @@ type ( Balance types.Currency `json:"balance"` } - // RHPPruneContractRequest is the request type for the /rhp/contract/:id/prune - // endpoint. 
- RHPPruneContractRequest struct { - Timeout DurationMS `json:"timeout"` - } - - // RHPPruneContractResponse is the response type for the /rhp/contract/:id/prune - // endpoint. - RHPPruneContractResponse struct { - Pruned uint64 `json:"pruned"` - Remaining uint64 `json:"remaining"` - Error string `json:"error,omitempty"` - } - // RHPPriceTableRequest is the request type for the /rhp/pricetable endpoint. RHPPriceTableRequest struct { HostKey types.PublicKey `json:"hostKey"` @@ -126,31 +102,6 @@ type ( Timeout DurationMS `json:"timeout"` } - // RHPRenewRequest is the request type for the /rhp/renew endpoint. - RHPRenewRequest struct { - ContractID types.FileContractID `json:"contractID"` - EndHeight uint64 `json:"endHeight"` - ExpectedNewStorage uint64 `json:"expectedNewStorage"` - HostAddress types.Address `json:"hostAddress"` - HostKey types.PublicKey `json:"hostKey"` - MaxFundAmount types.Currency `json:"maxFundAmount"` - MinNewCollateral types.Currency `json:"minNewCollateral"` - SiamuxAddr string `json:"siamuxAddr"` - RenterAddress types.Address `json:"renterAddress"` - RenterFunds types.Currency `json:"renterFunds"` - WindowSize uint64 `json:"windowSize"` - } - - // RHPRenewResponse is the response type for the /rhp/renew endpoint. - RHPRenewResponse struct { - Error string `json:"error"` - ContractID types.FileContractID `json:"contractID"` - Contract rhpv2.ContractRevision `json:"contract"` - ContractPrice types.Currency `json:"contractPrice"` - FundAmount types.Currency `json:"fundAmount"` - TransactionSet []types.Transaction `json:"transactionSet"` - } - // RHPScanRequest is the request type for the /rhp/scan endpoint. 
RHPScanRequest struct { HostKey types.PublicKey `json:"hostKey"` diff --git a/autopilot/accounts.go b/autopilot/accounts.go deleted file mode 100644 index a1422d69a..000000000 --- a/autopilot/accounts.go +++ /dev/null @@ -1,291 +0,0 @@ -package autopilot - -import ( - "context" - "errors" - "fmt" - "math/big" - "sync" - "time" - - rhpv3 "go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" - "go.sia.tech/renterd/alerts" - "go.sia.tech/renterd/api" - "go.uber.org/zap" -) - -var errMaxDriftExceeded = errors.New("drift on account is too large") - -var ( - minBalance = types.Siacoins(1).Div64(2).Big() - maxBalance = types.Siacoins(1) - maxNegDrift = new(big.Int).Neg(types.Siacoins(10).Big()) -) - -type accounts struct { - ap *Autopilot - a AccountStore - c ContractStore - l *zap.SugaredLogger - w *workerPool - - refillInterval time.Duration - revisionSubmissionBuffer uint64 - - mu sync.Mutex - inProgressRefills map[types.Hash256]struct{} -} - -type AccountStore interface { - Account(ctx context.Context, id rhpv3.Account, hk types.PublicKey) (account api.Account, err error) - Accounts(ctx context.Context) (accounts []api.Account, err error) -} - -type ContractStore interface { - Contracts(ctx context.Context, opts api.ContractsOpts) ([]api.ContractMetadata, error) -} - -func newAccounts(ap *Autopilot, a AccountStore, c ContractStore, w *workerPool, l *zap.SugaredLogger, refillInterval time.Duration, revisionSubmissionBuffer uint64) *accounts { - return &accounts{ - ap: ap, - a: a, - c: c, - l: l.Named("accounts"), - w: w, - - refillInterval: refillInterval, - revisionSubmissionBuffer: revisionSubmissionBuffer, - inProgressRefills: make(map[types.Hash256]struct{}), - } -} - -func (a *accounts) markRefillInProgress(workerID string, hk types.PublicKey) bool { - a.mu.Lock() - defer a.mu.Unlock() - k := types.HashBytes(append([]byte(workerID), hk[:]...)) - _, inProgress := a.inProgressRefills[k] - if inProgress { - return false - } - a.inProgressRefills[k] = struct{}{} - 
return true -} - -func (a *accounts) markRefillDone(workerID string, hk types.PublicKey) { - a.mu.Lock() - defer a.mu.Unlock() - k := types.HashBytes(append([]byte(workerID), hk[:]...)) - _, inProgress := a.inProgressRefills[k] - if !inProgress { - panic("releasing a refill that hasn't been in progress") - } - delete(a.inProgressRefills, k) -} - -func (a *accounts) refillWorkersAccountsLoop(ctx context.Context) { - ticker := time.NewTicker(a.refillInterval) - - for { - select { - case <-ctx.Done(): - return // shutdown - case <-ticker.C: - } - - a.w.withWorker(func(w Worker) { - a.refillWorkerAccounts(ctx, w) - }) - } -} - -// refillWorkerAccounts refills all accounts on a worker that require a refill. -// To avoid slow hosts preventing refills for fast hosts, a separate goroutine -// is used for every host. If a slow host's account is still being refilled by a -// goroutine from a previous call, refillWorkerAccounts will skip that account -// until the previously launched goroutine returns. 
-func (a *accounts) refillWorkerAccounts(ctx context.Context, w Worker) { - // fetch config - cfg, err := a.ap.Config(ctx) - if err != nil { - a.l.Errorw(fmt.Sprintf("failed to fetch config for refill: %v", err)) - return - } - - // fetch consensus state - cs, err := a.ap.bus.ConsensusState(ctx) - if err != nil { - a.l.Errorw(fmt.Sprintf("failed to fetch consensus state for refill: %v", err)) - return - } - - // fetch worker id - workerID, err := w.ID(ctx) - if err != nil { - a.l.Errorw(fmt.Sprintf("failed to fetch worker id for refill: %v", err)) - return - } - - // fetch all contracts - contracts, err := a.c.Contracts(ctx, api.ContractsOpts{}) - if err != nil { - a.l.Errorw(fmt.Sprintf("failed to fetch contracts for refill: %v", err)) - return - } else if len(contracts) == 0 { - return - } - - // filter all contract set contracts - var contractSetContracts []api.ContractMetadata - inContractSet := make(map[types.FileContractID]struct{}) - for _, c := range contracts { - if c.InSet(cfg.Config.Contracts.Set) { - contractSetContracts = append(contractSetContracts, c) - inContractSet[c.ID] = struct{}{} - } - } - - // refill accounts in separate goroutines - for _, c := range contracts { - // launch refill if not already in progress - if a.markRefillInProgress(workerID, c.HostKey) { - go func(contract api.ContractMetadata) { - defer a.markRefillDone(workerID, contract.HostKey) - - rCtx, cancel := context.WithTimeout(ctx, 5*time.Minute) - defer cancel() - accountID, refilled, rerr := refillWorkerAccount(rCtx, a.a, w, contract, cs.BlockHeight, a.revisionSubmissionBuffer) - if rerr != nil { - if rerr.Is(errMaxDriftExceeded) { - // register the alert if error is errMaxDriftExceeded - a.ap.RegisterAlert(ctx, newAccountRefillAlert(accountID, contract, *rerr)) - } - if _, inSet := inContractSet[contract.ID]; inSet { - a.l.Errorw(rerr.err.Error(), rerr.keysAndValues...) - } else { - a.l.Debugw(rerr.err.Error(), rerr.keysAndValues...) 
- } - } else { - // dismiss alerts on success - a.ap.DismissAlert(ctx, alerts.IDForAccount(alertAccountRefillID, accountID)) - - // log success - if refilled { - a.l.Infow("Successfully funded account", - "account", accountID, - "host", contract.HostKey, - "balance", maxBalance, - ) - } - } - }(c) - } - } -} - -type refillError struct { - err error - keysAndValues []interface{} -} - -func (err *refillError) Error() string { - if err.err == nil { - return "" - } - return err.err.Error() -} - -func (err *refillError) Is(target error) bool { - return errors.Is(err.err, target) -} - -func refillWorkerAccount(ctx context.Context, a AccountStore, w Worker, contract api.ContractMetadata, bh, revisionSubmissionBuffer uint64) (accountID rhpv3.Account, refilled bool, rerr *refillError) { - wrapErr := func(err error, keysAndValues ...interface{}) *refillError { - if err == nil { - return nil - } - return &refillError{ - err: err, - keysAndValues: keysAndValues, - } - } - - // fetch the account - accountID, err := w.Account(ctx, contract.HostKey) - if err != nil { - rerr = wrapErr(err) - return - } - var account api.Account - account, err = a.Account(ctx, accountID, contract.HostKey) - if err != nil { - rerr = wrapErr(err) - return - } - - // check if the contract is too close to the proof window to be revised, - // trying to refill the account would result in the host not returning the - // revision and returning an obfuscated error - if (bh + revisionSubmissionBuffer) > contract.WindowStart { - rerr = wrapErr(fmt.Errorf("not refilling account since contract is too close to the proof window to be revised (%v > %v)", bh+revisionSubmissionBuffer, contract.WindowStart), - "accountID", account.ID, - "hostKey", contract.HostKey, - "blockHeight", bh, - ) - return - } - - // check if a host is potentially cheating before refilling. - // We only check against the max drift if the account's drift is - // negative because we don't care if we have more money than - // expected. 
- if account.Drift.Cmp(maxNegDrift) < 0 { - rerr = wrapErr(fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded), - "accountID", account.ID, - "hostKey", contract.HostKey, - "balance", account.Balance, - "drift", account.Drift, - ) - return - } - - // check if a resync is needed - if account.RequiresSync { - // sync the account - err = w.RHPSync(ctx, contract.ID, contract.HostKey, contract.HostIP, contract.SiamuxAddr) - if err != nil { - rerr = wrapErr(fmt.Errorf("failed to sync account's balance: %w", err), - "accountID", account.ID, - "hostKey", contract.HostKey, - ) - return - } - - // refetch the account after syncing - account, err = a.Account(ctx, accountID, contract.HostKey) - if err != nil { - rerr = wrapErr(err) - return - } - } - - // check if refill is needed - if account.Balance.Cmp(minBalance) >= 0 { - rerr = wrapErr(err) - return - } - - // fund the account - err = w.RHPFund(ctx, contract.ID, contract.HostKey, contract.HostIP, contract.SiamuxAddr, maxBalance) - if err != nil { - rerr = wrapErr(fmt.Errorf("failed to fund account: %w", err), - "accountID", account.ID, - "hostKey", contract.HostKey, - "balance", account.Balance, - "expected", maxBalance, - ) - } else { - refilled = true - } - return -} diff --git a/autopilot/alerts.go b/autopilot/alerts.go index 1d089c39d..47a926ad5 100644 --- a/autopilot/alerts.go +++ b/autopilot/alerts.go @@ -5,15 +5,12 @@ import ( "fmt" "time" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/alerts" - "go.sia.tech/renterd/api" "go.sia.tech/renterd/object" ) var ( - alertAccountRefillID = alerts.RandomAlertID() // constant until restarted alertHealthRefreshID = alerts.RandomAlertID() // constant until restarted alertLowBalanceID = alerts.RandomAlertID() // constant until restarted alertMigrationID = alerts.RandomAlertID() // constant until restarted @@ -54,26 +51,6 @@ func newAccountLowBalanceAlert(address types.Address, balance, allowance types.C 
} } -func newAccountRefillAlert(id rhpv3.Account, contract api.ContractMetadata, err refillError) alerts.Alert { - data := map[string]interface{}{ - "error": err.Error(), - "accountID": id.String(), - "contractID": contract.ID.String(), - "hostKey": contract.HostKey.String(), - } - for i := 0; i < len(err.keysAndValues); i += 2 { - data[fmt.Sprint(err.keysAndValues[i])] = err.keysAndValues[i+1] - } - - return alerts.Alert{ - ID: alerts.IDForAccount(alertAccountRefillID, id), - Severity: alerts.SeverityError, - Message: "Ephemeral account refill failed", - Data: data, - Timestamp: time.Now(), - } -} - func newContractPruningFailedAlert(hk types.PublicKey, version, release string, fcid types.FileContractID, err error) alerts.Alert { return alerts.Alert{ ID: alerts.IDForContract(alertPruningID, fcid), diff --git a/autopilot/autopilot.go b/autopilot/autopilot.go index 58fb0a9ec..8c45e950a 100644 --- a/autopilot/autopilot.go +++ b/autopilot/autopilot.go @@ -10,8 +10,6 @@ import ( "sync" "time" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/alerts" @@ -31,8 +29,7 @@ type Bus interface { webhooks.Broadcaster // Accounts - Account(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey) (account api.Account, err error) - Accounts(ctx context.Context) (accounts []api.Account, err error) + Accounts(ctx context.Context, owner string) (accounts []api.Account, err error) // Autopilots Autopilot(ctx context.Context, id string) (autopilot api.Autopilot, err error) @@ -42,20 +39,22 @@ type Bus interface { ConsensusState(ctx context.Context) (api.ConsensusState, error) // contracts - AddContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) - AddRenewedContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, renewedFrom 
types.FileContractID, state string) (api.ContractMetadata, error) AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error + BroadcastContract(ctx context.Context, fcid types.FileContractID) (types.TransactionID, error) Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) (contracts []api.ContractMetadata, err error) FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) - SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error + FormContract(ctx context.Context, renterAddress types.Address, renterFunds types.Currency, hostKey types.PublicKey, hostIP string, hostCollateral types.Currency, endHeight uint64) (api.ContractMetadata, error) + RenewContract(ctx context.Context, fcid types.FileContractID, endHeight uint64, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedNewStorage uint64) (api.ContractMetadata, error) + UpdateContractSet(ctx context.Context, set string, toAdd, toRemove []types.FileContractID) error PrunableData(ctx context.Context) (prunableData api.ContractsPrunableDataResponse, err error) + PruneContract(ctx context.Context, id types.FileContractID, timeout time.Duration) (api.ContractPruneResponse, error) // hostdb Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) - RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) + RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) UpdateHostCheck(ctx context.Context, autopilotID 
string, hostKey types.PublicKey, hostCheck api.HostCheck) error @@ -100,7 +99,6 @@ type Autopilot struct { logger *zap.SugaredLogger workers *workerPool - a *accounts c *contractor.Contractor m *migrator s scanner.Scanner @@ -149,7 +147,6 @@ func New(cfg config.Autopilot, bus Bus, workers []Worker, logger *zap.Logger) (_ ap.c = contractor.New(bus, bus, ap.logger, cfg.RevisionSubmissionBuffer, cfg.RevisionBroadcastInterval) ap.m = newMigrator(ap, cfg.MigrationHealthCutoff, cfg.MigratorParallelSlabsPerWorker) - ap.a = newAccounts(ap, ap.bus, ap.bus, ap.workers, ap.logger, cfg.AccountsRefillInterval, cfg.RevisionSubmissionBuffer) return ap, nil } @@ -240,7 +237,6 @@ func (ap *Autopilot) Run() { } var forceScan bool - var launchAccountRefillsOnce sync.Once for !ap.isStopped() { ap.logger.Info("autopilot iteration starting") tickerFired := make(chan struct{}) @@ -323,20 +319,12 @@ func (ap *Autopilot) Run() { ap.m.SignalMaintenanceFinished() } - // launch account refills after successful contract maintenance. - if maintenanceSuccess { - launchAccountRefillsOnce.Do(func() { - ap.logger.Info("account refills loop launched") - go ap.a.refillWorkersAccountsLoop(ap.shutdownCtx) - }) - } - // migration ap.m.tryPerformMigrations(ap.workers) // pruning if autopilot.Config.Contracts.Prune { - ap.tryPerformPruning(ap.workers) + ap.tryPerformPruning() } else { ap.logger.Info("pruning disabled") } @@ -352,7 +340,6 @@ func (ap *Autopilot) Run() { case <-tickerFired: } } - return } // Shutdown shuts down the autopilot. 
@@ -685,6 +672,9 @@ func (ap *Autopilot) configHandlerPUT(jc jape.Context) { autopilot, err := ap.bus.Autopilot(jc.Request.Context(), ap.id) if utils.IsErr(err, api.ErrAutopilotNotFound) { autopilot = api.Autopilot{ID: ap.id, Config: cfg} + } else if err != nil { + jc.Error(err, http.StatusInternalServerError) + return } else { if autopilot.Config.Contracts.Set != cfg.Contracts.Set { contractSetChanged = true diff --git a/autopilot/contract_pruning.go b/autopilot/contract_pruning.go index 7822fb326..0189430ec 100644 --- a/autopilot/contract_pruning.go +++ b/autopilot/contract_pruning.go @@ -105,15 +105,17 @@ func (ap *Autopilot) fetchHostContract(fcid types.FileContractID) (host api.Host return } -func (ap *Autopilot) performContractPruning(wp *workerPool) { - ap.logger.Info("performing contract pruning") +func (ap *Autopilot) performContractPruning() { + log := ap.logger.Named("performContractPruning") + log.Info("performing contract pruning") // fetch prunable contracts prunable, err := ap.fetchPrunableContracts() if err != nil { - ap.logger.Error(err) + log.Error(err) return } + log.Debugf("found %d prunable contracts", len(prunable)) // dismiss alerts for contracts that are no longer prunable ap.dismissPruneAlerts(prunable) @@ -129,39 +131,70 @@ func (ap *Autopilot) performContractPruning(wp *workerPool) { // fetch host h, _, err := ap.fetchHostContract(contract.ID) if utils.IsErr(err, api.ErrContractNotFound) { + log.Debugw("contract got archived", "contract", contract.ID) continue // contract got archived } else if err != nil { - ap.logger.Errorf("failed to fetch host for contract '%v', err %v", contract.ID, err) + log.Errorw("failed to fetch host", zap.Error(err), "contract", contract.ID) continue } - // prune contract using a random worker - wp.withWorker(func(w Worker) { - total += ap.pruneContract(w, contract.ID, h.PublicKey, h.Settings.Version, h.Settings.Release) - }) + // prune contract + n, err := ap.pruneContract(ap.shutdownCtx, contract.ID, 
h.PublicKey, h.Settings.Version, h.Settings.Release, log) + if err != nil { + log.Errorw("failed to prune contract", zap.Error(err), "contract", contract.ID) + continue + } + + // handle alerts + ap.mu.Lock() + alertID := alerts.IDForContract(alertPruningID, contract.ID) + if shouldSendPruneAlert(err, h.Settings.Version, h.Settings.Release) { + ap.RegisterAlert(ap.shutdownCtx, newContractPruningFailedAlert(h.PublicKey, h.Settings.Version, h.Settings.Release, contract.ID, err)) + ap.pruningAlertIDs[contract.ID] = alertID // store id to dismiss stale alerts + } else { + ap.DismissAlert(ap.shutdownCtx, alertID) + delete(ap.pruningAlertIDs, contract.ID) + } + ap.mu.Unlock() + + // adjust total + total += n } // log total pruned - ap.logger.Info(fmt.Sprintf("pruned %d (%s) from %v contracts", total, humanReadableSize(int(total)), len(prunable))) + log.Info(fmt.Sprintf("pruned %d (%s) from %v contracts", total, humanReadableSize(int(total)), len(prunable))) } -func (ap *Autopilot) pruneContract(w Worker, fcid types.FileContractID, hk types.PublicKey, hostVersion, hostRelease string) uint64 { - // use a sane timeout - ctx, cancel := context.WithTimeout(ap.shutdownCtx, timeoutPruneContract+5*time.Minute) - defer cancel() +func (ap *Autopilot) pruneContract(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, hostVersion, hostRelease string, logger *zap.SugaredLogger) (uint64, error) { + // define logger + log := logger.With( + zap.Stringer("contract", fcid), + zap.Stringer("host", hk), + zap.String("version", hostVersion), + zap.String("release", hostRelease)) // prune the contract start := time.Now() - pruned, remaining, err := w.RHPPruneContract(ctx, fcid, timeoutPruneContract) - duration := time.Since(start) + res, err := ap.bus.PruneContract(ctx, fcid, timeoutPruneContract) + if err != nil { + return 0, err + } + + // decorate logger + log = log.With( + zap.String("pruned", utils.HumanReadableSize(int(res.Pruned))), + zap.String("remaining", 
utils.HumanReadableSize(int(res.Remaining))), + zap.String("size", utils.HumanReadableSize(int(res.ContractSize))), + zap.Duration("elapsed", time.Since(start)), + ) // ignore slow pruning until host network is 1.6.0+ - if utils.IsErr(err, context.DeadlineExceeded) && pruned > 0 { - err = nil + if res.Error != "" && utils.IsErr(errors.New(res.Error), context.DeadlineExceeded) && res.Pruned > 0 { + res.Error = "" } // handle metrics - if err == nil || pruned > 0 { + if res.Pruned > 0 { if err := ap.bus.RecordContractPruneMetric(ctx, api.ContractPruneMetric{ Timestamp: api.TimeRFC3339(start), @@ -169,40 +202,25 @@ func (ap *Autopilot) pruneContract(w Worker, fcid types.FileContractID, hk types HostKey: hk, HostVersion: hostVersion, - Pruned: pruned, - Remaining: remaining, - Duration: duration, + Pruned: res.Pruned, + Remaining: res.Remaining, + Duration: time.Since(start), }); err != nil { ap.logger.Error(err) } } // handle logs - log := ap.logger.With("contract", fcid, "host", hk, "version", hostVersion, "release", hostRelease, "pruned", pruned, "remaining", remaining, "elapsed", duration) - if err != nil && pruned > 0 { - log.With(zap.Error(err)).Error("unexpected error interrupted pruning") - } else if err != nil { - log.With(zap.Error(err)).Error("failed to prune contract") + if res.Error != "" { + log.Errorw("unexpected error interrupted pruning", zap.Error(errors.New(res.Error))) } else { log.Info("successfully pruned contract") } - // handle alerts - ap.mu.Lock() - defer ap.mu.Unlock() - alertID := alerts.IDForContract(alertPruningID, fcid) - if shouldSendPruneAlert(err, hostVersion, hostRelease) { - ap.RegisterAlert(ctx, newContractPruningFailedAlert(hk, hostVersion, hostRelease, fcid, err)) - ap.pruningAlertIDs[fcid] = alertID // store id to dismiss stale alerts - } else { - ap.DismissAlert(ctx, alertID) - delete(ap.pruningAlertIDs, fcid) - } - - return pruned + return res.Pruned, nil } -func (ap *Autopilot) tryPerformPruning(wp *workerPool) { +func (ap 
*Autopilot) tryPerformPruning() { ap.mu.Lock() if ap.pruning || ap.isStopped() { ap.mu.Unlock() @@ -215,7 +233,7 @@ func (ap *Autopilot) tryPerformPruning(wp *workerPool) { ap.wg.Add(1) go func() { defer ap.wg.Done() - ap.performContractPruning(wp) + ap.performContractPruning() ap.mu.Lock() ap.pruning = false ap.mu.Unlock() diff --git a/autopilot/contractor/contractor.go b/autopilot/contractor/contractor.go index e82253d43..35ede27c9 100644 --- a/autopilot/contractor/contractor.go +++ b/autopilot/contractor/contractor.go @@ -81,27 +81,25 @@ const ( ) type Bus interface { - AddContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) - AddRenewedContract(ctx context.Context, c rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, renewedFrom types.FileContractID, state string) (api.ContractMetadata, error) AncestorContracts(ctx context.Context, id types.FileContractID, minStartHeight uint64) ([]api.ArchivedContract, error) ArchiveContracts(ctx context.Context, toArchive map[types.FileContractID]string) error + BroadcastContract(ctx context.Context, fcid types.FileContractID) (types.TransactionID, error) ConsensusState(ctx context.Context) (api.ConsensusState, error) Contract(ctx context.Context, id types.FileContractID) (api.ContractMetadata, error) Contracts(ctx context.Context, opts api.ContractsOpts) (contracts []api.ContractMetadata, err error) FileContractTax(ctx context.Context, payout types.Currency) (types.Currency, error) + FormContract(ctx context.Context, renterAddress types.Address, renterFunds types.Currency, hostKey types.PublicKey, hostIP string, hostCollateral types.Currency, endHeight uint64) (api.ContractMetadata, error) + RenewContract(ctx context.Context, fcid types.FileContractID, endHeight uint64, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedNewStorage uint64) (api.ContractMetadata, error) 
Host(ctx context.Context, hostKey types.PublicKey) (api.Host, error) RecordContractSetChurnMetric(ctx context.Context, metrics ...api.ContractSetChurnMetric) error SearchHosts(ctx context.Context, opts api.SearchHostOptions) ([]api.Host, error) - SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error + UpdateContractSet(ctx context.Context, set string, toAdd, toRemove []types.FileContractID) error UpdateHostCheck(ctx context.Context, autopilotID string, hostKey types.PublicKey, hostCheck api.HostCheck) error } type Worker interface { Contracts(ctx context.Context, hostTimeout time.Duration) (api.ContractsResponse, error) - RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) - RHPForm(ctx context.Context, endHeight uint64, hk types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) - RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedNewStorage, windowSize uint64) (api.RHPRenewResponse, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) } @@ -133,9 +131,6 @@ type ( revisionSubmissionBuffer uint64 firstRefreshFailure map[types.FileContractID]time.Time - - shutdownCtx context.Context - shutdownCtxCancel context.CancelFunc } scoredHost struct { @@ -168,7 +163,6 @@ type ( func New(bus Bus, alerter alerts.Alerter, logger *zap.SugaredLogger, revisionSubmissionBuffer uint64, revisionBroadcastInterval time.Duration) *Contractor { logger = logger.Named("contractor") - ctx, cancel := context.WithCancel(context.Background()) return 
&Contractor{ bus: bus, alerter: alerter, @@ -180,17 +174,9 @@ func New(bus Bus, alerter alerts.Alerter, logger *zap.SugaredLogger, revisionSub revisionSubmissionBuffer: revisionSubmissionBuffer, firstRefreshFailure: make(map[types.FileContractID]time.Time), - - shutdownCtx: ctx, - shutdownCtxCancel: cancel, } } -func (c *Contractor) Close() error { - c.shutdownCtxCancel() - return nil -} - func (c *Contractor) PerformContractMaintenance(ctx context.Context, w Worker, state *MaintenanceState) (bool, error) { return performContractMaintenance(newMaintenanceCtx(ctx, state), c.alerter, c.bus, c.churn, w, c, c, c, c.logger) } @@ -228,7 +214,7 @@ func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitial hostCollateral := rhpv2.ContractFormationCollateral(ctx.Period(), expectedStorage, scan.Settings) // form contract - contract, _, err := w.RHPForm(ctx, endHeight, hk, host.NetAddress, ctx.state.Address, renterFunds, hostCollateral) + contract, err := c.bus.FormContract(ctx, ctx.state.Address, renterFunds, hk, host.NetAddress, hostCollateral, endHeight) if err != nil { // TODO: keep track of consecutive failures and break at some point logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) @@ -241,20 +227,12 @@ func (c *Contractor) formContract(ctx *mCtx, w Worker, host api.Host, minInitial // update the budget *budget = budget.Sub(renterFunds) - // persist contract in store - contractPrice := contract.Revision.MissedHostPayout().Sub(hostCollateral) - formedContract, err := c.bus.AddContract(ctx, contract, contractPrice, renterFunds, cs.BlockHeight, api.ContractStatePending) - if err != nil { - logger.Errorw(fmt.Sprintf("contract formation failed, err: %v", err), "hk", hk) - return api.ContractMetadata{}, true, err - } - logger.Infow("formation succeeded", - "fcid", formedContract.ID, + "fcid", contract.ID, "renterFunds", renterFunds.String(), "collateral", hostCollateral.String(), ) - return formedContract, true, nil + 
return contract, true, nil } func (c *Contractor) initialContractFunding(settings rhpv2.HostSettings, txnFee, minFunding, maxFunding types.Currency) types.Currency { @@ -328,7 +306,7 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, contract api.Contract, maxFundAmount := budget.Add(rev.ValidRenterPayout()) // renew the contract - resp, err := w.RHPRenew(ctx, contract.ID, contract.EndHeight(), hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, minNewCollateral, maxFundAmount, expectedNewStorage, settings.WindowSize) + renewal, err := c.bus.RenewContract(ctx, contract.ID, contract.EndHeight(), renterFunds, minNewCollateral, maxFundAmount, expectedNewStorage) if err != nil { if strings.Contains(err.Error(), "new collateral is too low") { logger.Infow("refresh failed: contract wouldn't have enough collateral after refresh", @@ -347,25 +325,16 @@ func (c *Contractor) refreshContract(ctx *mCtx, w Worker, contract api.Contract, } // update the budget - *budget = budget.Sub(resp.FundAmount) - - // persist the contract - refreshedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, contract.ID, api.ContractStatePending) - if err != nil { - logger.Errorw("adding refreshed contract failed", zap.Error(err), "hk", hk, "fcid", fcid) - return api.ContractMetadata{}, false, err - } + *budget = budget.Sub(renewal.TotalCost) // add to renewed set - newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) logger.Infow("refresh succeeded", - "fcid", refreshedContract.ID, - "renewedFrom", contract.ID, + "fcid", renewal.ID, + "renewedFrom", renewal.RenewedFrom, "renterFunds", renterFunds.String(), "minNewCollateral", minNewCollateral.String(), - "newCollateral", newCollateral.String(), ) - return refreshedContract, true, nil + return renewal, true, nil } func (c *Contractor) renewContract(ctx *mCtx, w Worker, contract api.Contract, host api.Host, budget 
*types.Currency, logger *zap.SugaredLogger) (cm api.ContractMetadata, proceed bool, err error) { @@ -375,11 +344,9 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, contract api.Contract, h logger = logger.With("to_renew", contract.ID, "hk", contract.HostKey, "hostVersion", host.Settings.Version, "hostRelease", host.Settings.Release) // convenience variables - settings := host.Settings pt := host.PriceTable.HostPriceTable fcid := contract.ID rev := contract.Revision - hk := contract.HostKey // fetch consensus state cs, err := c.bus.ConsensusState(ctx) @@ -409,7 +376,7 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, contract api.Contract, h expectedNewStorage := renterFundsToExpectedStorage(renterFunds, endHeight-cs.BlockHeight, pt) // renew the contract - resp, err := w.RHPRenew(ctx, fcid, endHeight, hk, contract.SiamuxAddr, settings.Address, ctx.state.Address, renterFunds, types.ZeroCurrency, *budget, expectedNewStorage, settings.WindowSize) + renewal, err := c.bus.RenewContract(ctx, fcid, endHeight, renterFunds, types.ZeroCurrency, *budget, expectedNewStorage) if err != nil { logger.Errorw( "renewal failed", @@ -425,24 +392,15 @@ func (c *Contractor) renewContract(ctx *mCtx, w Worker, contract api.Contract, h } // update the budget - *budget = budget.Sub(resp.FundAmount) + *budget = budget.Sub(renewal.TotalCost) - // persist the contract - renewedContract, err := c.bus.AddRenewedContract(ctx, resp.Contract, resp.ContractPrice, renterFunds, cs.BlockHeight, fcid, api.ContractStatePending) - if err != nil { - logger.Errorw(fmt.Sprintf("renewal failed to persist, err: %v", err)) - return api.ContractMetadata{}, false, err - } - - newCollateral := resp.Contract.Revision.MissedHostPayout().Sub(resp.ContractPrice) logger.Infow( "renewal succeeded", - "fcid", renewedContract.ID, - "renewedFrom", fcid, + "fcid", renewal.ID, + "renewedFrom", renewal.RenewedFrom, "renterFunds", renterFunds.String(), - "newCollateral", newCollateral.String(), ) - return 
renewedContract, true, nil + return renewal, true, nil } // broadcastRevisions broadcasts contract revisions from the current set of @@ -475,7 +433,7 @@ func (c *Contractor) broadcastRevisions(ctx context.Context, w Worker, contracts // broadcast revision ctx, cancel := context.WithTimeout(ctx, timeoutBroadcastRevision) - err := w.RHPBroadcast(ctx, contract.ID) + _, err := c.bus.BroadcastContract(ctx, contract.ID) cancel() if utils.IsErr(err, errors.New("transaction has a file contract with an outdated revision number")) { continue // don't log - revision was already broadcasted @@ -1377,16 +1335,14 @@ func performContractMaintenance(ctx *mCtx, alerter alerts.Alerter, bus Bus, chur return false, fmt.Errorf("failed to fetch old contract set: %w", err) } - // STEP 4: update contract set + // merge kept and formed contracts into new set newSet := make([]api.ContractMetadata, 0, len(keptContracts)+len(formedContracts)) newSet = append(newSet, keptContracts...) newSet = append(newSet, formedContracts...) 
- var newSetIDs []types.FileContractID - for _, contract := range newSet { - newSetIDs = append(newSetIDs, contract.ID) - } - if err := bus.SetContractSet(ctx, ctx.ContractSet(), newSetIDs); err != nil { - return false, fmt.Errorf("failed to update contract set: %w", err) + + // STEP 4: update contract set + if err := updateContractSet(ctx, bus, oldSet, newSet); err != nil { + return false, err } // STEP 5: perform minor maintenance such as cleanups and broadcasting @@ -1398,3 +1354,31 @@ func performContractMaintenance(ctx *mCtx, alerter alerts.Alerter, bus Bus, chur // STEP 6: log changes and register alerts return computeContractSetChanged(ctx, alerter, bus, churn, logger, oldSet, newSet, churnReasons) } + +func updateContractSet(ctx *mCtx, bus Bus, oldSet, newSet []api.ContractMetadata) error { + var newSetIDs []types.FileContractID + for _, contract := range newSet { + newSetIDs = append(newSetIDs, contract.ID) + } + inOldSet := make(map[types.FileContractID]struct{}) + for _, c := range oldSet { + inOldSet[c.ID] = struct{}{} + } + var toAdd []types.FileContractID + for _, c := range newSet { + if _, ok := inOldSet[c.ID]; !ok { + toAdd = append(toAdd, c.ID) + } + // only keep contracts that are in the old but not the new set + delete(inOldSet, c.ID) + } + + var toRemove []types.FileContractID + for id := range inOldSet { + toRemove = append(toRemove, id) + } + if err := bus.UpdateContractSet(ctx, ctx.ContractSet(), newSetIDs, toRemove); err != nil { + return fmt.Errorf("failed to update contract set: %w", err) + } + return nil +} diff --git a/autopilot/contractor/hostscore_test.go b/autopilot/contractor/hostscore_test.go index 9b2cdea47..6acc309ec 100644 --- a/autopilot/contractor/hostscore_test.go +++ b/autopilot/contractor/hostscore_test.go @@ -26,8 +26,8 @@ var cfg = api.AutopilotConfig{ Set: api.DefaultAutopilotID, }, Hosts: api.HostsConfig{ - MaxDowntimeHours: 24 * 7 * 2, - MinRecentScanFailures: 10, + MaxDowntimeHours: 24 * 7 * 2, + 
MaxConsecutiveScanFailures: 10, }, } diff --git a/autopilot/scanner/scanner.go b/autopilot/scanner/scanner.go index 6c34274ad..f79147a17 100644 --- a/autopilot/scanner/scanner.go +++ b/autopilot/scanner/scanner.go @@ -20,7 +20,7 @@ const ( type ( HostStore interface { HostsForScanning(ctx context.Context, opts api.HostsForScanningOptions) ([]api.HostAddress, error) - RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) + RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) } Scanner interface { @@ -268,12 +268,12 @@ func (s *scanner) removeOfflineHosts(ctx context.Context) (removed uint64) { s.logger.Infow("removing offline hosts", "maxDowntime", maxDowntime, - "minRecentScanFailures", s.hostsCfg.MinRecentScanFailures) + "maxConsecutiveScanFailures", s.hostsCfg.MaxConsecutiveScanFailures) var err error - removed, err = s.hs.RemoveOfflineHosts(ctx, s.hostsCfg.MinRecentScanFailures, maxDowntime) + removed, err = s.hs.RemoveOfflineHosts(ctx, s.hostsCfg.MaxConsecutiveScanFailures, maxDowntime) if err != nil { - s.logger.Errorw("removing offline hosts failed", zap.Error(err), "maxDowntime", maxDowntime, "minRecentScanFailures", s.hostsCfg.MinRecentScanFailures) + s.logger.Errorw("removing offline hosts failed", zap.Error(err), "maxDowntime", maxDowntime, "maxConsecutiveScanFailures", s.hostsCfg.MaxConsecutiveScanFailures) return } diff --git a/autopilot/scanner/scanner_test.go b/autopilot/scanner/scanner_test.go index ee847395b..665913ab0 100644 --- a/autopilot/scanner/scanner_test.go +++ b/autopilot/scanner/scanner_test.go @@ -51,10 +51,10 @@ func (hs *mockHostStore) HostsForScanning(ctx context.Context, opts api.HostsFor return hostAddresses, nil } -func (hs *mockHostStore) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) { +func (hs *mockHostStore) RemoveOfflineHosts(ctx 
context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) { hs.mu.Lock() defer hs.mu.Unlock() - hs.removals = append(hs.removals, fmt.Sprintf("%d-%d", minRecentScanFailures, maxDowntime)) + hs.removals = append(hs.removals, fmt.Sprintf("%d-%d", maxConsecutiveScanFailures, maxDowntime)) return 0, nil } @@ -146,8 +146,8 @@ func TestScanner(t *testing.T) { // update the hosts config s.UpdateHostsConfig(api.HostsConfig{ - MinRecentScanFailures: 10, - MaxDowntimeHours: 1, + MaxConsecutiveScanFailures: 10, + MaxDowntimeHours: 1, }) s.Scan(context.Background(), w, true) diff --git a/autopilot/workerpool.go b/autopilot/workerpool.go index 990498e62..871f1babc 100644 --- a/autopilot/workerpool.go +++ b/autopilot/workerpool.go @@ -5,7 +5,6 @@ import ( "sync" "time" - rhpv2 "go.sia.tech/core/rhp/v2" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" @@ -19,14 +18,8 @@ type Worker interface { ID(ctx context.Context) (string, error) MigrateSlab(ctx context.Context, s object.Slab, set string) (api.MigrateSlabResponse, error) - RHPBroadcast(ctx context.Context, fcid types.FileContractID) (err error) - RHPForm(ctx context.Context, endHeight uint64, hk types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) - RHPFund(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string, balance types.Currency) (err error) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (api.HostPriceTable, error) - RHPPruneContract(ctx context.Context, fcid types.FileContractID, timeout time.Duration) (pruned, remaining uint64, err error) - RHPRenew(ctx context.Context, fcid types.FileContractID, endHeight uint64, hk types.PublicKey, hostIP string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, 
maxFundAmount types.Currency, expectedStorage, windowSize uint64) (api.RHPRenewResponse, error) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (api.RHPScanResponse, error) - RHPSync(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string) (err error) } // workerPool contains all workers known to the autopilot. Users can call diff --git a/bus/bus.go b/bus/bus.go index 431d5abd5..832b603b0 100644 --- a/bus/bus.go +++ b/bus/bus.go @@ -8,7 +8,8 @@ import ( "encoding/json" "errors" "fmt" - "math/big" + "math" + "net" "net/http" "strings" "time" @@ -26,17 +27,29 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/bus/client" ibus "go.sia.tech/renterd/internal/bus" + "go.sia.tech/renterd/internal/gouging" + "go.sia.tech/renterd/internal/rhp" + rhp2 "go.sia.tech/renterd/internal/rhp/v2" + rhp3 "go.sia.tech/renterd/internal/rhp/v3" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/stores/sql" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" + "golang.org/x/crypto/blake2b" ) const ( defaultWalletRecordMetricInterval = 5 * time.Minute defaultPinUpdateInterval = 5 * time.Minute defaultPinRateWindow = 6 * time.Hour - stdTxnSize = 1200 // bytes + + lockingPriorityPruning = 20 + lockingPriorityFunding = 40 + lockingPriorityRenew = 80 + lockingPriorityBroadcast = 100 + + stdTxnSize = 1200 // bytes ) // Client re-exports the client from the client package. 
@@ -55,18 +68,6 @@ func NewClient(addr, password string) *Client { } type ( - AccountManager interface { - Account(id rhpv3.Account, hostKey types.PublicKey) (api.Account, error) - Accounts() []api.Account - AddAmount(id rhpv3.Account, hk types.PublicKey, amt *big.Int) - LockAccount(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, exclusive bool, duration time.Duration) (api.Account, uint64) - ResetDrift(id rhpv3.Account) error - SetBalance(id rhpv3.Account, hk types.PublicKey, balance *big.Int) - ScheduleSync(id rhpv3.Account, hk types.PublicKey) error - Shutdown(context.Context) error - UnlockAccount(id rhpv3.Account, lockID uint64) error - } - AlertManager interface { alerts.Alerter RegisterWebhookBroadcaster(b webhooks.Broadcaster) @@ -86,7 +87,7 @@ type ( TipState() consensus.State UnconfirmedParents(txn types.Transaction) []types.Transaction UpdatesSince(index types.ChainIndex, max int) (rus []chain.RevertUpdate, aus []chain.ApplyUpdate, err error) - V2UnconfirmedParents(txn types.V2Transaction) []types.V2Transaction + V2TransactionSet(basis types.ChainIndex, txn types.V2Transaction) (types.ChainIndex, []types.V2Transaction, error) } ContractLocker interface { @@ -138,14 +139,14 @@ type ( Balance() (wallet.Balance, error) Close() error FundTransaction(txn *types.Transaction, amount types.Currency, useUnconfirmed bool) ([]types.Hash256, error) - FundV2Transaction(txn *types.V2Transaction, amount types.Currency, useUnconfirmed bool) (consensus.State, []int, error) + FundV2Transaction(txn *types.V2Transaction, amount types.Currency, useUnconfirmed bool) (types.ChainIndex, []int, error) Redistribute(outputs int, amount, feePerByte types.Currency) (txns []types.Transaction, toSign []types.Hash256, err error) RedistributeV2(outputs int, amount, feePerByte types.Currency) (txns []types.V2Transaction, toSign [][]int, err error) ReleaseInputs(txns []types.Transaction, v2txns []types.V2Transaction) SignTransaction(txn *types.Transaction, toSign 
[]types.Hash256, cf types.CoveredFields) - SignV2Inputs(state consensus.State, txn *types.V2Transaction, toSign []int) + SignV2Inputs(txn *types.V2Transaction, toSign []int) SpendableOutputs() ([]types.SiacoinElement, error) - Tip() (types.ChainIndex, error) + Tip() types.ChainIndex UnconfirmedEvents() ([]wallet.Event, error) UpdateChainState(tx wallet.UpdateTx, reverted []chain.RevertUpdate, applied []chain.ApplyUpdate) error Events(offset, limit int) ([]wallet.Event, error) @@ -174,9 +175,8 @@ type ( // are rapidly updated and can be recovered, they are only loaded upon // startup and persisted upon shutdown. AccountStore interface { - Accounts(context.Context) ([]api.Account, error) + Accounts(context.Context, string) ([]api.Account, error) SaveAccounts(context.Context, []api.Account) error - SetUncleanShutdown(context.Context) error } // An AutopilotStore stores autopilots. @@ -200,7 +200,7 @@ type ( HostsForScanning(ctx context.Context, maxLastScan time.Time, offset, limit int) ([]api.HostAddress, error) RecordHostScans(ctx context.Context, scans []api.HostScan) error RecordPriceTables(ctx context.Context, priceTableUpdate []api.HostPriceTableUpdate) error - RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (uint64, error) + RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (uint64, error) ResetLostSectors(ctx context.Context, hk types.PublicKey) error SearchHosts(ctx context.Context, autopilotID, filterMode, usabilityMode, addressContains string, keyIn []types.PublicKey, offset, limit int) ([]api.Host, error) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error @@ -222,11 +222,12 @@ type ( RecordContractSpending(ctx context.Context, records []api.ContractSpendingRecord) error RemoveContractSet(ctx context.Context, name string) error RenewedContract(ctx context.Context, renewedFrom types.FileContractID) 
(api.ContractMetadata, error) - SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) error + UpdateContractSet(ctx context.Context, set string, toAdd, toRemove []types.FileContractID) error ContractRoots(ctx context.Context, id types.FileContractID) ([]types.Hash256, error) ContractSizes(ctx context.Context) (map[types.FileContractID]api.ContractSize, error) ContractSize(ctx context.Context, id types.FileContractID) (api.ContractSize, error) + PrunableContractRoots(ctx context.Context, id types.FileContractID, roots []types.Hash256) ([]uint64, error) DeleteHostSector(ctx context.Context, hk types.PublicKey, root types.Hash256) (int, error) @@ -303,8 +304,8 @@ type ( type Bus struct { startTime time.Time + masterKey utils.MasterKey - accountsMgr AccountManager alerts alerts.Alerter alertMgr AlertManager pinMgr PinManager @@ -314,11 +315,15 @@ type Bus struct { s Syncer w Wallet - as AutopilotStore - hs HostStore - ms MetadataStore - mtrcs MetricsStore - ss SettingStore + accounts AccountStore + as AutopilotStore + hs HostStore + ms MetadataStore + mtrcs MetricsStore + ss SettingStore + + rhp2 *rhp2.Client + rhp3 *rhp3.Client contractLocker ContractLocker sectors UploadingSectorsCache @@ -328,25 +333,30 @@ type Bus struct { } // New returns a new Bus -func New(ctx context.Context, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) { +func New(ctx context.Context, masterKey [32]byte, am AlertManager, wm WebhooksManager, cm ChainManager, s Syncer, w Wallet, store Store, announcementMaxAge time.Duration, l *zap.Logger) (_ *Bus, err error) { l = l.Named("bus") b := &Bus{ - s: s, - cm: cm, - w: w, - hs: store, - as: store, - ms: store, - mtrcs: store, - ss: store, + startTime: time.Now(), + masterKey: masterKey, + + accounts: store, + s: s, + cm: cm, + w: w, + hs: store, + as: store, + ms: store, + mtrcs: store, + ss: store, alerts: 
alerts.WithOrigin(am, "bus"), alertMgr: am, webhooksMgr: wm, logger: l.Sugar(), - startTime: time.Now(), + rhp2: rhp2.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l), + rhp3: rhp3.New(rhp.NewFallbackDialer(store, net.Dialer{}, l), l), } // init settings @@ -354,12 +364,6 @@ func New(ctx context.Context, am AlertManager, wm WebhooksManager, cm ChainManag return nil, err } - // create account manager - b.accountsMgr, err = ibus.NewAccountManager(ctx, store, l) - if err != nil { - return nil, err - } - // create contract locker b.contractLocker = ibus.NewContractLocker() @@ -381,14 +385,9 @@ func New(ctx context.Context, am AlertManager, wm WebhooksManager, cm ChainManag // Handler returns an HTTP handler that serves the bus API. func (b *Bus) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ - "GET /accounts": b.accountsHandlerGET, - "POST /account/:id": b.accountHandlerGET, - "POST /account/:id/add": b.accountsAddHandlerPOST, - "POST /account/:id/lock": b.accountsLockHandlerPOST, - "POST /account/:id/unlock": b.accountsUnlockHandlerPOST, - "POST /account/:id/update": b.accountsUpdateHandlerPOST, - "POST /account/:id/requiressync": b.accountsRequiresSyncHandlerPOST, - "POST /account/:id/resetdrift": b.accountsResetDriftHandlerPOST, + "GET /accounts": b.accountsHandlerGET, + "POST /accounts": b.accountsHandlerPOST, + "POST /accounts/fund": b.accountsFundHandler, "GET /alerts": b.handleGETAlerts, "POST /alerts/dismiss": b.handlePOSTAlertsDismiss, @@ -411,13 +410,14 @@ func (b *Bus) Handler() http.Handler { "GET /consensus/siafundfee/:payout": b.contractTaxHandlerGET, "GET /consensus/state": b.consensusStateHandler, + "POST /contracts": b.contractsFormHandler, "GET /contracts": b.contractsHandlerGET, "DELETE /contracts/all": b.contractsAllHandlerDELETE, "POST /contracts/archive": b.contractsArchiveHandlerPOST, "GET /contracts/prunable": b.contractsPrunableDataHandlerGET, "GET /contracts/renewed/:id": b.contractsRenewedIDHandlerGET, "GET 
/contracts/sets": b.contractsSetsHandlerGET, - "PUT /contracts/set/:set": b.contractsSetHandlerPUT, + "POST /contracts/set/:set": b.contractsSetHandlerPUT, "DELETE /contracts/set/:set": b.contractsSetHandlerDELETE, "POST /contracts/spending": b.contractsSpendingHandlerPOST, "GET /contract/:id": b.contractIDHandlerGET, @@ -425,7 +425,10 @@ func (b *Bus) Handler() http.Handler { "DELETE /contract/:id": b.contractIDHandlerDELETE, "POST /contract/:id/acquire": b.contractAcquireHandlerPOST, "GET /contract/:id/ancestors": b.contractIDAncestorsHandler, + "POST /contract/:id/broadcast": b.contractIDBroadcastHandler, "POST /contract/:id/keepalive": b.contractKeepaliveHandlerPOST, + "POST /contract/:id/prune": b.contractPruneHandlerPOST, + "POST /contract/:id/renew": b.contractIDRenewHandlerPOST, "POST /contract/:id/renewed": b.contractIDRenewedHandlerPOST, "POST /contract/:id/release": b.contractReleaseHandlerPOST, "GET /contract/:id/roots": b.contractIDRootsHandlerGET, @@ -502,17 +505,15 @@ func (b *Bus) Handler() http.Handler { "DELETE /upload/:id": b.uploadFinishedHandlerDELETE, "POST /upload/:id/sector": b.uploadAddSectorHandlerPOST, - "GET /wallet": b.walletHandler, - "POST /wallet/discard": b.walletDiscardHandler, - "POST /wallet/fund": b.walletFundHandler, - "GET /wallet/outputs": b.walletOutputsHandler, - "GET /wallet/pending": b.walletPendingHandler, - "POST /wallet/prepare/form": b.walletPrepareFormHandler, - "POST /wallet/prepare/renew": b.walletPrepareRenewHandler, - "POST /wallet/redistribute": b.walletRedistributeHandler, - "POST /wallet/send": b.walletSendSiacoinsHandler, - "POST /wallet/sign": b.walletSignHandler, - "GET /wallet/transactions": b.walletTransactionsHandler, + "GET /wallet": b.walletHandler, + "POST /wallet/discard": b.walletDiscardHandler, + "POST /wallet/fund": b.walletFundHandler, + "GET /wallet/outputs": b.walletOutputsHandler, + "GET /wallet/pending": b.walletPendingHandler, + "POST /wallet/redistribute": b.walletRedistributeHandler, + 
"POST /wallet/send": b.walletSendSiacoinsHandler, + "POST /wallet/sign": b.walletSignHandler, + "GET /wallet/transactions": b.walletTransactionsHandler, "GET /webhooks": b.webhookHandlerGet, "POST /webhooks": b.webhookHandlerPost, @@ -525,13 +526,149 @@ func (b *Bus) Handler() http.Handler { func (b *Bus) Shutdown(ctx context.Context) error { return errors.Join( b.walletMetricsRecorder.Shutdown(ctx), - b.accountsMgr.Shutdown(ctx), b.webhooksMgr.Shutdown(ctx), b.pinMgr.Shutdown(ctx), b.cs.Shutdown(ctx), ) } +func (b *Bus) addContract(ctx context.Context, rev rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) { + c, err := b.ms.AddContract(ctx, rev, contractPrice, totalCost, startHeight, state) + if err != nil { + return api.ContractMetadata{}, err + } + + b.broadcastAction(webhooks.Event{ + Module: api.ModuleContract, + Event: api.EventAdd, + Payload: api.EventContractAdd{ + Added: c, + Timestamp: time.Now().UTC(), + }, + }) + return c, nil +} + +func (b *Bus) addRenewedContract(ctx context.Context, renewedFrom types.FileContractID, rev rhpv2.ContractRevision, contractPrice, totalCost types.Currency, startHeight uint64, state string) (api.ContractMetadata, error) { + r, err := b.ms.AddRenewedContract(ctx, rev, contractPrice, totalCost, startHeight, renewedFrom, state) + if err != nil { + return api.ContractMetadata{}, err + } + + b.sectors.HandleRenewal(r.ID, r.RenewedFrom) + b.broadcastAction(webhooks.Event{ + Module: api.ModuleContract, + Event: api.EventRenew, + Payload: api.EventContractRenew{ + Renewal: r, + Timestamp: time.Now().UTC(), + }, + }) + return r, nil +} + +func (b *Bus) broadcastContract(ctx context.Context, fcid types.FileContractID) (txnID types.TransactionID, _ error) { + // acquire contract lock indefinitely and defer the release + lockID, err := b.contractLocker.Acquire(ctx, lockingPriorityRenew, fcid, time.Duration(math.MaxInt64)) + if err != nil { + return 
types.TransactionID{}, fmt.Errorf("couldn't acquire contract lock; %w", err) + } + defer func() { + if err := b.contractLocker.Release(fcid, lockID); err != nil { + b.logger.Error("failed to release contract lock", zap.Error(err)) + } + }() + + // fetch contract + c, err := b.ms.Contract(ctx, fcid) + if err != nil { + return types.TransactionID{}, fmt.Errorf("couldn't fetch contract; %w", err) + } + + // fetch revision + rk := b.deriveRenterKey(c.HostKey) + rev, err := b.rhp2.SignedRevision(ctx, c.HostIP, c.HostKey, rk, fcid, time.Minute) + if err != nil { + return types.TransactionID{}, fmt.Errorf("couldn't fetch revision; %w", err) + } + + // send V2 transaction if we're passed the V2 hardfork allow height + if b.isPassedV2AllowHeight() { + panic("not implemented") + } else { + // create the transaction + txn := types.Transaction{ + FileContractRevisions: []types.FileContractRevision{rev.Revision}, + Signatures: rev.Signatures[:], + } + + // fund the transaction (only the fee) + toSign, err := b.w.FundTransaction(&txn, types.ZeroCurrency, true) + if err != nil { + return types.TransactionID{}, fmt.Errorf("couldn't fund transaction; %w", err) + } + // sign the transaction + b.w.SignTransaction(&txn, toSign, types.CoveredFields{WholeTransaction: true}) + + // verify the transaction and add it to the transaction pool + txnset := append(b.cm.UnconfirmedParents(txn), txn) + _, err = b.cm.AddPoolTransactions(txnset) + if err != nil { + b.w.ReleaseInputs([]types.Transaction{txn}, nil) + return types.TransactionID{}, fmt.Errorf("couldn't add transaction set to the pool; %w", err) + } + + // broadcast the transaction + b.s.BroadcastTransactionSet(txnset) + txnID = txn.ID() + } + + return +} + +func (b *Bus) formContract(ctx context.Context, hostSettings rhpv2.HostSettings, renterAddress types.Address, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostIP string, endHeight uint64) (rhpv2.ContractRevision, error) { + // derive the renter key + 
renterKey := b.deriveRenterKey(hostKey) + + // prepare the transaction + cs := b.cm.TipState() + fc := rhpv2.PrepareContractFormation(renterKey.PublicKey(), hostKey, renterFunds, hostCollateral, endHeight, hostSettings, renterAddress) + txn := types.Transaction{FileContracts: []types.FileContract{fc}} + + // calculate the miner fee + fee := b.cm.RecommendedFee().Mul64(cs.TransactionWeight(txn)) + txn.MinerFees = []types.Currency{fee} + + // fund the transaction + cost := rhpv2.ContractFormationCost(cs, fc, hostSettings.ContractPrice).Add(fee) + toSign, err := b.w.FundTransaction(&txn, cost, true) + if err != nil { + return rhpv2.ContractRevision{}, fmt.Errorf("couldn't fund transaction: %w", err) + } + + // sign the transaction + b.w.SignTransaction(&txn, toSign, wallet.ExplicitCoveredFields(txn)) + + // form the contract + contract, txnSet, err := b.rhp2.FormContract(ctx, hostKey, hostIP, renterKey, append(b.cm.UnconfirmedParents(txn), txn)) + if err != nil { + b.w.ReleaseInputs([]types.Transaction{txn}, nil) + return rhpv2.ContractRevision{}, err + } + + // add transaction set to the pool + _, err = b.cm.AddPoolTransactions(txnSet) + if err != nil { + b.w.ReleaseInputs([]types.Transaction{txn}, nil) + return rhpv2.ContractRevision{}, fmt.Errorf("couldn't add transaction set to the pool: %w", err) + } + + // broadcast the transaction set + go b.s.BroadcastTransactionSet(txnSet) + + return contract, nil +} + // initSettings loads the default settings if the setting is not already set and // ensures the settings are valid func (b *Bus) initSettings(ctx context.Context) error { @@ -645,3 +782,106 @@ func (b *Bus) initSettings(ctx context.Context) error { return nil } + +func (b *Bus) isPassedV2AllowHeight() bool { + cs := b.cm.TipState() + return cs.Index.Height >= cs.Network.HardforkV2.AllowHeight +} + +func (b *Bus) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { + seed := blake2b.Sum256(append(b.deriveSubKey("renterkey"), hostKey[:]...)) + pk := 
types.NewPrivateKeyFromSeed(seed[:]) + for i := range seed { + seed[i] = 0 + } + return pk +} + +func (b *Bus) deriveSubKey(purpose string) types.PrivateKey { + seed := blake2b.Sum256(append(b.masterKey[:], []byte(purpose)...)) + pk := types.NewPrivateKeyFromSeed(seed[:]) + for i := range seed { + seed[i] = 0 + } + return pk +} + +func (b *Bus) prepareRenew(cs consensus.State, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, endHeight, expectedStorage uint64) rhp3.PrepareRenewFn { + return func(pt rhpv3.HostPriceTable) ([]types.Hash256, []types.Transaction, types.Currency, rhp3.DiscardTxnFn, error) { + // create the final revision from the provided revision + finalRevision := revision + finalRevision.MissedProofOutputs = finalRevision.ValidProofOutputs + finalRevision.Filesize = 0 + finalRevision.FileMerkleRoot = types.Hash256{} + finalRevision.RevisionNumber = math.MaxUint64 + + // prepare the new contract + fc, basePrice, err := rhpv3.PrepareContractRenewal(revision, hostAddress, renterAddress, renterFunds, minNewCollateral, pt, expectedStorage, endHeight) + if err != nil { + return nil, nil, types.ZeroCurrency, nil, fmt.Errorf("couldn't prepare contract renewal: %w", err) + } + + // prepare the transaction + txn := types.Transaction{ + FileContracts: []types.FileContract{fc}, + FileContractRevisions: []types.FileContractRevision{finalRevision}, + MinerFees: []types.Currency{pt.TxnFeeMaxRecommended.Mul64(4096)}, + } + + // compute how much renter funds to put into the new contract + fundAmount := rhpv3.ContractRenewalCost(cs, pt, fc, txn.MinerFees[0], basePrice) + + // make sure we don't exceed the max fund amount. + if maxFundAmount.Cmp(fundAmount) < 0 { + return nil, nil, types.ZeroCurrency, nil, fmt.Errorf("%w: %v > %v", api.ErrMaxFundAmountExceeded, fundAmount, maxFundAmount) + } + + // fund the transaction, we are not signing it yet since it's not + // complete. 
The host still needs to complete it and the revision + + // contract are signed with the renter key by the worker. + toSign, err := b.w.FundTransaction(&txn, fundAmount, true) + if err != nil { + return nil, nil, types.ZeroCurrency, nil, fmt.Errorf("couldn't fund transaction: %w", err) + } + + return toSign, append(b.cm.UnconfirmedParents(txn), txn), fundAmount, func(err *error) { + if *err == nil { + return + } + b.w.ReleaseInputs([]types.Transaction{txn}, nil) + }, nil + } +} + +func (b *Bus) renewContract(ctx context.Context, cs consensus.State, gp api.GougingParams, c api.ContractMetadata, hs rhpv2.HostSettings, renterFunds, minNewCollateral, maxFundAmount types.Currency, endHeight, expectedNewStorage uint64) (rhpv2.ContractRevision, types.Currency, types.Currency, error) { + // acquire contract lock indefinitely and defer the release + lockID, err := b.contractLocker.Acquire(ctx, lockingPriorityRenew, c.ID, time.Duration(math.MaxInt64)) + if err != nil { + return rhpv2.ContractRevision{}, types.ZeroCurrency, types.ZeroCurrency, fmt.Errorf("couldn't acquire contract lock; %w", err) + } + defer func() { + if err := b.contractLocker.Release(c.ID, lockID); err != nil { + b.logger.Error("failed to release contract lock", zap.Error(err)) + } + }() + + // fetch the revision + rev, err := b.rhp3.Revision(ctx, c.ID, c.HostKey, c.SiamuxAddr) + if err != nil { + return rhpv2.ContractRevision{}, types.ZeroCurrency, types.ZeroCurrency, fmt.Errorf("couldn't fetch revision; %w", err) + } + + // renew contract + gc := gouging.NewChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, nil, nil) + renterKey := b.deriveRenterKey(c.HostKey) + prepareRenew := b.prepareRenew(cs, rev, hs.Address, b.w.Address(), renterFunds, minNewCollateral, maxFundAmount, endHeight, expectedNewStorage) + newRevision, txnSet, contractPrice, fundAmount, err := b.rhp3.Renew(ctx, gc, rev, renterKey, c.HostKey, c.SiamuxAddr, prepareRenew, b.w.SignTransaction) + if err != nil { + return 
rhpv2.ContractRevision{}, types.ZeroCurrency, types.ZeroCurrency, fmt.Errorf("couldn't renew contract; %w", err) + } + + // broadcast the transaction set + b.s.BroadcastTransactionSet(txnSet) + + return newRevision, contractPrice, fundAmount, nil +} diff --git a/bus/client/accounts.go b/bus/client/accounts.go index 052928ae4..9742d2a0d 100644 --- a/bus/client/accounts.go +++ b/bus/client/accounts.go @@ -2,76 +2,35 @@ package client import ( "context" - "fmt" - "math/big" - "time" + "net/url" rhpv3 "go.sia.tech/core/rhp/v3" "go.sia.tech/core/types" "go.sia.tech/renterd/api" ) -// Account returns the account for given id. -func (c *Client) Account(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey) (account api.Account, err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s", id), api.AccountHandlerPOST{ - HostKey: hostKey, - }, &account) - return -} - // Accounts returns all accounts. -func (c *Client) Accounts(ctx context.Context) (accounts []api.Account, err error) { - err = c.c.WithContext(ctx).GET("/accounts", &accounts) +func (c *Client) Accounts(ctx context.Context, owner string) (accounts []api.Account, err error) { + values := url.Values{} + values.Set("owner", owner) + err = c.c.WithContext(ctx).GET("/accounts?"+values.Encode(), &accounts) return } -// AddBalance adds the given amount to an account's balance, the amount can be negative. -func (c *Client) AddBalance(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, amount *big.Int) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/add", id), api.AccountsAddBalanceRequest{ - HostKey: hostKey, - Amount: amount, - }, nil) - return -} - -// LockAccount locks an account. 
-func (c *Client) LockAccount(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, exclusive bool, duration time.Duration) (account api.Account, lockID uint64, err error) { - var resp api.AccountsLockHandlerResponse - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/lock", id), api.AccountsLockHandlerRequest{ - HostKey: hostKey, - Exclusive: exclusive, - Duration: api.DurationMS(duration), +func (c *Client) FundAccount(ctx context.Context, account rhpv3.Account, fcid types.FileContractID, amount types.Currency) (types.Currency, error) { + var resp api.AccountsFundResponse + err := c.c.WithContext(ctx).POST("/accounts/fund", api.AccountsFundRequest{ + AccountID: account, + Amount: amount, + ContractID: fcid, }, &resp) - return resp.Account, resp.LockID, err -} - -// ResetDrift resets the drift of an account to zero. -func (c *Client) ResetDrift(ctx context.Context, id rhpv3.Account) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/resetdrift", id), nil, nil) - return -} - -// SetBalance sets the given account's balance to a certain amount. -func (c *Client) SetBalance(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, amount *big.Int) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/update", id), api.AccountsUpdateBalanceRequest{ - HostKey: hostKey, - Amount: amount, - }, nil) - return -} - -// ScheduleSync sets the requiresSync flag of an account. -func (c *Client) ScheduleSync(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/requiressync", id), api.AccountsRequiresSyncRequest{ - HostKey: hostKey, - }, nil) - return + return resp.Deposit, err } -// UnlockAccount unlocks an account. 
-func (c *Client) UnlockAccount(ctx context.Context, id rhpv3.Account, lockID uint64) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/unlock", id), api.AccountsUnlockHandlerRequest{ - LockID: lockID, +// UpdateAccounts saves all accounts. +func (c *Client) UpdateAccounts(ctx context.Context, accounts []api.Account) (err error) { + err = c.c.WithContext(ctx).POST("/accounts", api.AccountsSaveRequest{ + Accounts: accounts, }, nil) return } diff --git a/bus/client/client.go b/bus/client/client.go index b082e5d9e..c31b6d4a2 100644 --- a/bus/client/client.go +++ b/bus/client/client.go @@ -1,13 +1,11 @@ package client import ( - "encoding/json" - "errors" - "io" "net/http" "go.sia.tech/jape" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" ) // A Client provides methods for interacting with a bus. @@ -34,18 +32,6 @@ func (c *Client) do(req *http.Request, resp interface{}) error { if c.c.Password != "" { req.SetBasicAuth("", c.c.Password) } - r, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer io.Copy(io.Discard, r.Body) - defer r.Body.Close() - if !(200 <= r.StatusCode && r.StatusCode < 300) { - err, _ := io.ReadAll(r.Body) - return errors.New(string(err)) - } - if resp == nil { - return nil - } - return json.NewDecoder(r.Body).Decode(resp) + _, _, err := utils.DoRequest(req, &resp) + return err } diff --git a/bus/client/contracts.go b/bus/client/contracts.go index 84cd7dc88..0403e49f5 100644 --- a/bus/client/contracts.go +++ b/bus/client/contracts.go @@ -62,6 +62,12 @@ func (c *Client) ArchiveContracts(ctx context.Context, toArchive map[types.FileC return } +// BroadcastContract broadcasts the latest revision for a contract. 
+func (c *Client) BroadcastContract(ctx context.Context, contractID types.FileContractID) (txnID types.TransactionID, err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contract/%s/broadcast", contractID), nil, &txnID) + return +} + // Contract returns the contract with the given ID. func (c *Client) Contract(ctx context.Context, id types.FileContractID) (contract api.ContractMetadata, err error) { err = c.c.WithContext(ctx).GET(fmt.Sprintf("/contract/%s", id), &contract) @@ -130,6 +136,19 @@ func (c *Client) DeleteContractSet(ctx context.Context, set string) (err error) return } +// FormContract forms a contract with a host and adds it to the bus. +func (c *Client) FormContract(ctx context.Context, renterAddress types.Address, renterFunds types.Currency, hostKey types.PublicKey, hostIP string, hostCollateral types.Currency, endHeight uint64) (contract api.ContractMetadata, err error) { + err = c.c.WithContext(ctx).POST("/contracts", api.ContractFormRequest{ + EndHeight: endHeight, + HostCollateral: hostCollateral, + HostKey: hostKey, + HostIP: hostIP, + RenterFunds: renterFunds, + RenterAddress: renterAddress, + }, &contract) + return +} + // KeepaliveContract extends the duration on an already acquired lock on a // contract. func (c *Client) KeepaliveContract(ctx context.Context, contractID types.FileContractID, lockID uint64, d time.Duration) (err error) { @@ -147,6 +166,25 @@ func (c *Client) PrunableData(ctx context.Context) (prunableData api.ContractsPr return } +// PruneContract prunes the given contract. +func (c *Client) PruneContract(ctx context.Context, contractID types.FileContractID, timeout time.Duration) (res api.ContractPruneResponse, err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contract/%s/prune", contractID), api.ContractPruneRequest{Timeout: api.DurationMS(timeout)}, &res) + return +} + +// RenewContract renews an existing contract with a host and adds it to the bus. 
+func (c *Client) RenewContract(ctx context.Context, contractID types.FileContractID, endHeight uint64, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedStorage uint64) (renewal api.ContractMetadata, err error) { + req := api.ContractRenewRequest{ + EndHeight: endHeight, + ExpectedNewStorage: expectedStorage, + MaxFundAmount: maxFundAmount, + MinNewCollateral: minNewCollateral, + RenterFunds: renterFunds, + } + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contract/%s/renew", contractID), req, &renewal) + return +} + // RenewedContract returns the renewed contract for the given ID. func (c *Client) RenewedContract(ctx context.Context, renewedFrom types.FileContractID) (contract api.ContractMetadata, err error) { err = c.c.WithContext(ctx).GET(fmt.Sprintf("/contracts/renewed/%s", renewedFrom), &contract) @@ -167,8 +205,11 @@ func (c *Client) ReleaseContract(ctx context.Context, contractID types.FileContr return } -// SetContractSet adds the given contracts to the given set. -func (c *Client) SetContractSet(ctx context.Context, set string, contracts []types.FileContractID) (err error) { - err = c.c.WithContext(ctx).PUT(fmt.Sprintf("/contracts/set/%s", set), contracts) +// UpdateContractSet adds/removes the given contracts to/from the given set. +func (c *Client) UpdateContractSet(ctx context.Context, set string, toAdd, toRemove []types.FileContractID) (err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/contracts/set/%s", set), api.ContractSetUpdateRequest{ + ToAdd: toAdd, + ToRemove: toRemove, + }, nil) return } diff --git a/bus/client/hosts.go b/bus/client/hosts.go index 709cb899c..1e09ab3ea 100644 --- a/bus/client/hosts.go +++ b/bus/client/hosts.go @@ -62,10 +62,10 @@ func (c *Client) RecordPriceTables(ctx context.Context, priceTableUpdates []api. } // RemoveOfflineHosts removes all hosts that have been offline for longer than the given max downtime. 
-func (c *Client) RemoveOfflineHosts(ctx context.Context, minRecentScanFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { +func (c *Client) RemoveOfflineHosts(ctx context.Context, maxConsecutiveScanFailures uint64, maxDowntime time.Duration) (removed uint64, err error) { err = c.c.WithContext(ctx).POST("/hosts/remove", api.HostsRemoveRequest{ - MaxDowntimeHours: api.DurationH(maxDowntime), - MinRecentScanFailures: minRecentScanFailures, + MaxDowntimeHours: api.DurationH(maxDowntime), + MaxConsecutiveScanFailures: maxConsecutiveScanFailures, }, &removed) return } diff --git a/bus/client/metrics.go b/bus/client/metrics.go index 10bc2fbca..3923145f0 100644 --- a/bus/client/metrics.go +++ b/bus/client/metrics.go @@ -4,15 +4,14 @@ import ( "bytes" "context" "encoding/json" - "errors" "fmt" - "io" "net/http" "net/url" "time" "go.sia.tech/core/types" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" ) func (c *Client) ContractMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractMetricsQueryOpts) ([]api.ContractMetric, error) { @@ -130,16 +129,8 @@ func (c *Client) PruneMetrics(ctx context.Context, metric string, cutoff time.Ti panic(err) } req.SetBasicAuth("", c.c.WithContext(ctx).Password) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - err, _ := io.ReadAll(resp.Body) - return errors.New(string(err)) - } - return nil + _, _, err = utils.DoRequest(req, nil) + return err } func (c *Client) recordMetric(ctx context.Context, key string, d interface{}) error { @@ -159,17 +150,8 @@ func (c *Client) recordMetric(ctx context.Context, key string, d interface{}) er panic(err) } req.SetBasicAuth("", c.c.WithContext(ctx).Password) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer io.Copy(io.Discard, resp.Body) - defer resp.Body.Close() - if resp.StatusCode != 200 { - err, _ := 
io.ReadAll(resp.Body) - return errors.New(string(err)) - } - return nil + _, _, err = utils.DoRequest(req, nil) + return err } func (c *Client) metric(ctx context.Context, key string, values url.Values, res interface{}) error { @@ -185,16 +167,6 @@ func (c *Client) metric(ctx context.Context, key string, values url.Values, res panic(err) } req.SetBasicAuth("", c.c.WithContext(ctx).Password) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - defer io.Copy(io.Discard, resp.Body) - defer resp.Body.Close() - - if resp.StatusCode != 200 && resp.StatusCode != 206 { - err, _ := io.ReadAll(resp.Body) - return errors.New(string(err)) - } - return json.NewDecoder(resp.Body).Decode(&res) + _, _, err = utils.DoRequest(req, &res) + return err } diff --git a/bus/client/slabs.go b/bus/client/slabs.go index db5c0023a..b0fc8837e 100644 --- a/bus/client/slabs.go +++ b/bus/client/slabs.go @@ -3,7 +3,6 @@ package client import ( "bytes" "context" - "encoding/json" "errors" "fmt" "io" @@ -12,6 +11,7 @@ import ( "time" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" ) @@ -33,18 +33,8 @@ func (c *Client) AddPartialSlab(ctx context.Context, data []byte, minShards, tot panic(err) } req.SetBasicAuth("", c.c.WithContext(ctx).Password) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, false, err - } - defer io.Copy(io.Discard, resp.Body) - defer resp.Body.Close() - if resp.StatusCode != 200 { - err, _ := io.ReadAll(resp.Body) - return nil, false, errors.New(string(err)) - } var apsr api.AddPartialSlabResponse - err = json.NewDecoder(resp.Body).Decode(&apsr) + _, _, err = utils.DoRequest(req, &apsr) if err != nil { return nil, false, err } diff --git a/bus/client/wallet.go b/bus/client/wallet.go index 9733ed335..d91289b56 100644 --- a/bus/client/wallet.go +++ b/bus/client/wallet.go @@ -6,8 +6,6 @@ import ( "net/http" "net/url" - rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" 
"go.sia.tech/core/types" "go.sia.tech/renterd/api" ) @@ -64,41 +62,6 @@ func (c *Client) WalletPending(ctx context.Context) (resp []types.Transaction, e return } -// WalletPrepareForm funds and signs a contract transaction. -func (c *Client) WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) { - req := api.WalletPrepareFormRequest{ - EndHeight: endHeight, - HostCollateral: hostCollateral, - HostKey: hostKey, - HostSettings: hostSettings, - RenterAddress: renterAddress, - RenterFunds: renterFunds, - RenterKey: renterKey, - } - err = c.c.WithContext(ctx).POST("/wallet/prepare/form", req, &txns) - return -} - -// WalletPrepareRenew funds and signs a contract renewal transaction. -func (c *Client) WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) { - req := api.WalletPrepareRenewRequest{ - Revision: revision, - EndHeight: endHeight, - ExpectedNewStorage: expectedStorage, - HostAddress: hostAddress, - PriceTable: pt, - MaxFundAmount: maxFundAmount, - MinNewCollateral: minNewCollateral, - RenterAddress: renterAddress, - RenterFunds: renterFunds, - RenterKey: renterKey, - WindowSize: windowSize, - } - var resp api.WalletPrepareRenewResponse - err := c.c.WithContext(ctx).POST("/wallet/prepare/renew", req, &resp) - return resp, err -} - // WalletRedistribute broadcasts a transaction that redistributes the money in // the wallet in the desired number of outputs of given amount. If the // transaction was successfully broadcasted it will return the transaction ID. 
diff --git a/bus/routes.go b/bus/routes.go index 9feb747e4..d8ffd2997 100644 --- a/bus/routes.go +++ b/bus/routes.go @@ -14,9 +14,12 @@ import ( "time" rhpv2 "go.sia.tech/core/rhp/v2" - rhpv3 "go.sia.tech/core/rhp/v3" + + rhp3 "go.sia.tech/renterd/internal/rhp/v3" ibus "go.sia.tech/renterd/internal/bus" + "go.sia.tech/renterd/internal/gouging" + rhp2 "go.sia.tech/renterd/internal/rhp/v2" "go.sia.tech/core/gateway" "go.sia.tech/core/types" @@ -41,6 +44,87 @@ func (b *Bus) fetchSetting(ctx context.Context, key string, value interface{}) e return nil } +func (b *Bus) accountsFundHandler(jc jape.Context) { + var req api.AccountsFundRequest + if jc.Decode(&req) != nil { + return + } + + // contract metadata + cm, err := b.ms.Contract(jc.Request.Context(), req.ContractID) + if jc.Check("failed to fetch contract metadata", err) != nil { + return + } + + rk := b.masterKey.DeriveContractKey(cm.HostKey) + + // acquire contract + lockID, err := b.contractLocker.Acquire(jc.Request.Context(), lockingPriorityFunding, req.ContractID, math.MaxInt64) + if jc.Check("failed to acquire lock", err) != nil { + return + } + defer b.contractLocker.Release(req.ContractID, lockID) + + // latest revision + rev, err := b.rhp3.Revision(jc.Request.Context(), req.ContractID, cm.HostKey, cm.SiamuxAddr) + if jc.Check("failed to fetch contract revision", err) != nil { + return + } + + // ensure we have at least 2H in the contract to cover the costs + if types.NewCurrency64(2).Cmp(rev.ValidRenterPayout()) >= 0 { + jc.Error(fmt.Errorf("insufficient funds to fund account: %v <= %v", rev.ValidRenterPayout(), types.NewCurrency64(2)), http.StatusBadRequest) + return + } + + // price table + pt, err := b.rhp3.PriceTable(jc.Request.Context(), cm.HostKey, cm.SiamuxAddr, rhp3.PreparePriceTableContractPayment(&rev, req.AccountID, rk)) + if jc.Check("failed to fetch price table", err) != nil { + return + } + + // check only the FundAccountCost + if types.NewCurrency64(1).Cmp(pt.FundAccountCost) < 0 { + 
jc.Error(fmt.Errorf("%w: host is gouging on FundAccountCost", gouging.ErrPriceTableGouging), http.StatusServiceUnavailable) + return + } + + // cap the deposit by what's left in the contract + deposit := req.Amount + cost := pt.FundAccountCost + availableFunds := rev.ValidRenterPayout().Sub(cost) + if deposit.Cmp(availableFunds) > 0 { + deposit = availableFunds + } + + // fund the account + err = b.rhp3.FundAccount(jc.Request.Context(), &rev, cm.HostKey, cm.SiamuxAddr, deposit, req.AccountID, pt.HostPriceTable, rk) + if jc.Check("failed to fund account", err) != nil { + return + } + + // record spending + err = b.ms.RecordContractSpending(jc.Request.Context(), []api.ContractSpendingRecord{ + { + ContractSpending: api.ContractSpending{ + FundAccount: deposit.Add(cost), + }, + ContractID: rev.ParentID, + RevisionNumber: rev.RevisionNumber, + Size: rev.Filesize, + + MissedHostPayout: rev.MissedHostPayout(), + ValidRenterPayout: rev.ValidRenterPayout(), + }, + }) + if err != nil { + b.logger.Error("failed to record contract spending", zap.Error(err)) + } + jc.Encode(api.AccountsFundResponse{ + Deposit: deposit, + }) +} + func (b *Bus) consensusAcceptBlock(jc jape.Context) { var block types.Block if jc.Decode(&block) != nil { @@ -188,11 +272,7 @@ func (b *Bus) walletHandler(jc jape.Context) { return } - tip, err := b.w.Tip() - if jc.Check("couldn't fetch wallet scan height", err) != nil { - return - } - + tip := b.w.Tip() jc.Encode(api.WalletResponse{ ScanHeight: tip.Height, Address: address, @@ -354,54 +434,56 @@ func (b *Bus) walletSendSiacoinsHandler(jc jape.Context) { } } - state := b.cm.TipState() - // if the current height is below the v2 hardfork height, send a v1 - // transaction - if state.Index.Height < state.Network.HardforkV2.AllowHeight { - // build transaction - txn := types.Transaction{ - MinerFees: []types.Currency{minerFee}, + // send V2 transaction if we're passed the V2 hardfork allow height + if b.isPassedV2AllowHeight() { + txn := 
types.V2Transaction{ + MinerFee: minerFee, SiacoinOutputs: []types.SiacoinOutput{ {Address: req.Address, Value: req.Amount}, }, } - toSign, err := b.w.FundTransaction(&txn, req.Amount.Add(minerFee), req.UseUnconfirmed) + // fund and sign transaction + basis, toSign, err := b.w.FundV2Transaction(&txn, req.Amount.Add(minerFee), req.UseUnconfirmed) if jc.Check("failed to fund transaction", err) != nil { return } - b.w.SignTransaction(&txn, toSign, types.CoveredFields{WholeTransaction: true}) - // shouldn't be necessary to get parents since the transaction is - // not using unconfirmed outputs, but good practice - txnset := append(b.cm.UnconfirmedParents(txn), txn) + b.w.SignV2Inputs(&txn, toSign) + basis, txnset, err := b.cm.V2TransactionSet(basis, txn) + if jc.Check("failed to get parents for funded transaction", err) != nil { + b.w.ReleaseInputs(nil, []types.V2Transaction{txn}) + return + } // verify the transaction and add it to the transaction pool - if _, err := b.cm.AddPoolTransactions(txnset); jc.Check("failed to add transaction set", err) != nil { - b.w.ReleaseInputs([]types.Transaction{txn}, nil) + if _, err := b.cm.AddV2PoolTransactions(basis, txnset); jc.Check("failed to add v2 transaction set", err) != nil { + b.w.ReleaseInputs(nil, []types.V2Transaction{txn}) return } // broadcast the transaction - b.s.BroadcastTransactionSet(txnset) + b.s.BroadcastV2TransactionSet(basis, txnset) jc.Encode(txn.ID()) } else { - txn := types.V2Transaction{ - MinerFee: minerFee, + // build transaction + txn := types.Transaction{ + MinerFees: []types.Currency{minerFee}, SiacoinOutputs: []types.SiacoinOutput{ {Address: req.Address, Value: req.Amount}, }, } - // fund and sign transaction - state, toSign, err := b.w.FundV2Transaction(&txn, req.Amount.Add(minerFee), req.UseUnconfirmed) + toSign, err := b.w.FundTransaction(&txn, req.Amount.Add(minerFee), req.UseUnconfirmed) if jc.Check("failed to fund transaction", err) != nil { return } - b.w.SignV2Inputs(state, &txn, toSign) - 
txnset := append(b.cm.V2UnconfirmedParents(txn), txn) + b.w.SignTransaction(&txn, toSign, types.CoveredFields{WholeTransaction: true}) + // shouldn't be necessary to get parents since the transaction is + // not using unconfirmed outputs, but good practice + txnset := append(b.cm.UnconfirmedParents(txn), txn) // verify the transaction and add it to the transaction pool - if _, err := b.cm.AddV2PoolTransactions(state.Index, txnset); jc.Check("failed to add v2 transaction set", err) != nil { - b.w.ReleaseInputs(nil, []types.V2Transaction{txn}) + if _, err := b.cm.AddPoolTransactions(txnset); jc.Check("failed to add transaction set", err) != nil { + b.w.ReleaseInputs([]types.Transaction{txn}, nil) return } // broadcast the transaction - b.s.BroadcastV2TransactionSet(state.Index, txnset) + b.s.BroadcastTransactionSet(txnset) jc.Encode(txn.ID()) } } @@ -461,7 +543,7 @@ func (b *Bus) walletRedistributeHandler(jc jape.Context) { } for i := 0; i < len(txns); i++ { - b.w.SignV2Inputs(state, &txns[i], toSign[i]) + b.w.SignV2Inputs(&txns[i], toSign[i]) ids = append(ids, txns[i].ID()) } @@ -482,94 +564,6 @@ func (b *Bus) walletDiscardHandler(jc jape.Context) { } } -func (b *Bus) walletPrepareFormHandler(jc jape.Context) { - var wpfr api.WalletPrepareFormRequest - if jc.Decode(&wpfr) != nil { - return - } - if wpfr.HostKey == (types.PublicKey{}) { - jc.Error(errors.New("no host key provided"), http.StatusBadRequest) - return - } - if wpfr.RenterKey == (types.PublicKey{}) { - jc.Error(errors.New("no renter key provided"), http.StatusBadRequest) - return - } - cs := b.cm.TipState() - - fc := rhpv2.PrepareContractFormation(wpfr.RenterKey, wpfr.HostKey, wpfr.RenterFunds, wpfr.HostCollateral, wpfr.EndHeight, wpfr.HostSettings, wpfr.RenterAddress) - cost := rhpv2.ContractFormationCost(cs, fc, wpfr.HostSettings.ContractPrice) - txn := types.Transaction{ - FileContracts: []types.FileContract{fc}, - } - txn.MinerFees = 
[]types.Currency{b.cm.RecommendedFee().Mul64(cs.TransactionWeight(txn))} - toSign, err := b.w.FundTransaction(&txn, cost.Add(txn.MinerFees[0]), true) - if jc.Check("couldn't fund transaction", err) != nil { - return - } - - b.w.SignTransaction(&txn, toSign, wallet.ExplicitCoveredFields(txn)) - - jc.Encode(append(b.cm.UnconfirmedParents(txn), txn)) -} - -func (b *Bus) walletPrepareRenewHandler(jc jape.Context) { - var wprr api.WalletPrepareRenewRequest - if jc.Decode(&wprr) != nil { - return - } - if wprr.RenterKey == nil { - jc.Error(errors.New("no renter key provided"), http.StatusBadRequest) - return - } - cs := b.cm.TipState() - - // Create the final revision from the provided revision. - finalRevision := wprr.Revision - finalRevision.MissedProofOutputs = finalRevision.ValidProofOutputs - finalRevision.Filesize = 0 - finalRevision.FileMerkleRoot = types.Hash256{} - finalRevision.RevisionNumber = math.MaxUint64 - - // Prepare the new contract. - fc, basePrice, err := rhpv3.PrepareContractRenewal(wprr.Revision, wprr.HostAddress, wprr.RenterAddress, wprr.RenterFunds, wprr.MinNewCollateral, wprr.PriceTable, wprr.ExpectedNewStorage, wprr.EndHeight) - if jc.Check("couldn't prepare contract renewal", err) != nil { - return - } - - // Create the transaction containing both the final revision and new - // contract. - txn := types.Transaction{ - FileContracts: []types.FileContract{fc}, - FileContractRevisions: []types.FileContractRevision{finalRevision}, - MinerFees: []types.Currency{wprr.PriceTable.TxnFeeMaxRecommended.Mul64(4096)}, - } - - // Compute how much renter funds to put into the new contract. - cost := rhpv3.ContractRenewalCost(cs, wprr.PriceTable, fc, txn.MinerFees[0], basePrice) - - // Make sure we don't exceed the max fund amount. 
- // TODO: remove the IsZero check for the v2 change - if /*!wprr.MaxFundAmount.IsZero() &&*/ wprr.MaxFundAmount.Cmp(cost) < 0 { - jc.Error(fmt.Errorf("%w: %v > %v", api.ErrMaxFundAmountExceeded, cost, wprr.MaxFundAmount), http.StatusBadRequest) - return - } - - // Fund the txn. We are not signing it yet since it's not complete. The host - // still needs to complete it and the revision + contract are signed with - // the renter key by the worker. - toSign, err := b.w.FundTransaction(&txn, cost, true) - if jc.Check("couldn't fund transaction", err) != nil { - return - } - - jc.Encode(api.WalletPrepareRenewResponse{ - FundAmount: cost, - ToSign: toSign, - TransactionSet: append(b.cm.UnconfirmedParents(txn), txn), - }) -} - func (b *Bus) walletPendingHandler(jc jape.Context) { isRelevant := func(txn types.Transaction) bool { addr := b.w.Address() @@ -637,11 +631,11 @@ func (b *Bus) hostsRemoveHandlerPOST(jc jape.Context) { jc.Error(errors.New("maxDowntime must be non-zero"), http.StatusBadRequest) return } - if hrr.MinRecentScanFailures == 0 { - jc.Error(errors.New("minRecentScanFailures must be non-zero"), http.StatusBadRequest) + if hrr.MaxConsecutiveScanFailures == 0 { + jc.Error(errors.New("maxConsecutiveScanFailures must be non-zero"), http.StatusBadRequest) return } - removed, err := b.hs.RemoveOfflineHosts(jc.Request.Context(), hrr.MinRecentScanFailures, time.Duration(hrr.MaxDowntimeHours)) + removed, err := b.hs.RemoveOfflineHosts(jc.Request.Context(), hrr.MaxConsecutiveScanFailures, time.Duration(hrr.MaxDowntimeHours)) if jc.Check("couldn't remove offline hosts", err) != nil { return } @@ -808,22 +802,23 @@ func (b *Bus) contractsSetsHandlerGET(jc jape.Context) { } func (b *Bus) contractsSetHandlerPUT(jc jape.Context) { - var contractIds []types.FileContractID + var req api.ContractSetUpdateRequest if set := jc.PathParam("set"); set == "" { jc.Error(errors.New("path parameter 'set' can not be empty"), http.StatusBadRequest) return - } else if 
jc.Decode(&contractIds) != nil { + } else if jc.Decode(&req) != nil { return - } else if jc.Check("could not add contracts to set", b.ms.SetContractSet(jc.Request.Context(), set, contractIds)) != nil { + } else if jc.Check("could not add contracts to set", b.ms.UpdateContractSet(jc.Request.Context(), set, req.ToAdd, req.ToRemove)) != nil { return } else { b.broadcastAction(webhooks.Event{ Module: api.ModuleContractSet, Event: api.EventUpdate, Payload: api.EventContractSetUpdate{ - Name: set, - ContractIDs: contractIds, - Timestamp: time.Now().UTC(), + Name: set, + ToAdd: req.ToAdd, + ToRemove: req.ToRemove, + Timestamp: time.Now().UTC(), }, }) } @@ -870,6 +865,117 @@ func (b *Bus) contractKeepaliveHandlerPOST(jc jape.Context) { } } +func (b *Bus) contractPruneHandlerPOST(jc jape.Context) { + ctx := jc.Request.Context() + + // decode fcid + var fcid types.FileContractID + if jc.DecodeParam("id", &fcid) != nil { + return + } + + // decode timeout + var req api.ContractPruneRequest + if jc.Decode(&req) != nil { + return + } + + // create gouging checker + gp, err := b.gougingParams(ctx) + if jc.Check("couldn't fetch gouging parameters", err) != nil { + return + } + gc := gouging.NewChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, nil, nil) + + // apply timeout + pruneCtx := ctx + if req.Timeout > 0 { + var cancel context.CancelFunc + pruneCtx, cancel = context.WithTimeout(ctx, time.Duration(req.Timeout)) + defer cancel() + } + + // acquire contract lock indefinitely and defer the release + lockID, err := b.contractLocker.Acquire(pruneCtx, lockingPriorityPruning, fcid, time.Duration(math.MaxInt64)) + if jc.Check("couldn't acquire contract lock", err) != nil { + return + } + defer func() { + if err := b.contractLocker.Release(fcid, lockID); err != nil { + b.logger.Error("failed to release contract lock", zap.Error(err)) + } + }() + + // fetch the contract from the bus + c, err := b.ms.Contract(ctx, fcid) + if errors.Is(err, api.ErrContractNotFound) { + 
jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("couldn't fetch contract", err) != nil { + return + } + + // build map of uploading sectors + pending := make(map[types.Hash256]struct{}) + for _, root := range b.sectors.Sectors(fcid) { + pending[root] = struct{}{} + } + + // prune the contract + rev, spending, pruned, remaining, err := b.rhp2.PruneContract(pruneCtx, b.deriveRenterKey(c.HostKey), gc, c.HostIP, c.HostKey, fcid, c.RevisionNumber, func(fcid types.FileContractID, roots []types.Hash256) ([]uint64, error) { + indices, err := b.ms.PrunableContractRoots(ctx, fcid, roots) + if err != nil { + return nil, err + } else if len(indices) > len(roots) { + return nil, fmt.Errorf("selected %d prunable roots but only %d were provided", len(indices), len(roots)) + } + + filtered := indices[:0] + for _, index := range indices { + _, ok := pending[roots[index]] + if !ok { + filtered = append(filtered, index) + } + } + indices = filtered + return indices, nil + }) + + if errors.Is(err, rhp2.ErrNoSectorsToPrune) { + err = nil // ignore error + } else if !errors.Is(err, context.Canceled) { + if jc.Check("couldn't prune contract", err) != nil { + return + } + } + + // record spending + if !spending.Total().IsZero() { + b.ms.RecordContractSpending(jc.Request.Context(), []api.ContractSpendingRecord{ + { + ContractSpending: spending, + ContractID: fcid, + RevisionNumber: rev.RevisionNumber, + Size: rev.Filesize, + + MissedHostPayout: rev.MissedHostPayout(), + ValidRenterPayout: rev.ValidRenterPayout(), + }, + }) + } + + // return response + res := api.ContractPruneResponse{ + ContractSize: rev.Filesize, + Pruned: pruned, + Remaining: remaining, + } + if err != nil { + res.Error = err.Error() + } + jc.Encode(res) +} + func (b *Bus) contractsPrunableDataHandlerGET(jc jape.Context) { sizes, err := b.ms.ContractSizes(jc.Request.Context()) if jc.Check("failed to fetch contract sizes", err) != nil { @@ -980,21 +1086,91 @@ func (b *Bus) contractIDHandlerPOST(jc 
jape.Context) { return } - a, err := b.ms.AddContract(jc.Request.Context(), req.Contract, req.ContractPrice, req.TotalCost, req.StartHeight, req.State) + a, err := b.addContract(jc.Request.Context(), req.Contract, req.ContractPrice, req.TotalCost, req.StartHeight, req.State) if jc.Check("couldn't store contract", err) != nil { return } + jc.Encode(a) +} - b.broadcastAction(webhooks.Event{ - Module: api.ModuleContract, - Event: api.EventAdd, - Payload: api.EventContractAdd{ - Added: a, - Timestamp: time.Now().UTC(), - }, - }) +func (b *Bus) contractIDRenewHandlerPOST(jc jape.Context) { + // apply pessimistic timeout + ctx, cancel := context.WithTimeout(jc.Request.Context(), 15*time.Minute) + defer cancel() - jc.Encode(a) + // decode contract id + var fcid types.FileContractID + if jc.DecodeParam("id", &fcid) != nil { + return + } + + // decode request + var rrr api.ContractRenewRequest + if jc.Decode(&rrr) != nil { + return + } + + // validate the request + if rrr.EndHeight == 0 { + http.Error(jc.ResponseWriter, "EndHeight can not be zero", http.StatusBadRequest) + } else if rrr.ExpectedNewStorage == 0 { + http.Error(jc.ResponseWriter, "ExpectedNewStorage can not be zero", http.StatusBadRequest) + } else if rrr.MaxFundAmount.IsZero() { + http.Error(jc.ResponseWriter, "MaxFundAmount can not be zero", http.StatusBadRequest) + } else if rrr.MinNewCollateral.IsZero() { + http.Error(jc.ResponseWriter, "MinNewCollateral can not be zero", http.StatusBadRequest) + } else if rrr.RenterFunds.IsZero() { + http.Error(jc.ResponseWriter, "RenterFunds can not be zero", http.StatusBadRequest) + return + } + + // fetch the contract + c, err := b.ms.Contract(ctx, fcid) + if errors.Is(err, api.ErrContractNotFound) { + jc.Error(err, http.StatusNotFound) + return + } else if jc.Check("couldn't fetch contract", err) != nil { + return + } + + // fetch the host + h, err := b.hs.Host(ctx, c.HostKey) + if jc.Check("couldn't fetch host", err) != nil { + return + } + + // fetch consensus state 
+ cs := b.cm.TipState() + + // fetch gouging parameters + gp, err := b.gougingParams(ctx) + if jc.Check("could not get gouging parameters", err) != nil { + return + } + + // send V2 transaction if we're passed the V2 hardfork allow height + var newRevision rhpv2.ContractRevision + var contractPrice, fundAmount types.Currency + if b.isPassedV2AllowHeight() { + panic("not implemented") + } else { + newRevision, contractPrice, fundAmount, err = b.renewContract(ctx, cs, gp, c, h.Settings, rrr.RenterFunds, rrr.MinNewCollateral, rrr.MaxFundAmount, rrr.EndHeight, rrr.ExpectedNewStorage) + if errors.Is(err, api.ErrMaxFundAmountExceeded) { + jc.Error(err, http.StatusBadRequest) + return + } else if jc.Check("couldn't renew contract", err) != nil { + return + } + } + + // add renewal contract to store + metadata, err := b.addRenewedContract(ctx, fcid, newRevision, contractPrice, fundAmount, cs.Index.Height, api.ContractStatePending) + if jc.Check("couldn't store contract", err) != nil { + return + } + + // send the response + jc.Encode(metadata) } func (b *Bus) contractIDRenewedHandlerPOST(jc jape.Context) { @@ -1014,21 +1190,11 @@ func (b *Bus) contractIDRenewedHandlerPOST(jc jape.Context) { if req.State == "" { req.State = api.ContractStatePending } - r, err := b.ms.AddRenewedContract(jc.Request.Context(), req.Contract, req.ContractPrice, req.TotalCost, req.StartHeight, req.RenewedFrom, req.State) + r, err := b.addRenewedContract(jc.Request.Context(), req.RenewedFrom, req.Contract, req.ContractPrice, req.TotalCost, req.StartHeight, req.State) if jc.Check("couldn't store contract", err) != nil { return } - b.sectors.HandleRenewal(req.Contract.ID(), req.RenewedFrom) - b.broadcastAction(webhooks.Event{ - Module: api.ModuleContract, - Event: api.EventRenew, - Payload: api.EventContractRenew{ - Renewal: r, - Timestamp: time.Now().UTC(), - }, - }) - jc.Encode(r) } @@ -1613,6 +1779,18 @@ func (b *Bus) contractIDAncestorsHandler(jc jape.Context) { jc.Encode(ancestors) } +func (b 
*Bus) contractIDBroadcastHandler(jc jape.Context) { + var fcid types.FileContractID + if jc.DecodeParam("id", &fcid) != nil { + return + } + + txnID, err := b.broadcastContract(jc.Request.Context(), fcid) + if jc.Check("failed to broadcast contract revision", err) == nil { + jc.Encode(txnID) + } +} + func (b *Bus) paramsHandlerUploadGET(jc jape.Context) { gp, err := b.gougingParams(jc.Request.Context()) if jc.Check("could not get gouging parameters", err) != nil { @@ -1753,136 +1931,30 @@ func (b *Bus) handlePOSTAlertsRegister(jc jape.Context) { } func (b *Bus) accountsHandlerGET(jc jape.Context) { - jc.Encode(b.accountsMgr.Accounts()) -} - -func (b *Bus) accountHandlerGET(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } - var req api.AccountHandlerPOST - if jc.Decode(&req) != nil { - return - } - acc, err := b.accountsMgr.Account(id, req.HostKey) - if jc.Check("failed to fetch account", err) != nil { - return - } - jc.Encode(acc) -} - -func (b *Bus) accountsAddHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } - var req api.AccountsAddBalanceRequest - if jc.Decode(&req) != nil { - return - } - if id == (rhpv3.Account{}) { - jc.Error(errors.New("account id needs to be set"), http.StatusBadRequest) - return - } - if req.HostKey == (types.PublicKey{}) { - jc.Error(errors.New("host needs to be set"), http.StatusBadRequest) - return - } - b.accountsMgr.AddAmount(id, req.HostKey, req.Amount) -} - -func (b *Bus) accountsResetDriftHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } - err := b.accountsMgr.ResetDrift(id) - if errors.Is(err, ibus.ErrAccountNotFound) { - jc.Error(err, http.StatusNotFound) - return - } - if jc.Check("failed to reset drift", err) != nil { - return - } -} - -func (b *Bus) accountsUpdateHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } 
- var req api.AccountsUpdateBalanceRequest - if jc.Decode(&req) != nil { - return - } - if id == (rhpv3.Account{}) { - jc.Error(errors.New("account id needs to be set"), http.StatusBadRequest) - return - } - if req.HostKey == (types.PublicKey{}) { - jc.Error(errors.New("host needs to be set"), http.StatusBadRequest) - return - } - b.accountsMgr.SetBalance(id, req.HostKey, req.Amount) -} - -func (b *Bus) accountsRequiresSyncHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { + var owner string + if jc.DecodeForm("owner", &owner) != nil { return } - var req api.AccountsRequiresSyncRequest - if jc.Decode(&req) != nil { - return - } - if id == (rhpv3.Account{}) { - jc.Error(errors.New("account id needs to be set"), http.StatusBadRequest) - return - } - if req.HostKey == (types.PublicKey{}) { - jc.Error(errors.New("host needs to be set"), http.StatusBadRequest) - return - } - err := b.accountsMgr.ScheduleSync(id, req.HostKey) - if errors.Is(err, ibus.ErrAccountNotFound) { - jc.Error(err, http.StatusNotFound) - return - } - if jc.Check("failed to set requiresSync flag on account", err) != nil { + accounts, err := b.accounts.Accounts(jc.Request.Context(), owner) + if err != nil { + jc.Error(err, http.StatusInternalServerError) return } + jc.Encode(accounts) } -func (b *Bus) accountsLockHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } - var req api.AccountsLockHandlerRequest +func (b *Bus) accountsHandlerPOST(jc jape.Context) { + var req api.AccountsSaveRequest if jc.Decode(&req) != nil { return } - - acc, lockID := b.accountsMgr.LockAccount(jc.Request.Context(), id, req.HostKey, req.Exclusive, time.Duration(req.Duration)) - jc.Encode(api.AccountsLockHandlerResponse{ - Account: acc, - LockID: lockID, - }) -} - -func (b *Bus) accountsUnlockHandlerPOST(jc jape.Context) { - var id rhpv3.Account - if jc.DecodeParam("id", &id) != nil { - return - } - var req 
api.AccountsUnlockHandlerRequest - if jc.Decode(&req) != nil { - return + for _, acc := range req.Accounts { + if acc.Owner == "" { + jc.Error(errors.New("accounts need to have a valid 'Owner'"), http.StatusBadRequest) + return + } } - - err := b.accountsMgr.UnlockAccount(id, req.LockID) - if jc.Check("failed to unlock account", err) != nil { + if b.accounts.SaveAccounts(jc.Request.Context(), req.Accounts) != nil { return } } @@ -2315,3 +2387,92 @@ func (b *Bus) multipartHandlerListPartsPOST(jc jape.Context) { } jc.Encode(resp) } + +func (b *Bus) contractsFormHandler(jc jape.Context) { + // apply pessimistic timeout + ctx, cancel := context.WithTimeout(jc.Request.Context(), 15*time.Minute) + defer cancel() + + // decode the request + var rfr api.ContractFormRequest + if jc.Decode(&rfr) != nil { + return + } + + // validate the request + if rfr.EndHeight == 0 { + http.Error(jc.ResponseWriter, "EndHeight can not be zero", http.StatusBadRequest) + return + } else if rfr.HostKey == (types.PublicKey{}) { + http.Error(jc.ResponseWriter, "HostKey must be provided", http.StatusBadRequest) + return + } else if rfr.HostCollateral.IsZero() { + http.Error(jc.ResponseWriter, "HostCollateral can not be zero", http.StatusBadRequest) + return + } else if rfr.HostIP == "" { + http.Error(jc.ResponseWriter, "HostIP must be provided", http.StatusBadRequest) + return + } else if rfr.RenterFunds.IsZero() { + http.Error(jc.ResponseWriter, "RenterFunds can not be zero", http.StatusBadRequest) + return + } else if rfr.RenterAddress == (types.Address{}) { + http.Error(jc.ResponseWriter, "RenterAddress must be provided", http.StatusBadRequest) + return + } + + // fetch gouging parameters + gp, err := b.gougingParams(ctx) + if jc.Check("could not get gouging parameters", err) != nil { + return + } + gc := gouging.NewChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, nil, nil) + + // fetch host settings + settings, err := b.rhp2.Settings(ctx, rfr.HostKey, rfr.HostIP) + if 
jc.Check("couldn't fetch host settings", err) != nil { + return + } + + // check gouging + breakdown := gc.CheckSettings(settings) + if breakdown.Gouging() { + jc.Error(fmt.Errorf("failed to form contract, gouging check failed: %v", breakdown), http.StatusBadRequest) + return + } + + // send V2 transaction if we're passed the V2 hardfork allow height + var contract rhpv2.ContractRevision + if b.isPassedV2AllowHeight() { + panic("not implemented") + } else { + contract, err = b.formContract( + ctx, + settings, + rfr.RenterAddress, + rfr.RenterFunds, + rfr.HostCollateral, + rfr.HostKey, + rfr.HostIP, + rfr.EndHeight, + ) + if jc.Check("couldn't form contract", err) != nil { + return + } + } + + // store the contract + metadata, err := b.addContract( + ctx, + contract, + contract.Revision.MissedHostPayout().Sub(rfr.HostCollateral), + rfr.RenterFunds, + b.cm.Tip().Height, + api.ContractStatePending, + ) + if jc.Check("couldn't store contract", err) != nil { + return + } + + // return the contract + jc.Encode(metadata) +} diff --git a/cmd/renterd/config.go b/cmd/renterd/config.go index 38231458d..e1200f121 100644 --- a/cmd/renterd/config.go +++ b/cmd/renterd/config.go @@ -97,9 +97,10 @@ func defaultConfig() config.Config { Worker: config.Worker{ Enabled: true, - ID: "worker", - ContractLockTimeout: 30 * time.Second, - BusFlushInterval: 5 * time.Second, + ID: "", + AccountsRefillInterval: defaultAccountRefillInterval, + ContractLockTimeout: 30 * time.Second, + BusFlushInterval: 5 * time.Second, DownloadMaxOverdrive: 5, DownloadOverdriveTimeout: 3 * time.Second, @@ -114,7 +115,6 @@ func defaultConfig() config.Config { ID: api.DefaultAutopilotID, RevisionSubmissionBuffer: 150, // 144 + 6 blocks leeway - AccountsRefillInterval: defaultAccountRefillInterval, Heartbeat: 30 * time.Minute, MigrationHealthCutoff: 0.75, RevisionBroadcastInterval: 7 * 24 * time.Hour, @@ -132,6 +132,15 @@ func defaultConfig() config.Config { } } +func assertWorkerID(cfg *config.Config) error { + if 
cfg.Bus.RemoteAddr != "" && cfg.Worker.ID == "" { + return errors.New("a unique worker ID must be set in a cluster setup") + } else if cfg.Worker.ID == "" { + cfg.Worker.ID = "worker" + } + return nil +} + // loadConfig creates a default config and overrides it with the contents of the // YAML file (specified by the RENTERD_CONFIG_FILE), CLI flags, and environment // variables, in that order. @@ -141,6 +150,11 @@ func loadConfig() (cfg config.Config, network *consensus.Network, genesis types. parseCLIFlags(&cfg) parseEnvironmentVariables(&cfg) + // check worker id + if err = assertWorkerID(&cfg); err != nil { + return + } + // check network switch cfg.Network { case "anagami": @@ -294,6 +308,7 @@ func parseCLIFlags(cfg *config.Config) { flag.Int64Var(&cfg.Bus.SlabBufferCompletionThreshold, "bus.slabBufferCompletionThreshold", cfg.Bus.SlabBufferCompletionThreshold, "Threshold for slab buffer upload (overrides with RENTERD_BUS_SLAB_BUFFER_COMPLETION_THRESHOLD)") // worker + flag.DurationVar(&cfg.Worker.AccountsRefillInterval, "worker.accountRefillInterval", cfg.Worker.AccountsRefillInterval, "Interval for refilling workers' account balances") flag.BoolVar(&cfg.Worker.AllowPrivateIPs, "worker.allowPrivateIPs", cfg.Worker.AllowPrivateIPs, "Allows hosts with private IPs") flag.DurationVar(&cfg.Worker.BusFlushInterval, "worker.busFlushInterval", cfg.Worker.BusFlushInterval, "Interval for flushing data to bus") flag.Uint64Var(&cfg.Worker.DownloadMaxMemory, "worker.downloadMaxMemory", cfg.Worker.DownloadMaxMemory, "Max amount of RAM the worker allocates for slabs when downloading (overrides with RENTERD_WORKER_DOWNLOAD_MAX_MEMORY)") @@ -308,7 +323,6 @@ func parseCLIFlags(cfg *config.Config) { flag.StringVar(&cfg.Worker.ExternalAddress, "worker.externalAddress", cfg.Worker.ExternalAddress, "Address of the worker on the network, only necessary when the bus is remote (overrides with RENTERD_WORKER_EXTERNAL_ADDR)") // autopilot - 
flag.DurationVar(&cfg.Autopilot.AccountsRefillInterval, "autopilot.accountRefillInterval", cfg.Autopilot.AccountsRefillInterval, "Interval for refilling workers' account balances") flag.DurationVar(&cfg.Autopilot.Heartbeat, "autopilot.heartbeat", cfg.Autopilot.Heartbeat, "Interval for autopilot loop execution") flag.Float64Var(&cfg.Autopilot.MigrationHealthCutoff, "autopilot.migrationHealthCutoff", cfg.Autopilot.MigrationHealthCutoff, "Threshold for migrating slabs based on health") flag.DurationVar(&cfg.Autopilot.RevisionBroadcastInterval, "autopilot.revisionBroadcastInterval", cfg.Autopilot.RevisionBroadcastInterval, "Interval for broadcasting contract revisions (overrides with RENTERD_AUTOPILOT_REVISION_BROADCAST_INTERVAL)") diff --git a/cmd/renterd/node.go b/cmd/renterd/node.go index 89dd75ab0..a9758439c 100644 --- a/cmd/renterd/node.go +++ b/cmd/renterd/node.go @@ -376,9 +376,13 @@ func newBus(ctx context.Context, cfg config.Config, pk types.PrivateKey, network } } + // create master key - we currently derive the same key used by the workers + // to ensure contracts formed by the bus can be renewed by the autopilot + masterKey := blake2b.Sum256(append([]byte("worker"), pk...)) + // create bus announcementMaxAgeHours := time.Duration(cfg.Bus.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) if err != nil { return nil, nil, fmt.Errorf("failed to create bus: %w", err) } @@ -494,6 +498,11 @@ func buildStoreConfig(am alerts.Alerter, cfg config.Config, pk types.PrivateKey, var dbMain sql.Database var dbMetrics sql.MetricsDatabase if cfg.Database.MySQL.URI != "" { + // check that both main and metrics databases are not the same + if cfg.Database.MySQL.Database == cfg.Database.MySQL.MetricsDatabase { + return stores.Config{}, errors.New("main and metrics databases cannot be the same") + 
} + // create MySQL connections connMain, err := mysql.Open( cfg.Database.MySQL.User, diff --git a/config/config.go b/config/config.go index 99382240b..6755d3869 100644 --- a/config/config.go +++ b/config/config.go @@ -117,6 +117,7 @@ type ( Enabled bool `yaml:"enabled,omitempty"` ID string `yaml:"id,omitempty"` Remotes []RemoteWorker `yaml:"remotes,omitempty"` + AccountsRefillInterval time.Duration `yaml:"accountsRefillInterval,omitempty"` AllowPrivateIPs bool `yaml:"allowPrivateIPs,omitempty"` BusFlushInterval time.Duration `yaml:"busFlushInterval,omitempty"` ContractLockTimeout time.Duration `yaml:"contractLockTimeout,omitempty"` @@ -134,7 +135,6 @@ type ( Autopilot struct { Enabled bool `yaml:"enabled,omitempty"` ID string `yaml:"id,omitempty"` - AccountsRefillInterval time.Duration `yaml:"accountsRefillInterval,omitempty"` Heartbeat time.Duration `yaml:"heartbeat,omitempty"` MigrationHealthCutoff float64 `yaml:"migrationHealthCutoff,omitempty"` RevisionBroadcastInterval time.Duration `yaml:"revisionBroadcastInterval,omitempty"` diff --git a/go.mod b/go.mod index f8466efe0..65913ed56 100644 --- a/go.mod +++ b/go.mod @@ -1,28 +1,28 @@ module go.sia.tech/renterd -go 1.22.5 +go 1.23.0 require ( + github.com/aws/aws-sdk-go v1.55.5 github.com/gabriel-vasile/mimetype v1.4.5 github.com/go-sql-driver/mysql v1.8.1 github.com/google/go-cmp v0.6.0 github.com/gotd/contrib v0.20.0 - github.com/klauspost/reedsolomon v1.12.3 - github.com/mattn/go-sqlite3 v1.14.22 - github.com/minio/minio-go/v7 v7.0.75 + github.com/klauspost/reedsolomon v1.12.4 + github.com/mattn/go-sqlite3 v1.14.23 github.com/montanaflynn/stats v0.7.1 github.com/shopspring/decimal v1.4.0 - go.sia.tech/core v0.4.3 - go.sia.tech/coreutils v0.2.5 - go.sia.tech/gofakes3 v0.0.4 - go.sia.tech/hostd v1.1.3-0.20240807214810-c2d8ed84dc45 - go.sia.tech/jape v0.12.0 + go.sia.tech/core v0.4.6 + go.sia.tech/coreutils v0.3.2 + go.sia.tech/gofakes3 v0.0.5 + go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 + 
go.sia.tech/jape v0.12.1 go.sia.tech/mux v1.2.0 - go.sia.tech/web/renterd v0.60.1 + go.sia.tech/web/renterd v0.61.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.26.0 - golang.org/x/sys v0.24.0 - golang.org/x/term v0.23.0 + golang.org/x/crypto v0.27.0 + golang.org/x/sys v0.25.0 + golang.org/x/term v0.24.0 gopkg.in/yaml.v3 v3.0.1 lukechampine.com/frand v1.4.2 ) @@ -30,28 +30,21 @@ require ( require ( filippo.io/edwards25519 v1.1.0 // indirect github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect - github.com/cloudflare/cloudflare-go v0.101.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/go-ini/ini v1.67.0 // indirect + github.com/cloudflare/cloudflare-go v0.103.0 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect - github.com/klauspost/compress v1.17.9 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/minio/md5-simd v1.1.2 // indirect - github.com/rs/xid v1.5.0 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df // indirect - go.etcd.io/bbolt v1.3.10 // indirect + go.etcd.io/bbolt v1.3.11 // indirect go.sia.tech/web v0.0.0-20240610131903-5611d44a533e // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.22.0 // indirect - nhooyr.io/websocket v1.8.11 // indirect + golang.org/x/tools v0.23.0 // indirect + nhooyr.io/websocket v1.8.17 // indirect ) diff --git a/go.sum 
b/go.sum index 42cf991d3..4dc769e87 100644 --- a/go.sum +++ b/go.sum @@ -4,17 +4,13 @@ github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da h1:KjTM2ks9d14ZYCvmH github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da/go.mod h1:eHEWzANqSiWQsof+nXEI9bUVUyV6F53Fp89EuCh2EAA= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/cloudflare/cloudflare-go v0.101.0 h1:SXWNSEDkbdY84iFIZGyTdWQwDfd98ljv0/4UubpleBQ= -github.com/cloudflare/cloudflare-go v0.101.0/go.mod h1:xXQHnoXKR48JlWbFS42i2al3nVqimVhcYvKnIdXLw9g= +github.com/cloudflare/cloudflare-go v0.103.0 h1:XXKzgXeUbAo7UTtM4T5wuD2bJPBtNZv7TlZAEy5QI4k= +github.com/cloudflare/cloudflare-go v0.103.0/go.mod h1:0DrjT4g8wgYFYIxhlqR8xi8dNWfyHFGilUkU3+XV8h0= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4= github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4= -github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= -github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= @@ -24,8 +20,6 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gotd/contrib v0.20.0 h1:1Wc4+HMQiIKYQuGHVwVksIx152HFTP6B5n88dDe0ZYw= github.com/gotd/contrib v0.20.0/go.mod h1:P6o8W4niqhDPHLA0U+SA/L7l3BQHYLULpeHfRSePn9o= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -36,31 +30,22 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/klauspost/reedsolomon v1.12.3 h1:tzUznbfc3OFwJaTebv/QdhnFf2Xvb7gZ24XaHLBPmdc= -github.com/klauspost/reedsolomon v1.12.3/go.mod h1:3K5rXwABAvzGeR01r6pWZieUALXO/Tq7bFKGIb4m4WI= +github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= +github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= -github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= -github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE= -github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= github.com/shabbyrobe/gocovmerge v0.0.0-20230507112040-c3350d9342df h1:S77Pf5fIGMa7oSwp8SQPp7Hb4ZiI38K3RNBKD2LLeEM= @@ -70,24 
+55,24 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.sia.tech/core v0.4.3 h1:XEX7v6X8eJh4zyOkSHYi6FsyD+N/OEKw/NIigaaWPAU= -go.sia.tech/core v0.4.3/go.mod h1:cGfGNcyAq1k4oIOsrNpJV/Z/p+20/IMS6vIaofE8nr8= -go.sia.tech/coreutils v0.2.5 h1:oMnBGMBRfxhLzTH1ZDBg0Ep0QLE2GE1lND9yfzOzenA= -go.sia.tech/coreutils v0.2.5/go.mod h1:Pg9eE3xL25couNL/vYrtCWP5uXkVvC+SUcMVh1/E7+I= -go.sia.tech/gofakes3 v0.0.4 h1:Kvo8j5cVdJRBXvV1KBJ69bocY23twG8ao/HCdwuPMeI= -go.sia.tech/gofakes3 v0.0.4/go.mod h1:6hh4lETCMbyFFNWp3FRE838geY6vh1Aeas7LtYDpQdc= -go.sia.tech/hostd v1.1.3-0.20240807214810-c2d8ed84dc45 h1:yq8n3leZWAeEwbAa3sbqe5mS5LgG5IH23aM8tefSuUo= -go.sia.tech/hostd v1.1.3-0.20240807214810-c2d8ed84dc45/go.mod h1:MSP0m1OPZGE5hyXEx35HM6MJWsrL0MLKwaKMzW4b8JU= -go.sia.tech/jape v0.12.0 h1:13fBi7c5X8zxTQ05Cd9ZsIfRJgdvGoZqbEzH861z7BU= -go.sia.tech/jape v0.12.0/go.mod h1:wU+h6Wh5olDjkPXjF0tbZ1GDgoZ6VTi4naFw91yyWC4= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.sia.tech/core v0.4.6 h1:QLm97a7GWBonfnMEOokqWRAqsWCUPL7kzo6k3Adwx8E= +go.sia.tech/core v0.4.6/go.mod h1:Zuq0Tn2aIXJyO0bjGu8cMeVWe+vwQnUfZhG1LCmjD5c= +go.sia.tech/coreutils v0.3.2 h1:3gJqvs18n1FVZmcrnfIYyzS+rBu06OtIscDDAfUAYQI= +go.sia.tech/coreutils v0.3.2/go.mod h1:woPVmN6GUpIKHdi71Hkb9goIbl7b45TquCsAyEzyxnI= +go.sia.tech/gofakes3 v0.0.5 h1:vFhVBUFbKE9ZplvLE2w4TQxFMQyF8qvgxV4TaTph+Vw= +go.sia.tech/gofakes3 v0.0.5/go.mod h1:LXEzwGw+OHysWLmagleCttX93cJZlT9rBu/icOZjQ54= 
+go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238 h1:DP9o+TnNeS34EmxZ/zqZ4px3DgL8en/2RL4EsiSd4GU= +go.sia.tech/hostd v1.1.3-0.20240903081107-6e044db95238/go.mod h1:InmB5LdO6EP+ZW9uolUCO+zh+zVdbJF3iCgU7xokJxQ= +go.sia.tech/jape v0.12.1 h1:xr+o9V8FO8ScRqbSaqYf9bjj1UJ2eipZuNcI1nYousU= +go.sia.tech/jape v0.12.1/go.mod h1:wU+h6Wh5olDjkPXjF0tbZ1GDgoZ6VTi4naFw91yyWC4= go.sia.tech/mux v1.2.0 h1:ofa1Us9mdymBbGMY2XH/lSpY8itFsKIo/Aq8zwe+GHU= go.sia.tech/mux v1.2.0/go.mod h1:Yyo6wZelOYTyvrHmJZ6aQfRoer3o4xyKQ4NmQLJrBSo= go.sia.tech/web v0.0.0-20240610131903-5611d44a533e h1:oKDz6rUExM4a4o6n/EXDppsEka2y/+/PgFOZmHWQRSI= go.sia.tech/web v0.0.0-20240610131903-5611d44a533e/go.mod h1:4nyDlycPKxTlCqvOeRO0wUfXxyzWCEE7+2BRrdNqvWk= -go.sia.tech/web/renterd v0.60.1 h1:KJ/DgYKES29HoRd4/XY/G9CzTrHpMANCRCffIYc6Sxg= -go.sia.tech/web/renterd v0.60.1/go.mod h1:SWwKoAJvLxiHjTXsNPKX3RLiQzJb/vxwcpku3F78MO8= +go.sia.tech/web/renterd v0.61.0 h1:DmSGkpbaqodKvP4Mn79lLeZF2xqcWFQRrT2xPuLf8Uo= +go.sia.tech/web/renterd v0.61.0/go.mod h1:VWfvYtmdJGfrqSoNRO3NoOjUij+RB/xNO4M0HqIf1+M= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -95,31 +80,31 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/crypto 
v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= -golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.17.0 
h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -132,5 +117,5 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= lukechampine.com/frand v1.4.2 h1:RzFIpOvkMXuPMBb9maa4ND4wjBn71E1Jpf8BzJHMaVw= lukechampine.com/frand v1.4.2/go.mod h1:4S/TM2ZgrKejMcKMbeLjISpJMO+/eZ1zu3vYX9dtj3s= -nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= -nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +nhooyr.io/websocket v1.8.17 h1:KEVeLJkUywCKVsnLIDlD/5gtayKp8VoCkksHCGGfT9Y= +nhooyr.io/websocket v1.8.17/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= diff --git a/internal/bus/accounts.go b/internal/bus/accounts.go deleted file mode 100644 index 666398e57..000000000 --- 
a/internal/bus/accounts.go +++ /dev/null @@ -1,351 +0,0 @@ -package bus - -import ( - "context" - "errors" - "fmt" - "math" - "math/big" - "sync" - "time" - - rhpv3 "go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" - "go.sia.tech/renterd/api" - "go.uber.org/zap" - "lukechampine.com/frand" -) - -var ( - ErrAccountNotFound = errors.New("account doesn't exist") -) - -type ( - AccountStore interface { - Accounts(context.Context) ([]api.Account, error) - SaveAccounts(context.Context, []api.Account) error - SetUncleanShutdown(context.Context) error - } -) - -type ( - AccountMgr struct { - s AccountStore - logger *zap.SugaredLogger - - mu sync.Mutex - byID map[rhpv3.Account]*account - } - - account struct { - mu sync.Mutex - locks map[uint64]*accountLock - requiresSyncTime time.Time - api.Account - - rwmu sync.RWMutex - } - - accountLock struct { - heldByID uint64 - unlock func() - timer *time.Timer - } -) - -// NewAccountManager creates a new account manager. It will load all accounts -// from the given store and mark the shutdown as unclean. When Shutdown is -// called it will save all accounts. -func NewAccountManager(ctx context.Context, s AccountStore, logger *zap.Logger) (*AccountMgr, error) { - logger = logger.Named("accounts") - - // load saved accounts - saved, err := s.Accounts(ctx) - if err != nil { - return nil, err - } - - // wrap with a lock - accounts := make(map[rhpv3.Account]*account, len(saved)) - for _, acc := range saved { - account := &account{ - Account: acc, - locks: map[uint64]*accountLock{}, - } - accounts[account.ID] = account - } - - // mark the shutdown as unclean, this will be overwritten on shutdown - err = s.SetUncleanShutdown(ctx) - if err != nil { - return nil, fmt.Errorf("failed to mark account shutdown as unclean: %w", err) - } - - return &AccountMgr{ - s: s, - logger: logger.Sugar(), - - byID: accounts, - }, nil -} - -// Account returns the account with the given id. 
-func (a *AccountMgr) Account(id rhpv3.Account, hostKey types.PublicKey) (api.Account, error) { - acc := a.account(id, hostKey) - acc.mu.Lock() - defer acc.mu.Unlock() - return acc.convert(), nil -} - -// Accounts returns all accounts. -func (a *AccountMgr) Accounts() []api.Account { - a.mu.Lock() - defer a.mu.Unlock() - accounts := make([]api.Account, 0, len(a.byID)) - for _, acc := range a.byID { - acc.mu.Lock() - accounts = append(accounts, acc.convert()) - acc.mu.Unlock() - } - return accounts -} - -// AddAmount applies the provided amount to an account through addition. So the -// input can be both a positive or negative number depending on whether a -// withdrawal or deposit is recorded. If the account doesn't exist, it is -// created. -func (a *AccountMgr) AddAmount(id rhpv3.Account, hk types.PublicKey, amt *big.Int) { - acc := a.account(id, hk) - - // Update balance. - acc.mu.Lock() - balanceBefore := acc.Balance.String() - acc.Balance.Add(acc.Balance, amt) - - // Log deposits. - if amt.Cmp(big.NewInt(0)) > 0 { - a.logger.Infow("account balance was increased", - "account", acc.ID, - "host", acc.HostKey.String(), - "amt", amt.String(), - "balanceBefore", balanceBefore, - "balanceAfter", acc.Balance.String()) - } - acc.mu.Unlock() -} - -func (a *AccountMgr) LockAccount(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, exclusive bool, duration time.Duration) (api.Account, uint64) { - acc := a.account(id, hostKey) - - // Try to lock the account. - if exclusive { - acc.rwmu.Lock() - } else { - acc.rwmu.RLock() - } - - // Create a new lock with an unlock function that can only be called once. - var once sync.Once - heldByID := frand.Uint64n(math.MaxUint64) + 1 - lock := &accountLock{ - heldByID: heldByID, - unlock: func() { - once.Do(func() { - if exclusive { - acc.rwmu.Unlock() - } else { - acc.rwmu.RUnlock() - } - acc.mu.Lock() - delete(acc.locks, heldByID) - acc.mu.Unlock() - }) - }, - } - - // Spawn a timer that will eventually unlock the lock. 
- lock.timer = time.AfterFunc(duration, lock.unlock) - - acc.mu.Lock() - acc.locks[lock.heldByID] = lock - account := acc.convert() - acc.mu.Unlock() - return account, lock.heldByID -} - -// ResetDrift resets the drift on an account. -func (a *AccountMgr) ResetDrift(id rhpv3.Account) error { - a.mu.Lock() - account, exists := a.byID[id] - if !exists { - a.mu.Unlock() - return ErrAccountNotFound - } - a.mu.Unlock() - - account.mu.Lock() - driftBefore := account.Drift.String() - account.mu.Unlock() - - account.resetDrift() - - a.logger.Infow("account drift was reset", - zap.Stringer("account", account.ID), - zap.Stringer("host", account.HostKey), - zap.String("driftBefore", driftBefore)) - - return nil -} - -// SetBalance sets the balance of a given account to the provided amount. If the -// account doesn't exist, it is created. -// If an account hasn't been saved successfully upon the last shutdown, no drift -// will be added upon the first call to SetBalance. -func (a *AccountMgr) SetBalance(id rhpv3.Account, hk types.PublicKey, balance *big.Int) { - acc := a.account(id, hk) - - acc.mu.Lock() - defer acc.mu.Unlock() - - // save previous values - prevBalance := new(big.Int).Set(acc.Balance) - prevDrift := new(big.Int).Set(acc.Drift) - - // update balance - acc.Balance.Set(balance) - - // update drift - drift := new(big.Int).Sub(balance, prevBalance) - if acc.CleanShutdown { - acc.Drift = acc.Drift.Add(acc.Drift, drift) - } - - // reset fields - acc.CleanShutdown = true - acc.RequiresSync = false - - // log account changes - a.logger.Infow("account balance was reset", - zap.Stringer("account", acc.ID), - zap.Stringer("host", acc.HostKey), - zap.Stringer("balanceBefore", prevBalance), - zap.Stringer("balanceAfter", balance), - zap.Stringer("driftBefore", prevDrift), - zap.Stringer("driftAfter", acc.Drift), - zap.Bool("firstDrift", acc.Drift.Cmp(big.NewInt(0)) != 0 && prevDrift.Cmp(big.NewInt(0)) == 0), - zap.Bool("cleanshutdown", acc.CleanShutdown), - 
zap.Stringer("drift", drift)) -} - -// ScheduleSync sets the requiresSync flag of an account. -func (a *AccountMgr) ScheduleSync(id rhpv3.Account, hk types.PublicKey) error { - acc := a.account(id, hk) - acc.mu.Lock() - // Only update the sync flag to 'true' if some time has passed since the - // last time it was set. That way we avoid multiple workers setting it after - // failing at the same time, causing multiple syncs in the process. - if time.Since(acc.requiresSyncTime) < 30*time.Second { - acc.mu.Unlock() - return api.ErrRequiresSyncSetRecently - } - acc.RequiresSync = true - acc.requiresSyncTime = time.Now() - - // Log scheduling a sync. - a.logger.Infow("account sync was scheduled", - "account", acc.ID, - "host", acc.HostKey.String(), - "balance", acc.Balance.String(), - "drift", acc.Drift.String()) - acc.mu.Unlock() - - a.mu.Lock() - account, exists := a.byID[id] - defer a.mu.Unlock() - if !exists { - return ErrAccountNotFound - } - account.resetDrift() - return nil -} - -func (a *AccountMgr) Shutdown(ctx context.Context) error { - accounts := a.Accounts() - err := a.s.SaveAccounts(ctx, accounts) - if err != nil { - a.logger.Errorf("failed to save %v accounts: %v", len(accounts), err) - return err - } - - a.logger.Infof("successfully saved %v accounts", len(accounts)) - return nil -} - -// UnlockAccount unlocks an account with the given lock id. -func (a *AccountMgr) UnlockAccount(id rhpv3.Account, lockID uint64) error { - a.mu.Lock() - acc, exists := a.byID[id] - if !exists { - a.mu.Unlock() - return ErrAccountNotFound - } - a.mu.Unlock() - - // Get lock. - acc.mu.Lock() - lock, exists := acc.locks[lockID] - acc.mu.Unlock() - if !exists { - return fmt.Errorf("account lock with id %v not found", lockID) - } - - // Stop timer. 
- lock.timer.Stop() - select { - case <-lock.timer.C: - default: - } - - // Unlock - lock.unlock() - return nil -} - -func (a *AccountMgr) account(id rhpv3.Account, hk types.PublicKey) *account { - a.mu.Lock() - defer a.mu.Unlock() - - acc, exists := a.byID[id] - if !exists { - acc = &account{ - Account: api.Account{ - ID: id, - CleanShutdown: false, - HostKey: hk, - Balance: big.NewInt(0), - Drift: big.NewInt(0), - RequiresSync: true, // initial sync - }, - locks: map[uint64]*accountLock{}, - } - a.byID[id] = acc - } - return acc -} - -func (a *account) convert() api.Account { - return api.Account{ - ID: a.ID, - Balance: new(big.Int).Set(a.Balance), - CleanShutdown: a.CleanShutdown, - Drift: new(big.Int).Set(a.Drift), - HostKey: a.HostKey, - RequiresSync: a.RequiresSync, - } -} - -func (a *account) resetDrift() { - a.mu.Lock() - defer a.mu.Unlock() - a.Drift.SetInt64(0) -} diff --git a/internal/bus/accounts_test.go b/internal/bus/accounts_test.go deleted file mode 100644 index 38d062e75..000000000 --- a/internal/bus/accounts_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package bus - -import ( - "context" - "testing" - "time" - - rhpv3 "go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" - "go.sia.tech/renterd/api" - "go.uber.org/zap" - "lukechampine.com/frand" -) - -type mockAccStore struct{} - -func (m *mockAccStore) Accounts(context.Context) ([]api.Account, error) { return nil, nil } -func (m *mockAccStore) SaveAccounts(context.Context, []api.Account) error { return nil } -func (m *mockAccStore) SetUncleanShutdown(context.Context) error { return nil } - -func TestAccountLocking(t *testing.T) { - eas := &mockAccStore{} - accounts, err := NewAccountManager(context.Background(), eas, zap.NewNop()) - if err != nil { - t.Fatal(err) - } - - var accountID rhpv3.Account - frand.Read(accountID[:]) - var hk types.PublicKey - frand.Read(hk[:]) - - // Lock account non-exclusively a few times. 
- var lockIDs []uint64 - for i := 0; i < 10; i++ { - acc, lockID := accounts.LockAccount(context.Background(), accountID, hk, false, 30*time.Second) - if lockID == 0 { - t.Fatal("invalid lock id") - } - if acc.ID != accountID { - t.Fatal("wrong id") - } - lockIDs = append(lockIDs, lockID) - } - - // Unlock them again. - for _, lockID := range lockIDs { - err := accounts.UnlockAccount(accountID, lockID) - if err != nil { - t.Fatal("failed to unlock", err) - } - } - - // Acquire exclusive lock. - _, exclusiveLockID := accounts.LockAccount(context.Background(), accountID, hk, true, 30*time.Second) - - // Try acquiring a non-exclusive one. - var sharedLockID uint64 - done := make(chan struct{}) - go func() { - defer close(done) - _, sharedLockID = accounts.LockAccount(context.Background(), accountID, hk, true, 30*time.Second) - }() - - // Wait some time to confirm it's not possible. - select { - case <-done: - t.Fatal("lock was acquired even though exclusive one was held") - case <-time.After(100 * time.Millisecond): - } - - // Unlock exclusive one. - if err := accounts.UnlockAccount(accountID, exclusiveLockID); err != nil { - t.Fatal(err) - } - // Doing so again should fail. - if err := accounts.UnlockAccount(accountID, exclusiveLockID); err == nil { - t.Fatal("should fail") - } - - // Other lock should be acquired now. - select { - case <-time.After(100 * time.Millisecond): - t.Fatal("other lock wasn't acquired") - case <-done: - } - - // Unlock the other lock too. - if err := accounts.UnlockAccount(accountID, sharedLockID); err != nil { - t.Fatal(err) - } - - // Locks should be empty since they clean up after themselves. 
- acc := accounts.account(accountID, hk) - if len(acc.locks) != 0 { - t.Fatal("should not have any locks", len(acc.locks)) - } -} diff --git a/internal/bus/forex.go b/internal/bus/forex.go index b6544b911..122056949 100644 --- a/internal/bus/forex.go +++ b/internal/bus/forex.go @@ -2,11 +2,10 @@ package bus import ( "context" - "encoding/json" - "errors" "fmt" - "io" "net/http" + + "go.sia.tech/renterd/internal/utils" ) type ( @@ -27,25 +26,6 @@ func (f *client) SiacoinExchangeRate(ctx context.Context, currency string) (rate } req.Header.Set("Accept", "application/json") - // create http client - resp, err := http.DefaultClient.Do(req) - if err != nil { - return 0, fmt.Errorf("failed to send request: %w", err) - } - defer resp.Body.Close() - - // check status code - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - var errorMessage string - if err := json.NewDecoder(io.LimitReader(resp.Body, 1024)).Decode(&errorMessage); err != nil { - return 0, fmt.Errorf("unexpected status code: %d", resp.StatusCode) - } - return 0, errors.New(errorMessage) - } - - // decode exchange rate - if err := json.NewDecoder(resp.Body).Decode(&rate); err != nil { - return 0, fmt.Errorf("failed to decode response: %w", err) - } + _, _, err = utils.DoRequest(req, &rate) return } diff --git a/internal/bus/pinmanager.go b/internal/bus/pinmanager.go index c128a8392..32e283812 100644 --- a/internal/bus/pinmanager.go +++ b/internal/bus/pinmanager.go @@ -66,8 +66,11 @@ func NewPinManager(alerts alerts.Alerter, broadcaster webhooks.Broadcaster, s St } // start the pin manager - pm.run() - + pm.wg.Add(1) + go func() { + pm.run() + pm.wg.Done() + }() return pm } @@ -146,35 +149,30 @@ func (pm *pinManager) rateExceedsThreshold(threshold float64) bool { } func (pm *pinManager) run() { - pm.wg.Add(1) - go func() { - defer pm.wg.Done() - - t := time.NewTicker(pm.updateInterval) - defer t.Stop() - - var forced bool - for { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) - 
err := pm.updatePrices(ctx, forced) - if err != nil { - pm.logger.Warn("failed to update prices", zap.Error(err)) - pm.a.RegisterAlert(ctx, newPricePinningFailedAlert(err)) - } else { - pm.a.DismissAlerts(ctx, alertPricePinningID) - } - cancel() - - forced = false - select { - case <-pm.closedChan: - return - case <-pm.triggerChan: - forced = true - case <-t.C: - } + t := time.NewTicker(pm.updateInterval) + defer t.Stop() + + var forced bool + for { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + err := pm.updatePrices(ctx, forced) + if err != nil { + pm.logger.Warn("failed to update prices", zap.Error(err)) + pm.a.RegisterAlert(ctx, newPricePinningFailedAlert(err)) + } else { + pm.a.DismissAlerts(ctx, alertPricePinningID) } - }() + cancel() + + forced = false + select { + case <-pm.closedChan: + return + case <-pm.triggerChan: + forced = true + case <-t.C: + } + } } func (pm *pinManager) updateAutopilotSettings(ctx context.Context, autopilotID string, pins api.AutopilotPins, rate decimal.Decimal) error { diff --git a/internal/gouging/gouging.go b/internal/gouging/gouging.go index 8e729247d..aadfdd57f 100644 --- a/internal/gouging/gouging.go +++ b/internal/gouging/gouging.go @@ -32,7 +32,7 @@ const ( ) var ( - errHostSettingsGouging = errors.New("host settings gouging detected") + ErrHostSettingsGouging = errors.New("host settings gouging detected") ErrPriceTableGouging = errors.New("price table gouging detected") ) @@ -243,7 +243,7 @@ func checkContractGougingRHPv2(period, renewWindow *uint64, hs *rhpv2.HostSettin err = checkContractGouging(*period, *renewWindow, hs.MaxDuration, hs.WindowSize) if err != nil { - err = fmt.Errorf("%w: %v", errHostSettingsGouging, err) + err = fmt.Errorf("%w: %v", ErrHostSettingsGouging, err) } return } @@ -290,14 +290,14 @@ func checkPruneGougingRHPv2(gs api.GougingSettings, hs *rhpv2.HostSettings) erro hs.UploadBandwidthPrice, ) if overflow { - return fmt.Errorf("%w: overflow detected when computing 
sector download price", errHostSettingsGouging) + return fmt.Errorf("%w: overflow detected when computing sector download price", ErrHostSettingsGouging) } dpptb, overflow := sectorDownloadPrice.Mul64WithOverflow(uint64(bytesPerTB) / rhpv2.SectorSize) // sectors per TB if overflow { - return fmt.Errorf("%w: overflow detected when computing download price per TiB", errHostSettingsGouging) + return fmt.Errorf("%w: overflow detected when computing download price per TiB", ErrHostSettingsGouging) } if !gs.MaxDownloadPrice.IsZero() && dpptb.Cmp(gs.MaxDownloadPrice) > 0 { - return fmt.Errorf("%w: cost per TiB exceeds max dl price: %v > %v", errHostSettingsGouging, dpptb, gs.MaxDownloadPrice) + return fmt.Errorf("%w: cost per TiB exceeds max dl price: %v > %v", ErrHostSettingsGouging, dpptb, gs.MaxDownloadPrice) } return nil } diff --git a/internal/worker/dialer.go b/internal/rhp/dialer.go similarity index 99% rename from internal/worker/dialer.go rename to internal/rhp/dialer.go index 56e51ce42..b2f87b32e 100644 --- a/internal/worker/dialer.go +++ b/internal/rhp/dialer.go @@ -1,4 +1,4 @@ -package worker +package rhp import ( "context" diff --git a/internal/rhp/v2/rhp.go b/internal/rhp/v2/rhp.go index 9786bf4c8..14c2b48c7 100644 --- a/internal/rhp/v2/rhp.go +++ b/internal/rhp/v2/rhp.go @@ -13,6 +13,7 @@ import ( rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" + "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/gouging" "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" @@ -67,12 +68,14 @@ var ( ErrNoSectorsToPrune = errors.New("no sectors to prune") ) +type ( + PrunableRootsFn = func(fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) +) + type ( Dialer interface { Dial(ctx context.Context, hk types.PublicKey, address string) (net.Conn, error) } - - PrepareFormFn func(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings 
rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, discard func(types.Transaction), err error) ) type Client struct { @@ -157,76 +160,42 @@ func (c *Client) Settings(ctx context.Context, hostKey types.PublicKey, hostIP s return } -func (c *Client) FormContract(ctx context.Context, renterAddress types.Address, renterKey types.PrivateKey, hostKey types.PublicKey, hostIP string, renterFunds, hostCollateral types.Currency, endHeight uint64, gougingChecker gouging.Checker, prepareForm PrepareFormFn) (contract rhpv2.ContractRevision, txnSet []types.Transaction, err error) { +func (c *Client) FormContract(ctx context.Context, hostKey types.PublicKey, hostIP string, renterKey types.PrivateKey, txnSet []types.Transaction) (contract rhpv2.ContractRevision, fullTxnSet []types.Transaction, err error) { err = c.withTransport(ctx, hostKey, hostIP, func(t *rhpv2.Transport) (err error) { - settings, err := rpcSettings(ctx, t) - if err != nil { - return err - } - - if breakdown := gougingChecker.CheckSettings(settings); breakdown.Gouging() { - return fmt.Errorf("failed to form contract, gouging check failed: %v", breakdown) - } - - renterTxnSet, discardTxn, err := prepareForm(ctx, renterAddress, renterKey.PublicKey(), renterFunds, hostCollateral, hostKey, settings, endHeight) - if err != nil { - return err - } - - contract, txnSet, err = rpcFormContract(ctx, t, renterKey, renterTxnSet) - if err != nil { - discardTxn(renterTxnSet[len(renterTxnSet)-1]) - return err - } + contract, fullTxnSet, err = rpcFormContract(ctx, t, renterKey, txnSet) return }) return } -func (c *Client) PruneContract(ctx context.Context, renterKey types.PrivateKey, gougingChecker gouging.Checker, hostIP string, hostKey types.PublicKey, fcid types.FileContractID, lastKnownRevisionNumber uint64, toKeep []types.Hash256) (revision *types.FileContractRevision, deleted, remaining uint64, cost types.Currency, err error) { +func (c *Client) PruneContract(ctx context.Context, renterKey types.PrivateKey, 
gougingChecker gouging.Checker, hostIP string, hostKey types.PublicKey, fcid types.FileContractID, lastKnownRevisionNumber uint64, diffRootsFn PrunableRootsFn) (revision *types.FileContractRevision, spending api.ContractSpending, deleted, remaining uint64, err error) { + log := c.logger.Named("performContractPruning") err = c.withTransport(ctx, hostKey, hostIP, func(t *rhpv2.Transport) error { return c.withRevisionV2(renterKey, gougingChecker, t, fcid, lastKnownRevisionNumber, func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) (err error) { - // fetch roots - got, fetchCost, err := c.fetchContractRoots(t, renterKey, &rev, settings) - if err != nil { - return err - } - - // update cost and revision - cost = cost.Add(fetchCost) + // reference the revision revision = &rev.Revision - keep := make(map[types.Hash256]struct{}) - for _, root := range toKeep { - keep[root] = struct{}{} - } - - // collect indices for roots we want to prune + // fetch roots to delete var indices []uint64 - for i, root := range got { - if _, wanted := keep[root]; wanted { - delete(keep, root) // prevent duplicates - continue - } - indices = append(indices, uint64(i)) - } - if len(indices) == 0 { - return fmt.Errorf("%w: database holds %d, contract contains %d", ErrNoSectorsToPrune, len(toKeep), len(got)) + indices, spending.SectorRoots, err = c.prunableContractRoots(t, renterKey, &rev, settings, func(fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) { + startt := time.Now() + defer func() { + log.Debugf("batch diff roots took %v", time.Since(startt)) + }() + return diffRootsFn(fcid, roots) + }) + if err != nil { + return err + } else if len(indices) == 0 { + return ErrNoSectorsToPrune } // delete the roots from the contract - var deleteCost types.Currency - deleted, deleteCost, err = c.deleteContractRoots(t, renterKey, &rev, settings, indices) + deleted, spending.Deletions, err = c.deleteContractRoots(t, renterKey, &rev, settings, 
indices) if deleted < uint64(len(indices)) { remaining = uint64(len(indices)) - deleted } - // update cost and revision - if deleted > 0 { - cost = cost.Add(deleteCost) - revision = &rev.Revision - } - // return sizes instead of number of roots deleted *= rhpv2.SectorSize remaining *= rhpv2.SectorSize @@ -423,99 +392,144 @@ func (c *Client) deleteContractRoots(t *rhpv2.Transport, renterKey types.Private return } -func (c *Client) fetchContractRoots(t *rhpv2.Transport, renterKey types.PrivateKey, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings) (roots []types.Hash256, cost types.Currency, _ error) { - // download the full set of SectorRoots +func (c *Client) prunableContractRoots(t *rhpv2.Transport, renterKey types.PrivateKey, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, prunableRootsFn PrunableRootsFn) (indices []uint64, cost types.Currency, _ error) { numsectors := rev.NumSectors() for offset := uint64(0); offset < numsectors; { + // calculate the batch size n := batchSizeFetchSectors if offset+n > numsectors { n = numsectors - offset } - // calculate the cost - batchCost, _ := settings.RPCSectorRootsCost(offset, n).Total() - - // TODO: remove once host network is updated - if utils.VersionCmp(settings.Version, "1.6.0") < 0 { - // calculate the response size - proofSize := rhpv2.RangeProofSize(numsectors, offset, offset+n) - responseSize := (proofSize + n) * 32 - if responseSize < minMessageSize { - responseSize = minMessageSize - } - batchCost = settings.BaseRPCPrice.Add(settings.DownloadBandwidthPrice.Mul64(responseSize)) - batchCost = batchCost.Mul64(2) // generous leeway - } - - // check funds - if rev.RenterFunds().Cmp(batchCost) < 0 { - return nil, types.ZeroCurrency, ErrInsufficientFunds - } - - // update the revision number - if rev.Revision.RevisionNumber == math.MaxUint64 { - return nil, types.ZeroCurrency, ErrContractFinalized + // fetch the batch + batch, batchCost, err := c.fetchContractRootsBatch(t, renterKey, rev, settings, 
offset, n) + if err != nil { + return nil, types.ZeroCurrency, err } - rev.Revision.RevisionNumber++ - // update the revision outputs - newRevision, err := updatedRevision(rev.Revision, batchCost, types.ZeroCurrency) + // fetch prunable roots for this batch + prunable, err := prunableRootsFn(rev.ID(), batch) if err != nil { return nil, types.ZeroCurrency, err } - // build the sector roots request - revisionHash := hashRevision(newRevision) - req := &rhpv2.RPCSectorRootsRequest{ - RootOffset: uint64(offset), - NumRoots: uint64(n), - - RevisionNumber: rev.Revision.RevisionNumber, - ValidProofValues: []types.Currency{ - newRevision.ValidProofOutputs[0].Value, - newRevision.ValidProofOutputs[1].Value, - }, - MissedProofValues: []types.Currency{ - newRevision.MissedProofOutputs[0].Value, - newRevision.MissedProofOutputs[1].Value, - newRevision.MissedProofOutputs[2].Value, - }, - Signature: renterKey.SignHash(revisionHash), + // append the roots, make sure to take the offset into account + for _, index := range prunable { + indices = append(indices, index+offset) } + offset += n - // execute the sector roots RPC - var rootsResp rhpv2.RPCSectorRootsResponse - if err := t.WriteRequest(rhpv2.RPCSectorRootsID, req); err != nil { - return nil, types.ZeroCurrency, err - } else if err := t.ReadResponse(&rootsResp, maxMerkleProofResponseSize); err != nil { - return nil, types.ZeroCurrency, fmt.Errorf("couldn't read sector roots response: %w", err) - } + // update the cost + cost = cost.Add(batchCost) + } + return +} - // verify the host signature - if !rev.HostKey().VerifyHash(revisionHash, rootsResp.Signature) { - return nil, types.ZeroCurrency, errors.New("host's signature is invalid") +func (c *Client) fetchContractRoots(t *rhpv2.Transport, renterKey types.PrivateKey, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings) (roots []types.Hash256, cost types.Currency, _ error) { + numsectors := rev.NumSectors() + for offset := uint64(0); offset < numsectors; { + // calculate 
the batch size + n := batchSizeFetchSectors + if offset+n > numsectors { + n = numsectors - offset } - rev.Signatures[0].Signature = req.Signature[:] - rev.Signatures[1].Signature = rootsResp.Signature[:] - - // verify the proof - if uint64(len(rootsResp.SectorRoots)) != n { - return nil, types.ZeroCurrency, fmt.Errorf("couldn't verify contract roots proof, host %v, version %v, err: number of roots does not match range %d != %d (num sectors: %d rev size: %d offset: %d)", rev.HostKey(), settings.Version, len(rootsResp.SectorRoots), n, numsectors, rev.Revision.Filesize, offset) - } else if !rhpv2.VerifySectorRangeProof(rootsResp.MerkleProof, rootsResp.SectorRoots, offset, offset+n, numsectors, rev.Revision.FileMerkleRoot) { - return nil, types.ZeroCurrency, fmt.Errorf("couldn't verify contract roots proof, host %v, version %v; %w", rev.HostKey(), settings.Version, ErrInvalidMerkleProof) + + // fetch the batch + batch, batchCost, err := c.fetchContractRootsBatch(t, renterKey, rev, settings, offset, n) + if err != nil { + return nil, types.ZeroCurrency, err } - // append roots - roots = append(roots, rootsResp.SectorRoots...) + // append the roots + roots = append(roots, batch...) 
offset += n - // update revision - rev.Revision = newRevision + // update the cost cost = cost.Add(batchCost) } return } +func (c *Client) fetchContractRootsBatch(t *rhpv2.Transport, renterKey types.PrivateKey, rev *rhpv2.ContractRevision, settings rhpv2.HostSettings, offset, limit uint64) ([]types.Hash256, types.Currency, error) { + // calculate the cost + cost, _ := settings.RPCSectorRootsCost(offset, limit).Total() + + // TODO: remove once host network is updated + if utils.VersionCmp(settings.Version, "1.6.0") < 0 { + // calculate the response size + proofSize := rhpv2.RangeProofSize(rev.NumSectors(), offset, offset+limit) + responseSize := (proofSize + limit) * 32 + if responseSize < minMessageSize { + responseSize = minMessageSize + } + cost = settings.BaseRPCPrice.Add(settings.DownloadBandwidthPrice.Mul64(responseSize)) + cost = cost.Mul64(2) // generous leeway + } + + // check funds + if rev.RenterFunds().Cmp(cost) < 0 { + return nil, types.ZeroCurrency, ErrInsufficientFunds + } + + // update the revision number + if rev.Revision.RevisionNumber == math.MaxUint64 { + return nil, types.ZeroCurrency, ErrContractFinalized + } + rev.Revision.RevisionNumber++ + + // update the revision outputs + newRevision, err := updatedRevision(rev.Revision, cost, types.ZeroCurrency) + if err != nil { + return nil, types.ZeroCurrency, err + } + + // build the sector roots request + revisionHash := hashRevision(newRevision) + req := &rhpv2.RPCSectorRootsRequest{ + RootOffset: offset, + NumRoots: limit, + + RevisionNumber: rev.Revision.RevisionNumber, + ValidProofValues: []types.Currency{ + newRevision.ValidProofOutputs[0].Value, + newRevision.ValidProofOutputs[1].Value, + }, + MissedProofValues: []types.Currency{ + newRevision.MissedProofOutputs[0].Value, + newRevision.MissedProofOutputs[1].Value, + newRevision.MissedProofOutputs[2].Value, + }, + Signature: renterKey.SignHash(revisionHash), + } + + // execute the sector roots RPC + var rootsResp rhpv2.RPCSectorRootsResponse + 
if err := t.WriteRequest(rhpv2.RPCSectorRootsID, req); err != nil { + return nil, types.ZeroCurrency, err + } else if err := t.ReadResponse(&rootsResp, maxMerkleProofResponseSize); err != nil { + return nil, types.ZeroCurrency, fmt.Errorf("couldn't read sector roots response: %w", err) + } + + // verify the host signature + if !rev.HostKey().VerifyHash(revisionHash, rootsResp.Signature) { + return nil, cost, errors.New("host's signature is invalid") + } + rev.Signatures[0].Signature = req.Signature[:] + rev.Signatures[1].Signature = rootsResp.Signature[:] + + // verify the proof + if uint64(len(rootsResp.SectorRoots)) != limit { + return nil, cost, fmt.Errorf("couldn't verify contract roots proof, host %v, version %v, err: number of roots does not match range %d != %d (num sectors: %d rev size: %d offset: %d)", rev.HostKey(), settings.Version, len(rootsResp.SectorRoots), limit, rev.NumSectors(), rev.Revision.Filesize, offset) + } else if !rhpv2.VerifySectorRangeProof(rootsResp.MerkleProof, rootsResp.SectorRoots, offset, offset+limit, rev.NumSectors(), rev.Revision.FileMerkleRoot) { + return nil, cost, fmt.Errorf("couldn't verify contract roots proof, host %v, version %v; %w", rev.HostKey(), settings.Version, ErrInvalidMerkleProof) + } + + // update revision + rev.Revision = newRevision + + return rootsResp.SectorRoots, cost, nil +} + func (w *Client) withRevisionV2(renterKey types.PrivateKey, gougingChecker gouging.Checker, t *rhpv2.Transport, fcid types.FileContractID, lastKnownRevisionNumber uint64, fn func(t *rhpv2.Transport, rev rhpv2.ContractRevision, settings rhpv2.HostSettings) error) error { // execute lock RPC var lockResp rhpv2.RPCLockResponse @@ -569,7 +583,7 @@ func (w *Client) withRevisionV2(renterKey types.PrivateKey, gougingChecker gougi // perform gouging checks on settings if breakdown := gougingChecker.CheckSettings(settings); breakdown.Gouging() { - return fmt.Errorf("failed to prune contract: %v", breakdown) + return fmt.Errorf("%w: %v", 
gouging.ErrHostSettingsGouging, breakdown) } return fn(t, rev, settings) diff --git a/internal/rhp/v3/rhp.go b/internal/rhp/v3/rhp.go index 5ae5d9972..c723d4117 100644 --- a/internal/rhp/v3/rhp.go +++ b/internal/rhp/v3/rhp.go @@ -159,21 +159,21 @@ func (c *Client) FundAccount(ctx context.Context, rev *types.FileContractRevisio }) } -func (c *Client) Renew(ctx context.Context, rrr api.RHPRenewRequest, gougingChecker gouging.Checker, renewer PrepareRenewFunc, signer SignFunc, rev types.FileContractRevision, renterKey types.PrivateKey) (newRev rhpv2.ContractRevision, txnSet []types.Transaction, contractPrice, fundAmount types.Currency, err error) { - err = c.tpool.withTransport(ctx, rrr.HostKey, rrr.SiamuxAddr, func(ctx context.Context, t *transportV3) error { - newRev, txnSet, contractPrice, fundAmount, err = rpcRenew(ctx, rrr, gougingChecker, renewer, signer, t, rev, renterKey) +func (c *Client) Renew(ctx context.Context, gc gouging.Checker, rev types.FileContractRevision, renterKey types.PrivateKey, hostKey types.PublicKey, hostSiamuxAddr string, renewTxnFn PrepareRenewFn, signTxnFn SignTxnFn) (newRev rhpv2.ContractRevision, txnSet []types.Transaction, contractPrice, fundAmount types.Currency, err error) { + err = c.tpool.withTransport(ctx, hostKey, hostSiamuxAddr, func(ctx context.Context, t *transportV3) error { + newRev, txnSet, contractPrice, fundAmount, err = rpcRenew(ctx, t, gc, rev, renterKey, renewTxnFn, signTxnFn) return err }) return } -func (c *Client) SyncAccount(ctx context.Context, rev *types.FileContractRevision, hk types.PublicKey, siamuxAddr string, accID rhpv3.Account, pt rhpv3.SettingsID, rk types.PrivateKey) (balance types.Currency, _ error) { +func (c *Client) SyncAccount(ctx context.Context, rev *types.FileContractRevision, hk types.PublicKey, siamuxAddr string, accID rhpv3.Account, pt rhpv3.HostPriceTable, rk types.PrivateKey) (balance types.Currency, _ error) { return balance, c.tpool.withTransport(ctx, hk, siamuxAddr, func(ctx 
context.Context, t *transportV3) error { - payment, err := payByContract(rev, types.NewCurrency64(1), accID, rk) + payment, err := payByContract(rev, pt.AccountBalanceCost, accID, rk) if err != nil { return err } - balance, err = rpcAccountBalance(ctx, t, &payment, accID, pt) + balance, err = rpcAccountBalance(ctx, t, &payment, accID, pt.UID) return err }) } diff --git a/internal/rhp/v3/rpc.go b/internal/rhp/v3/rpc.go index 746cac5fe..db9c57469 100644 --- a/internal/rhp/v3/rpc.go +++ b/internal/rhp/v3/rpc.go @@ -26,9 +26,9 @@ type ( // gouging checks before paying for the table. PriceTablePaymentFunc func(pt rhpv3.HostPriceTable) (rhpv3.PaymentMethod, error) - PrepareRenewFunc func(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (resp api.WalletPrepareRenewResponse, discard func(context.Context, types.Transaction, *error), err error) - - SignFunc func(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error + DiscardTxnFn func(err *error) + PrepareRenewFn func(pt rhpv3.HostPriceTable) (toSign []types.Hash256, txnSet []types.Transaction, fundAmount types.Currency, discard DiscardTxnFn, err error) + SignTxnFn func(txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) ) // rpcPriceTable calls the UpdatePriceTable RPC. 
@@ -343,7 +343,7 @@ func rpcAppendSector(ctx context.Context, t *transportV3, renterKey types.Privat return } -func rpcRenew(ctx context.Context, rrr api.RHPRenewRequest, gougingChecker gouging.Checker, prepareRenew PrepareRenewFunc, signTxn SignFunc, t *transportV3, rev types.FileContractRevision, renterKey types.PrivateKey) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { +func rpcRenew(ctx context.Context, t *transportV3, gc gouging.Checker, rev types.FileContractRevision, renterKey types.PrivateKey, prepareTxnFn PrepareRenewFn, signTxnFn SignTxnFn) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { defer utils.WrapErr(ctx, "RPCRenew", &err) s, err := t.DialStream(ctx) @@ -368,21 +368,20 @@ func rpcRenew(ctx context.Context, rrr api.RHPRenewRequest, gougingChecker gougi } // Perform gouging checks. - if breakdown := gougingChecker.Check(nil, &pt); breakdown.Gouging() { + if breakdown := gc.Check(nil, &pt); breakdown.Gouging() { return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("host gouging during renew: %v", breakdown) } // Prepare the signed transaction that contains the final revision as well // as the new contract - wprr, discard, err := prepareRenew(ctx, rev, rrr.HostAddress, rrr.RenterAddress, renterKey, rrr.RenterFunds, rrr.MinNewCollateral, rrr.MaxFundAmount, pt, rrr.EndHeight, rrr.WindowSize, rrr.ExpectedNewStorage) + toSign, txnSet, fundAmount, discard, err := prepareTxnFn(pt) if err != nil { return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to prepare renew: %w", err) } // Starting from here, we need to make sure to release the txn on error. - defer discard(ctx, wprr.TransactionSet[len(wprr.TransactionSet)-1], &err) + defer discard(&err) - txnSet := wprr.TransactionSet parents, txn := txnSet[:len(txnSet)-1], txnSet[len(txnSet)-1] // Sign only the revision and contract. 
We can't sign everything because @@ -436,9 +435,7 @@ func rpcRenew(ctx context.Context, rrr api.RHPRenewRequest, gougingChecker gougi WholeTransaction: true, Signatures: []uint64{0, 1}, } - if err := signTxn(ctx, &txn, wprr.ToSign, cf); err != nil { - return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, fmt.Errorf("failed to sign transaction: %w", err) - } + signTxnFn(&txn, toSign, cf) // Create a new no-op revision and sign it. noOpRevision := initialRevision(txn, rev.UnlockConditions.PublicKeys[1], renterKey.PublicKey().UnlockKey()) @@ -478,7 +475,7 @@ func rpcRenew(ctx context.Context, rrr api.RHPRenewRequest, gougingChecker gougi return rhpv2.ContractRevision{ Revision: noOpRevision, Signatures: [2]types.TransactionSignature{renterNoOpRevisionSignature, hostSigs.RevisionSignature}, - }, txnSet, pt.ContractPrice, wprr.FundAmount, nil + }, txnSet, pt.ContractPrice, fundAmount, nil } // wrapRPCErr extracts the innermost error, wraps it in either a errHost or diff --git a/internal/sql/migrations.go b/internal/sql/migrations.go index 377bf6fc5..69a57f413 100644 --- a/internal/sql/migrations.go +++ b/internal/sql/migrations.go @@ -205,6 +205,18 @@ var ( return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00015_reset_drift", log) }, }, + { + ID: "00016_account_owner", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00016_account_owner", log) + }, + }, + { + ID: "00017_unix_ms", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00017_unix_ms", log) + }, + }, } } MetricsMigrations = func(ctx context.Context, migrationsFs embed.FS, log *zap.SugaredLogger) []Migration { @@ -226,6 +238,12 @@ var ( return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00002_idx_wallet_metrics_immature", log) }, }, + { + ID: "00003_unix_ms", + Migrate: func(tx Tx) error { + return performMigration(ctx, tx, migrationsFs, dbIdentifier, "00003_unix_ms", log) + }, + 
}, } } ) diff --git a/internal/test/config.go b/internal/test/config.go index 1b5d926a0..e6f239adb 100644 --- a/internal/test/config.go +++ b/internal/test/config.go @@ -3,7 +3,6 @@ package test import ( "time" - "github.com/minio/minio-go/v7/pkg/credentials" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/core/types" "go.sia.tech/renterd/api" @@ -27,9 +26,9 @@ var ( Prune: false, }, Hosts: api.HostsConfig{ - MaxDowntimeHours: 10, - MinRecentScanFailures: 10, - AllowRedundantIPs: true, // allow for integration tests by default + MaxDowntimeHours: 10, + MaxConsecutiveScanFailures: 10, + AllowRedundantIPs: true, // allow for integration tests by default }, } @@ -61,5 +60,4 @@ var ( S3AccessKeyID = "TESTINGYNHUWCPKOPSYQ" S3SecretAccessKey = "Rh30BNyj+qNI4ftYRteoZbHJ3X4Ln71QtZkRXzJ9" - S3Credentials = credentials.NewStaticV4(S3AccessKeyID, S3SecretAccessKey, "") ) diff --git a/internal/test/e2e/cluster.go b/internal/test/e2e/cluster.go index 3e01e8ae7..a5d17d97b 100644 --- a/internal/test/e2e/cluster.go +++ b/internal/test/e2e/cluster.go @@ -13,7 +13,10 @@ import ( "testing" "time" - "github.com/minio/minio-go/v7" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + s3aws "github.com/aws/aws-sdk-go/service/s3" "go.sia.tech/core/consensus" "go.sia.tech/core/gateway" "go.sia.tech/core/types" @@ -61,8 +64,7 @@ type TestCluster struct { Autopilot *autopilot.Client Bus *bus.Client Worker *worker.Client - S3 *minio.Client - S3Core *minio.Core + S3 *s3TestClient workerShutdownFns []func(context.Context) error busShutdownFns []func(context.Context) error @@ -71,6 +73,7 @@ type TestCluster struct { network *consensus.Network genesisBlock types.Block + bs bus.Store cm *chain.Manager apID string dbName string @@ -87,6 +90,39 @@ type dbConfig struct { RetryTxIntervals []time.Duration } +func (tc *TestCluster) Accounts() []api.Account { + tc.tt.Helper() + accounts, err := tc.Worker.Accounts(context.Background()) + 
tc.tt.OK(err) + return accounts +} + +func (tc *TestCluster) ContractRoots(ctx context.Context, fcid types.FileContractID) ([]types.Hash256, error) { + tc.tt.Helper() + + c, err := tc.Bus.Contract(ctx, fcid) + if err != nil { + return nil, err + } + + var h *Host + for _, host := range tc.hosts { + if host.PublicKey() == c.HostKey { + h = host + break + } + } + if h == nil { + return nil, fmt.Errorf("no host found for contract %v", c) + } + + roots, err := h.store.SectorRoots() + if err != nil { + return nil, err + } + return roots[c.ID], nil +} + func (tc *TestCluster) ShutdownAutopilot(ctx context.Context) { tc.tt.Helper() for _, fn := range tc.autopilotShutdownFns { @@ -292,28 +328,29 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { busAddr := fmt.Sprintf("http://%s/bus", busListener.Addr().String()) workerAddr := "http://" + workerListener.Addr().String() - s3Addr := s3Listener.Addr().String() // not fully qualified path + s3Addr := "http://" + s3Listener.Addr().String() // not fully qualified path autopilotAddr := "http://" + autopilotListener.Addr().String() // Create clients. autopilotClient := autopilot.NewClient(autopilotAddr, autopilotPassword) busClient := bus.NewClient(busAddr, busPassword) workerClient := worker.NewClient(workerAddr, workerPassword) - s3Client, err := minio.New(s3Addr, &minio.Options{ - Creds: test.S3Credentials, - Secure: false, - }) - tt.OK(err) - url := s3Client.EndpointURL() - s3Core, err := minio.NewCore(url.Host+url.Path, &minio.Options{ - Creds: test.S3Credentials, - }) - tt.OK(err) + mySession := session.Must(session.NewSession()) + s3AWSClient := s3aws.New(mySession, aws.NewConfig(). + WithEndpoint(s3Addr). + WithRegion("dummy"). + WithS3ForcePathStyle(true). + WithCredentials(credentials.NewCredentials(&credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: test.S3AccessKeyID, + SecretAccessKey: test.S3SecretAccessKey, + }, + }))) // Create bus. 
busDir := filepath.Join(dir, "bus") - b, bShutdownFn, cm, err := newTestBus(ctx, busDir, busCfg, dbCfg, wk, logger) + b, bShutdownFn, cm, bs, err := newTestBus(ctx, busDir, busCfg, dbCfg, wk, logger) tt.OK(err) busAuth := jape.BasicAuth(busPassword) @@ -371,6 +408,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { logger: logger, network: network, genesisBlock: genesis, + bs: bs, cm: cm, tt: tt, wk: wk, @@ -378,8 +416,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { Autopilot: autopilotClient, Bus: busClient, Worker: workerClient, - S3: s3Client, - S3Core: s3Core, + S3: &s3TestClient{s3AWSClient}, workerShutdownFns: workerShutdownFns, busShutdownFns: busShutdownFns, @@ -423,7 +460,7 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { // Set the test contract set to make sure we can add objects at the // beginning of a test right away. - tt.OK(busClient.SetContractSet(ctx, test.ContractSet, []types.FileContractID{})) + tt.OK(busClient.UpdateContractSet(ctx, test.ContractSet, nil, nil)) // Update the autopilot to use test settings if !opts.skipSettingAutopilot { @@ -484,23 +521,23 @@ func newTestCluster(t *testing.T, opts testClusterOptions) *TestCluster { return cluster } -func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, error) { +func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, pk types.PrivateKey, logger *zap.Logger) (*bus.Bus, func(ctx context.Context) error, *chain.Manager, bus.Store, error) { // create store alertsMgr := alerts.NewManager() storeCfg, err := buildStoreConfig(alertsMgr, dir, cfg.SlabBufferCompletionThreshold, cfgDb, pk, logger) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } sqlStore, err := stores.NewSQLStore(storeCfg) if err != nil { - return nil, nil, nil, err + return 
nil, nil, nil, nil, err } // create webhooks manager wh, err := webhooks.NewManager(sqlStore, logger) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } // hookup webhooks <-> alerts @@ -509,35 +546,35 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, // create consensus directory consensusDir := filepath.Join(dir, "consensus") if err := os.MkdirAll(consensusDir, 0700); err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } // create chain database chainPath := filepath.Join(consensusDir, "blockchain.db") bdb, err := coreutils.OpenBoltChainDB(chainPath) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } // create chain manager network, genesis := testNetwork() store, state, err := chain.NewDBStore(bdb, network, genesis) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } cm := chain.NewManager(store, state) // create wallet w, err := wallet.NewSingleAddressWallet(pk, cm, sqlStore, wallet.WithReservationDuration(cfg.UsedUTXOExpiry)) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } // create syncer, peers will reject us if our hostname is empty or // unspecified, so use loopback l, err := net.Listen("tcp", cfg.GatewayAddr) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } syncerAddr := l.Addr().String() host, port, _ := net.SplitHostPort(syncerAddr) @@ -572,11 +609,15 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, } } + // create master key - we currently derive the same key used by the workers + // to ensure contracts formed by the bus can be renewed by the autopilot + masterKey := blake2b.Sum256(append([]byte("worker"), pk...)) + // create bus announcementMaxAgeHours := time.Duration(cfg.AnnouncementMaxAgeHours) * time.Hour - b, err := bus.New(ctx, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) + b, err := 
bus.New(ctx, masterKey, alertsMgr, wh, cm, s, w, sqlStore, announcementMaxAgeHours, logger) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } shutdownFn := func(ctx context.Context) error { @@ -589,7 +630,7 @@ func newTestBus(ctx context.Context, dir string, cfg config.Bus, cfgDb dbConfig, syncerShutdown(ctx), ) } - return b, shutdownFn, cm, nil + return b, shutdownFn, cm, sqlStore, nil } // addStorageFolderToHosts adds a single storage folder to each host. @@ -701,7 +742,7 @@ func (c *TestCluster) WaitForAccounts() []api.Account { c.waitForHostAccounts(hostsMap) // fetch all accounts - accounts, err := c.Bus.Accounts(context.Background()) + accounts, err := c.Worker.Accounts(context.Background()) c.tt.OK(err) return accounts } @@ -879,6 +920,20 @@ func (c *TestCluster) AddHostsBlocking(n int) []*Host { return hosts } +// MineTransactions tries to mine the transactions in the transaction pool until +// it is empty. +func (c *TestCluster) MineTransactions(ctx context.Context) error { + return test.Retry(100, 100*time.Millisecond, func() error { + txns, err := c.Bus.TransactionPool(ctx) + if err != nil { + return err + } else if len(txns) > 0 { + c.MineBlocks(1) + } + return nil + }) +} + // Shutdown shuts down a TestCluster. 
func (c *TestCluster) Shutdown() { c.tt.Helper() @@ -898,7 +953,7 @@ func (c *TestCluster) Shutdown() { func (c *TestCluster) waitForHostAccounts(hosts map[types.PublicKey]struct{}) { c.tt.Helper() c.tt.Retry(300, 100*time.Millisecond, func() error { - accounts, err := c.Bus.Accounts(context.Background()) + accounts, err := c.Worker.Accounts(context.Background()) if err != nil { return err } @@ -1029,6 +1084,7 @@ func testDBCfg() dbConfig { func testWorkerCfg() config.Worker { return config.Worker{ + AccountsRefillInterval: time.Second, AllowPrivateIPs: true, ContractLockTimeout: 5 * time.Second, ID: "worker", @@ -1043,7 +1099,6 @@ func testWorkerCfg() config.Worker { func testApCfg() config.Autopilot { return config.Autopilot{ - AccountsRefillInterval: time.Second, Heartbeat: time.Second, ID: api.DefaultAutopilotID, MigrationHealthCutoff: 0.99, diff --git a/internal/test/e2e/cluster_test.go b/internal/test/e2e/cluster_test.go index f9ba9e018..ef81e8d39 100644 --- a/internal/test/e2e/cluster_test.go +++ b/internal/test/e2e/cluster_test.go @@ -9,7 +9,8 @@ import ( "fmt" "io" "math" - "math/big" + "os" + "path/filepath" "reflect" "sort" "strings" @@ -25,6 +26,7 @@ import ( "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" "go.sia.tech/renterd/autopilot/contractor" + "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/test" "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" @@ -875,7 +877,7 @@ func TestUploadDownloadExtended(t *testing.T) { cfg, _ := cluster.AutopilotConfig(context.Background()) cfg.Contracts.Set = t.Name() cluster.UpdateAutopilotConfig(context.Background(), cfg) - tt.OK(b.SetContractSet(context.Background(), t.Name(), nil)) + tt.OK(b.UpdateContractSet(context.Background(), t.Name(), nil, nil)) // assert there are no contracts in the set csc, err := b.Contracts(context.Background(), api.ContractsOpts{ContractSet: t.Name()}) @@ -1087,7 +1089,6 @@ func TestContractApplyChainUpdates(t *testing.T) { defer cluster.Shutdown() // 
convenience variables - w := cluster.Worker b := cluster.Bus tt := cluster.tt @@ -1099,9 +1100,8 @@ func TestContractApplyChainUpdates(t *testing.T) { // manually form a contract with the host cs, _ := b.ConsensusState(context.Background()) wallet, _ := b.Wallet(context.Background()) - rev, _, err := w.RHPForm(context.Background(), cs.BlockHeight+test.AutopilotConfig.Contracts.Period+test.AutopilotConfig.Contracts.RenewWindow, h.PublicKey, h.NetAddress, wallet.Address, types.Siacoins(1), types.Siacoins(1)) - tt.OK(err) - contract, err := b.AddContract(context.Background(), rev, rev.Revision.MissedHostPayout().Sub(types.Siacoins(1)), types.Siacoins(1), cs.BlockHeight, api.ContractStatePending) + endHeight := cs.BlockHeight + test.AutopilotConfig.Contracts.Period + test.AutopilotConfig.Contracts.RenewWindow + contract, err := b.FormContract(context.Background(), wallet.Address, types.Siacoins(1), h.PublicKey, h.NetAddress, types.Siacoins(1), endHeight) tt.OK(err) // assert revision height is 0 @@ -1110,13 +1110,12 @@ func TestContractApplyChainUpdates(t *testing.T) { } // broadcast the revision for each contract - fcid := contract.ID - tt.OK(w.RHPBroadcast(context.Background(), fcid)) + tt.OKAll(b.BroadcastContract(context.Background(), contract.ID)) cluster.MineBlocks(1) // check the revision height was updated. 
tt.Retry(100, 100*time.Millisecond, func() error { - c, err := cluster.Bus.Contract(context.Background(), fcid) + c, err := cluster.Bus.Contract(context.Background(), contract.ID) tt.OK(err) if c.RevisionHeight == 0 { return fmt.Errorf("contract %v should have been revised", c.ID) @@ -1131,148 +1130,75 @@ func TestEphemeralAccounts(t *testing.T) { t.SkipNow() } - // run without autopilot - opts := clusterOptsDefault - opts.skipRunningAutopilot = true - // create cluster - cluster := newTestCluster(t, opts) + cluster := newTestCluster(t, testClusterOptions{ + hosts: 1, + }) defer cluster.Shutdown() // convenience variables - b := cluster.Bus - w := cluster.Worker tt := cluster.tt - tt.OK(b.UpdateSetting(context.Background(), api.SettingRedundancy, api.RedundancySettings{ - MinShards: 1, - TotalShards: 1, - })) - // add a host - hosts := cluster.AddHosts(1) - h, err := b.Host(context.Background(), hosts[0].PublicKey()) - tt.OK(err) - - // scan the host - tt.OKAll(w.RHPScan(context.Background(), h.PublicKey, h.NetAddress, 10*time.Second)) - - // manually form a contract with the host - cs, _ := b.ConsensusState(context.Background()) - wallet, _ := b.Wallet(context.Background()) - rev, _, err := w.RHPForm(context.Background(), cs.BlockHeight+test.AutopilotConfig.Contracts.Period+test.AutopilotConfig.Contracts.RenewWindow, h.PublicKey, h.NetAddress, wallet.Address, types.Siacoins(10), types.Siacoins(1)) - tt.OK(err) - c, err := b.AddContract(context.Background(), rev, rev.Revision.MissedHostPayout().Sub(types.Siacoins(1)), types.Siacoins(1), cs.BlockHeight, api.ContractStatePending) - tt.OK(err) - - tt.OK(b.SetContractSet(context.Background(), test.ContractSet, []types.FileContractID{c.ID})) - - // fund the account - fundAmt := types.Siacoins(1) - tt.OK(w.RHPFund(context.Background(), c.ID, c.HostKey, c.HostIP, c.SiamuxAddr, fundAmt)) - - // fetch accounts - accounts, err := cluster.Bus.Accounts(context.Background()) - tt.OK(err) - // assert account state - acc := 
accounts[0] - if acc.ID == (rhpv3.Account{}) { + acc := cluster.Accounts()[0] + host := cluster.hosts[0] + if acc.Balance.Cmp(types.Siacoins(1).Big()) < 0 { + t.Fatalf("wrong balance %v", acc.Balance) + } else if acc.ID == (rhpv3.Account{}) { t.Fatal("account id not set") - } else if acc.CleanShutdown { - t.Fatal("account should indicate an unclean shutdown") - } else if !acc.RequiresSync { - t.Fatal("account should require a sync") - } else if acc.HostKey != h.PublicKey { + } else if acc.HostKey != types.PublicKey(host.PublicKey()) { t.Fatal("wrong host") - } else if acc.Balance.Cmp(types.Siacoins(1).Big()) != 0 { - t.Fatalf("wrong balance %v", acc.Balance) - } - - // fetch account from bus directly - busAccounts, err := cluster.Bus.Accounts(context.Background()) - tt.OK(err) - if len(busAccounts) != 1 { - t.Fatal("expected one account but got", len(busAccounts)) - } - busAcc := busAccounts[0] - if !reflect.DeepEqual(busAcc, acc) { - t.Fatal("bus account doesn't match worker account") + } else if !acc.CleanShutdown { + t.Fatal("account should indicate a clean shutdown") + } else if acc.Owner != testWorkerCfg().ID { + t.Fatalf("wrong owner %v", acc.Owner) } - // check that the spending was recorded for the contract. The recorded + // Check that the spending was recorded for the contract. The recorded // spending should be > the fundAmt since it consists of the fundAmt plus // fee. 
+ contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{}) + tt.OK(err) + if len(contracts) != 1 { + t.Fatalf("expected 1 contract, got %v", len(contracts)) + } tt.Retry(10, testBusFlushInterval, func() error { - cm, err := cluster.Bus.Contract(context.Background(), c.ID) + cm, err := cluster.Bus.Contract(context.Background(), contracts[0].ID) tt.OK(err) + fundAmt := types.Siacoins(1) if cm.Spending.FundAccount.Cmp(fundAmt) <= 0 { return fmt.Errorf("invalid spending reported: %v > %v", fundAmt.String(), cm.Spending.FundAccount.String()) } return nil }) - // sync the account - tt.OK(w.RHPSync(context.Background(), c.ID, acc.HostKey, c.HostIP, c.SiamuxAddr)) + // manuall save accounts in bus for 'owner' and mark it clean + acc.Owner = "owner" + tt.OK(cluster.Bus.UpdateAccounts(context.Background(), []api.Account{acc})) - // assert account state - accounts, err = cluster.Bus.Accounts(context.Background()) + // fetch again + busAccounts, err := cluster.Bus.Accounts(context.Background(), "owner") tt.OK(err) - - // assert account state - acc = accounts[0] - if !acc.CleanShutdown { - t.Fatal("account should indicate a clean shutdown") - } else if acc.RequiresSync { - t.Fatal("account should not require a sync") - } else if acc.Drift.Cmp(new(big.Int)) != 0 { - t.Fatalf("account shoult not have drift %v", acc.Drift) + if len(busAccounts) != 1 || busAccounts[0].ID != acc.ID || busAccounts[0].CleanShutdown != acc.CleanShutdown { + t.Fatalf("expected 1 clean account, got %v", len(busAccounts)) } - // update the balance to create some drift - newBalance := fundAmt.Div64(2) - newDrift := new(big.Int).Sub(newBalance.Big(), fundAmt.Big()) - if err := cluster.Bus.SetBalance(context.Background(), busAcc.ID, acc.HostKey, newBalance.Big()); err != nil { - t.Fatal(err) - } - busAccounts, err = cluster.Bus.Accounts(context.Background()) + // again but with invalid owner + busAccounts, err = cluster.Bus.Accounts(context.Background(), "invalid") tt.OK(err) - 
busAcc = busAccounts[0] - maxNewDrift := newDrift.Add(newDrift, types.NewCurrency64(2).Big()) // forgive 2H - if busAcc.Drift.Cmp(maxNewDrift) > 0 { - t.Fatalf("drift was %v but should be %v", busAcc.Drift, maxNewDrift) + if len(busAccounts) != 0 { + t.Fatalf("expected 0 accounts, got %v", len(busAccounts)) } - // reboot cluster - cluster2 := cluster.Reboot(t) - defer cluster2.Shutdown() - - // check that accounts were loaded from the bus - accounts2, err := cluster2.Bus.Accounts(context.Background()) + // mark accounts unclean + uncleanAcc := acc + uncleanAcc.CleanShutdown = false + tt.OK(cluster.Bus.UpdateAccounts(context.Background(), []api.Account{uncleanAcc})) + busAccounts, err = cluster.Bus.Accounts(context.Background(), "owner") tt.OK(err) - for _, acc := range accounts2 { - if acc.Balance.Cmp(big.NewInt(0)) == 0 { - t.Fatal("account balance wasn't loaded") - } else if acc.Drift.Cmp(big.NewInt(0)) == 0 { - t.Fatal("account drift wasn't loaded") - } else if !acc.CleanShutdown { - t.Fatal("account should indicate a clean shutdown") - } - } - - // reset drift again - if err := cluster2.Bus.ResetDrift(context.Background(), acc.ID); err != nil { - t.Fatal(err) - } - accounts2, err = cluster2.Bus.Accounts(context.Background()) - tt.OK(err) - if accounts2[0].Drift.Cmp(new(big.Int)) != 0 { - t.Fatal("drift wasn't reset", accounts2[0].Drift.String()) - } - accounts2, err = cluster2.Bus.Accounts(context.Background()) - tt.OK(err) - if accounts2[0].Drift.Cmp(new(big.Int)) != 0 { - t.Fatal("drift wasn't reset", accounts2[0].Drift.String()) + if len(busAccounts) != 1 || busAccounts[0].ID != acc.ID || busAccounts[0].CleanShutdown { + t.Fatalf("expected 1 unclean account, got %v, %v", len(busAccounts), busAccounts[0].CleanShutdown) } } @@ -1402,6 +1328,8 @@ func TestParallelDownload(t *testing.T) { func TestEphemeralAccountSync(t *testing.T) { if testing.Short() { t.SkipNow() + } else if mysqlCfg := config.MySQLConfigFromEnv(); mysqlCfg.URI != "" { + t.Skip("skipping 
MySQL suite") } dir := t.TempDir() @@ -1410,60 +1338,71 @@ func TestEphemeralAccountSync(t *testing.T) { hosts: 1, }) tt := cluster.tt - - // Shut down the autopilot to prevent it from manipulating the account. - cluster.ShutdownAutopilot(context.Background()) + hk := cluster.hosts[0].PublicKey() // Fetch the account balance before setting the balance - accounts, err := cluster.Bus.Accounts(context.Background()) - tt.OK(err) - if len(accounts) != 1 || accounts[0].RequiresSync { - t.Fatal("account shouldn't require a sync") + accounts := cluster.Accounts() + if len(accounts) != 1 { + t.Fatal("account should exist") + } else if accounts[0].Balance.Cmp(types.ZeroCurrency.Big()) == 0 { + t.Fatal("account isn't funded") + } else if accounts[0].RequiresSync { + t.Fatalf("account shouldn't require a sync, got %v", accounts[0].RequiresSync) } acc := accounts[0] - // Set requiresSync flag on bus and balance to 0. - if err := cluster.Bus.SetBalance(context.Background(), acc.ID, acc.HostKey, new(big.Int)); err != nil { - t.Fatal(err) - } - if err := cluster.Bus.ScheduleSync(context.Background(), acc.ID, acc.HostKey); err != nil { - t.Fatal(err) - } - accounts, err = cluster.Bus.Accounts(context.Background()) - tt.OK(err) - if len(accounts) != 1 || !accounts[0].RequiresSync { - t.Fatal("account wasn't updated") - } + // stop autopilot and mine transactions, this prevents an NDF where we + // double spend outputs after restarting the bus + cluster.ShutdownAutopilot(context.Background()) + tt.OK(cluster.MineTransactions(context.Background())) - // Restart cluster to have worker fetch the account from the bus again. - cluster2 := cluster.Reboot(t) - defer cluster2.Shutdown() + // stop the cluster + host := cluster.hosts[0] + cluster.hosts = nil // exclude hosts from shutdown + cluster.Shutdown() - // Account should need a sync. 
- account, err := cluster2.Bus.Account(context.Background(), acc.ID, acc.HostKey) - tt.OK(err) - if !account.RequiresSync { - t.Fatal("flag wasn't persisted") + // remove the cluster's database + tt.OK(os.Remove(filepath.Join(dir, "bus", "db", "db.sqlite"))) + + // start the cluster again + cluster = newTestCluster(t, testClusterOptions{ + dir: cluster.dir, + logger: cluster.logger, + walletKey: &cluster.wk, + }) + cluster.hosts = append(cluster.hosts, host) + defer cluster.Shutdown() + + // connect to the host again + tt.OK(cluster.Bus.SyncerConnect(context.Background(), host.SyncerAddr())) + cluster.sync() + + // ask for the account, this should trigger its creation + tt.OKAll(cluster.Worker.Account(context.Background(), hk)) + + // make sure we form a contract + cluster.WaitForContracts() + cluster.MineBlocks(1) + + accounts = cluster.Accounts() + if len(accounts) != 1 || accounts[0].ID != acc.ID { + t.Fatal("account should exist") + } else if accounts[0].CleanShutdown || !accounts[0].RequiresSync { + t.Fatal("account shouldn't be marked as clean shutdown or not require a sync, got", accounts[0].CleanShutdown, accounts[0].RequiresSync) } - // Wait for autopilot to sync and reset flag. + // assert account was funded tt.Retry(100, 100*time.Millisecond, func() error { - account, err := cluster2.Bus.Account(context.Background(), acc.ID, acc.HostKey) - if err != nil { - t.Fatal(err) - } - if account.RequiresSync { - return errors.New("account wasn't synced") + accounts = cluster.Accounts() + if len(accounts) != 1 || accounts[0].ID != acc.ID { + return errors.New("account should exist") + } else if accounts[0].Balance.Cmp(types.ZeroCurrency.Big()) == 0 { + return errors.New("account isn't funded") + } else if accounts[0].RequiresSync { + return fmt.Errorf("account shouldn't require a sync, got %v", accounts[0].RequiresSync) } return nil }) - - // Flag should also be reset on bus now. 
- accounts, err = cluster2.Bus.Accounts(context.Background()) - tt.OK(err) - if len(accounts) != 1 || accounts[0].RequiresSync { - t.Fatal("account wasn't updated") - } } // TestUploadDownloadSameHost uploads a file to the same host through different @@ -1589,7 +1528,7 @@ func TestUnconfirmedContractArchival(t *testing.T) { c := contracts[0] // add a contract to the bus - _, err = cluster.Bus.AddContract(context.Background(), rhpv2.ContractRevision{ + _, err = cluster.bs.AddContract(context.Background(), rhpv2.ContractRevision{ Revision: types.FileContractRevision{ ParentID: types.FileContractID{1}, UnlockConditions: types.UnlockConditions{ @@ -2761,7 +2700,7 @@ func TestHostScan(t *testing.T) { // fetch hosts again with the unix epoch timestamp which should only return // 1 host since that one hasn't been scanned yet toScan, err := b.HostsForScanning(context.Background(), api.HostsForScanningOptions{ - MaxLastScan: api.TimeRFC3339(time.Unix(0, 1)), + MaxLastScan: api.TimeRFC3339(time.UnixMilli(1)), }) tt.OK(err) if len(toScan) != 1 { diff --git a/internal/test/e2e/contracts_test.go b/internal/test/e2e/contracts_test.go new file mode 100644 index 000000000..0304e0909 --- /dev/null +++ b/internal/test/e2e/contracts_test.go @@ -0,0 +1,98 @@ +package e2e + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "go.sia.tech/core/types" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/test" + "go.uber.org/zap/zapcore" +) + +func TestFormContract(t *testing.T) { + // configure the autopilot not to form any contracts + apSettings := test.AutopilotConfig + apSettings.Contracts.Amount = 0 + + // create cluster + opts := clusterOptsDefault + opts.autopilotSettings = &apSettings + opts.logger = newTestLoggerCustom(zapcore.DebugLevel) + cluster := newTestCluster(t, opts) + defer cluster.Shutdown() + + // convenience variables + b := cluster.Bus + a := cluster.Autopilot + tt := cluster.tt + + // add a host + hosts := cluster.AddHosts(1) + h, err := 
b.Host(context.Background(), hosts[0].PublicKey()) + tt.OK(err) + + // form a contract using the bus + wallet, _ := b.Wallet(context.Background()) + ap, err := b.Autopilot(context.Background(), api.DefaultAutopilotID) + tt.OK(err) + contract, err := b.FormContract(context.Background(), wallet.Address, types.Siacoins(1), h.PublicKey, h.NetAddress, types.Siacoins(1), ap.EndHeight()) + tt.OK(err) + + // assert the contract was added to the bus + _, err = b.Contract(context.Background(), contract.ID) + tt.OK(err) + + // fetch autopilot config + old, err := b.Autopilot(context.Background(), api.DefaultAutopilotID) + tt.OK(err) + + // mine to the renew window + cluster.MineToRenewWindow() + + // wait until autopilot updated the current period + tt.Retry(100, 100*time.Millisecond, func() error { + if curr, _ := b.Autopilot(context.Background(), api.DefaultAutopilotID); curr.CurrentPeriod == old.CurrentPeriod { + return errors.New("autopilot didn't update the current period") + } + return nil + }) + + // update autopilot config to allow for 1 contract, this won't form a + // contract but will ensure we don't skip contract maintenance, which should + // renew the contract we formed + apSettings.Contracts.Amount = 1 + tt.OK(a.UpdateConfig(apSettings)) + + // assert the contract gets renewed and thus maintained + var renewalID types.FileContractID + tt.Retry(300, 100*time.Millisecond, func() error { + contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{}) + if err != nil { + return err + } + if len(contracts) != 1 { + return fmt.Errorf("unexpected number of contracts %d != 1", len(contracts)) + } + if contracts[0].RenewedFrom != contract.ID { + return fmt.Errorf("contract wasn't renewed %v != %v", contracts[0].RenewedFrom, contract.ID) + } + renewalID = contracts[0].ID + return nil + }) + + // assert the contract is part of the contract set + tt.Retry(300, 100*time.Millisecond, func() error { + contracts, err := b.Contracts(context.Background(), 
api.ContractsOpts{ContractSet: test.ContractSet}) + tt.OK(err) + if len(contracts) != 1 { + return fmt.Errorf("expected 1 contract, got %v", len(contracts)) + } else if contracts[0].ID != renewalID { + return fmt.Errorf("expected contract %v, got %v", contract.ID, contracts[0].ID) + } + return nil + }) +} diff --git a/internal/test/e2e/events_test.go b/internal/test/e2e/events_test.go index 4972adf1b..515bce5a4 100644 --- a/internal/test/e2e/events_test.go +++ b/internal/test/e2e/events_test.go @@ -161,7 +161,7 @@ func TestEvents(t *testing.T) { t.Fatalf("unexpected event %+v", e) } case api.EventContractSetUpdate: - if e.Name != test.ContractSet || len(e.ContractIDs) != 1 || e.ContractIDs[0] != c.ID || e.Timestamp.IsZero() { + if e.Name != test.ContractSet || len(e.ToAdd) != 1 || e.ToAdd[0] != c.ID || len(e.ToRemove) != 0 || e.Timestamp.IsZero() { t.Fatalf("unexpected event %+v", e) } case api.EventConsensusUpdate: diff --git a/internal/test/e2e/gouging_test.go b/internal/test/e2e/gouging_test.go index a40fe0024..851362489 100644 --- a/internal/test/e2e/gouging_test.go +++ b/internal/test/e2e/gouging_test.go @@ -13,7 +13,6 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" - "go.uber.org/zap/zapcore" "lukechampine.com/frand" ) @@ -136,60 +135,6 @@ func TestGouging(t *testing.T) { }) } -// TestAccountFunding is a regression tests that verify we can fund an account -// even if the host is considered gouging, this protects us from not being able -// to download from certain critical hosts when we migrate away from them. 
-func TestAccountFunding(t *testing.T) { - if testing.Short() { - t.SkipNow() - } - - // run without autopilot - opts := clusterOptsDefault - opts.skipRunningAutopilot = true - opts.logger = newTestLoggerCustom(zapcore.ErrorLevel) - - // create a new test cluster - cluster := newTestCluster(t, opts) - defer cluster.Shutdown() - - // convenience variables - b := cluster.Bus - w := cluster.Worker - tt := cluster.tt - - // add a host - hosts := cluster.AddHosts(1) - h, err := b.Host(context.Background(), hosts[0].PublicKey()) - tt.OK(err) - - // scan the host - _, err = w.RHPScan(context.Background(), h.PublicKey, h.NetAddress, 10*time.Second) - tt.OK(err) - - // manually form a contract with the host - cs, _ := b.ConsensusState(context.Background()) - wallet, _ := b.Wallet(context.Background()) - rev, _, err := w.RHPForm(context.Background(), cs.BlockHeight+test.AutopilotConfig.Contracts.Period+test.AutopilotConfig.Contracts.RenewWindow, h.PublicKey, h.NetAddress, wallet.Address, types.Siacoins(1), types.Siacoins(1)) - tt.OK(err) - c, err := b.AddContract(context.Background(), rev, rev.Revision.MissedHostPayout().Sub(types.Siacoins(1)), types.Siacoins(1), cs.BlockHeight, api.ContractStatePending) - tt.OK(err) - - // fund the account - tt.OK(w.RHPFund(context.Background(), c.ID, c.HostKey, c.HostIP, c.SiamuxAddr, types.Siacoins(1).Div64(2))) - - // update host so it's gouging - settings := hosts[0].settings.Settings() - settings.StoragePrice = types.Siacoins(1) - tt.OK(hosts[0].UpdateSettings(settings)) - - // ensure the price table expires so the worker is forced to fetch it - time.Sleep(defaultHostSettings.PriceTableValidity) - - // fund the account again - tt.OK(w.RHPFund(context.Background(), c.ID, c.HostKey, c.HostIP, c.SiamuxAddr, types.Siacoins(1))) -} - func TestHostMinVersion(t *testing.T) { if testing.Short() { t.SkipNow() diff --git a/internal/test/e2e/host.go b/internal/test/e2e/host.go index bd10b4af1..1ba6b5acf 100644 --- a/internal/test/e2e/host.go +++ 
b/internal/test/e2e/host.go @@ -283,7 +283,7 @@ func NewHost(privKey types.PrivateKey, dir string, network *consensus.Network, g return nil, fmt.Errorf("failed to create rhp3 listener: %w", err) } - settings, err := settings.NewConfigManager(privKey, db, cm, s, wallet) + settings, err := settings.NewConfigManager(privKey, db, cm, s, wallet, settings.WithValidateNetAddress(false)) if err != nil { return nil, fmt.Errorf("failed to create settings manager: %w", err) } diff --git a/internal/test/e2e/pruning_test.go b/internal/test/e2e/pruning_test.go index 8492bf9f1..84cce4b21 100644 --- a/internal/test/e2e/pruning_test.go +++ b/internal/test/e2e/pruning_test.go @@ -3,6 +3,7 @@ package e2e import ( "bytes" "context" + "errors" "fmt" "math" "strings" @@ -12,6 +13,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/test" + "go.uber.org/zap" ) func TestHostPruning(t *testing.T) { @@ -98,7 +100,9 @@ func TestSectorPruning(t *testing.T) { } // create a cluster - cluster := newTestCluster(t, clusterOptsDefault) + opts := clusterOptsDefault + opts.logger = zap.NewNop() + cluster := newTestCluster(t, opts) defer cluster.Shutdown() // add a helper to check whether a root is in a given slice @@ -121,13 +125,13 @@ func TestSectorPruning(t *testing.T) { numObjects := 10 // add hosts - hosts := cluster.AddHostsBlocking(int(cfg.Contracts.Amount)) + hosts := cluster.AddHostsBlocking(rs.TotalShards) // wait until we have accounts cluster.WaitForAccounts() // wait until we have a contract set - cluster.WaitForContractSetContracts(cfg.Contracts.Set, int(cfg.Contracts.Amount)) + cluster.WaitForContractSetContracts(cfg.Contracts.Set, rs.TotalShards) // add several objects for i := 0; i < numObjects; i++ { @@ -147,7 +151,8 @@ func TestSectorPruning(t *testing.T) { for _, c := range contracts { dbRoots, _, err := b.ContractRoots(context.Background(), c.ID) tt.OK(err) - cRoots, err := w.RHPContractRoots(context.Background(), c.ID) + + cRoots, err := 
cluster.ContractRoots(context.Background(), c.ID) tt.OK(err) if len(dbRoots) != len(cRoots) { t.Fatal("unexpected number of roots", dbRoots, cRoots) @@ -163,7 +168,7 @@ func TestSectorPruning(t *testing.T) { t.Fatal("unexpected number of roots", n) } - // sleep for a bit to ensure spending records get flushed + // sleep to ensure spending records get flushed time.Sleep(3 * testBusFlushInterval) // assert prunable data is 0 @@ -180,7 +185,7 @@ func TestSectorPruning(t *testing.T) { } // assert amount of prunable data - tt.Retry(100, 100*time.Millisecond, func() error { + tt.Retry(300, 100*time.Millisecond, func() error { res, err = b.PrunableData(context.Background()) tt.OK(err) if res.TotalPrunable != uint64(math.Ceil(float64(numObjects)/2))*rs.SlabSize() { @@ -191,18 +196,21 @@ func TestSectorPruning(t *testing.T) { // prune all contracts for _, c := range contracts { - tt.OKAll(w.RHPPruneContract(context.Background(), c.ID, 0)) - } - - // assert spending records were updated and prunable data is 0 - tt.Retry(10, testBusFlushInterval, func() error { - res, err := b.PrunableData(context.Background()) + res, err := b.PruneContract(context.Background(), c.ID, 0) tt.OK(err) - if res.TotalPrunable != 0 { - return fmt.Errorf("unexpected prunable data: %d", n) + if res.Pruned == 0 { + t.Fatal("expected pruned to be non-zero") + } else if res.Remaining != 0 { + t.Fatal("expected remaining to be zero") } - return nil - }) + } + + // assert prunable data is 0 + res, err = b.PrunableData(context.Background()) + tt.OK(err) + if res.TotalPrunable != 0 { + t.Fatalf("unexpected prunable data: %d", n) + } // assert spending was updated for _, c := range contracts { @@ -222,15 +230,23 @@ func TestSectorPruning(t *testing.T) { tt.OK(b.DeleteObject(context.Background(), api.DefaultBucketName, filename, api.DeleteObjectOptions{})) } - // sleep for a bit to ensure spending records get flushed - time.Sleep(3 * testBusFlushInterval) - // assert amount of prunable data - res, err = 
b.PrunableData(context.Background()) - tt.OK(err) - if res.TotalPrunable == 0 { - t.Fatal("expected prunable data") - } + tt.Retry(300, 100*time.Millisecond, func() error { + res, err = b.PrunableData(context.Background()) + tt.OK(err) + + if len(res.Contracts) != len(contracts) { + return fmt.Errorf("expected %d contracts, got %d", len(contracts), len(res.Contracts)) + } else if res.TotalPrunable == 0 { + var sizes []string + for _, c := range res.Contracts { + res, _ := b.ContractSize(context.Background(), c.ID) + sizes = append(sizes, fmt.Sprintf("c: %v size: %v prunable: %v", c.ID, res.Size, res.Prunable)) + } + return errors.New("expected prunable data, contract sizes:\n" + strings.Join(sizes, "\n")) + } + return nil + }) // update the host settings so it's gouging host := hosts[0] @@ -249,7 +265,7 @@ func TestSectorPruning(t *testing.T) { } // prune the contract and assert it threw a gouging error - _, _, err = w.RHPPruneContract(context.Background(), c.ID, 0) + _, err = b.PruneContract(context.Background(), c.ID, 0) if err == nil || !strings.Contains(err.Error(), "gouging") { t.Fatal("expected gouging error", err) } diff --git a/internal/test/e2e/s3_test.go b/internal/test/e2e/s3_test.go index 3f20e22ad..5071ae801 100644 --- a/internal/test/e2e/s3_test.go +++ b/internal/test/e2e/s3_test.go @@ -12,8 +12,10 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + s3aws "github.com/aws/aws-sdk-go/service/s3" "github.com/google/go-cmp/cmp" - "github.com/minio/minio-go/v7" rhpv2 "go.sia.tech/core/rhp/v2" "go.sia.tech/gofakes3" "go.sia.tech/renterd/api" @@ -37,7 +39,6 @@ func TestS3Basic(t *testing.T) { defer cluster.Shutdown() // delete default bucket before testing. 
- s3 := cluster.S3 tt := cluster.tt if err := cluster.Bus.DeleteBucket(context.Background(), api.DefaultBucketName); err != nil { t.Fatal(err) @@ -46,145 +47,123 @@ func TestS3Basic(t *testing.T) { // create bucket bucket := "bucket" objPath := "obj#ct" // special char to check escaping - tt.OK(s3.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{})) + tt.OKAll(cluster.S3.CreateBucket(bucket)) // list buckets - buckets, err := s3.ListBuckets(context.Background()) + lbo, err := cluster.S3.ListBuckets() tt.OK(err) - if len(buckets) != 1 { + if buckets := lbo.buckets; len(buckets) != 1 { t.Fatalf("unexpected number of buckets, %d != 1", len(buckets)) - } else if buckets[0].Name != bucket { - t.Fatalf("unexpected bucket name, %s != %s", buckets[0].Name, bucket) - } else if buckets[0].CreationDate.IsZero() { + } else if buckets[0].name != bucket { + t.Fatalf("unexpected bucket name, %s != %s", buckets[0].name, bucket) + } else if buckets[0].creationDate.IsZero() { t.Fatal("expected non-zero creation date") } // exist buckets - exists, err := s3.BucketExists(context.Background(), bucket) + err = cluster.S3.HeadBucket(bucket) tt.OK(err) - if !exists { - t.Fatal("expected bucket to exist") - } - exists, err = s3.BucketExists(context.Background(), bucket+"nonexistent") - tt.OK(err) - if exists { - t.Fatal("expected bucket to not exist") - } + err = cluster.S3.HeadBucket("nonexistent") + tt.AssertContains(err, "NotFound") // add object to the bucket data := frand.Bytes(10) etag := md5.Sum(data) - uploadInfo, err := s3.PutObject(context.Background(), bucket, objPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}) + uploadInfo, err := cluster.S3.PutObject(bucket, objPath, bytes.NewReader(data), putObjectOptions{}) tt.OK(err) - if uploadInfo.ETag != hex.EncodeToString(etag[:]) { - t.Fatalf("expected ETag %v, got %v", hex.EncodeToString(etag[:]), uploadInfo.ETag) + if uploadInfo.etag != api.FormatETag(hex.EncodeToString(etag[:])) { + 
t.Fatalf("expected ETag %v, got %v", hex.EncodeToString(etag[:]), uploadInfo.etag) } busObject, err := cluster.Bus.Object(context.Background(), bucket, objPath, api.GetObjectOptions{}) tt.OK(err) if busObject.Object == nil { t.Fatal("expected object to exist") - } else if busObject.Object.ETag != uploadInfo.ETag { - t.Fatalf("expected ETag %q, got %q", uploadInfo.ETag, busObject.Object.ETag) + } else if api.FormatETag(busObject.Object.ETag) != uploadInfo.etag { + t.Fatalf("expected ETag %v, got %v", uploadInfo.etag, busObject.Object.ETag) } - _, err = s3.PutObject(context.Background(), bucket+"nonexistent", objPath, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{}) + _, err = cluster.S3.PutObject("nonexistent", objPath, bytes.NewReader(data), putObjectOptions{}) tt.AssertIs(err, errBucketNotExists) // get object - obj, err := s3.GetObject(context.Background(), bucket, objPath, minio.GetObjectOptions{}) + obj, err := cluster.S3.GetObject(bucket, objPath, getObjectOptions{}) tt.OK(err) - if b, err := io.ReadAll(obj); err != nil { + if b, err := io.ReadAll(obj.body); err != nil { t.Fatal(err) } else if !bytes.Equal(b, data) { t.Fatal("data mismatch") - } else if info, err := obj.Stat(); err != nil { - t.Fatal(err) - } else if info.ETag != uploadInfo.ETag { - t.Fatal("unexpected ETag:", info.ETag, uploadInfo.ETag) + } else if obj.etag != uploadInfo.etag { + t.Fatal("unexpected ETag:", obj.etag, uploadInfo.etag) } // stat object - info, err := s3.StatObject(context.Background(), bucket, objPath, minio.StatObjectOptions{}) + info, err := cluster.S3.HeadObject(bucket, objPath) tt.OK(err) - if info.Size != int64(len(data)) { + if info.contentLength != int64(len(data)) { t.Fatal("size mismatch") - } else if info.ETag != uploadInfo.ETag { - t.Fatal("unexpected ETag:", info.ETag) + } else if info.etag != uploadInfo.etag { + t.Fatal("unexpected ETag:", info.etag) } // stat object that doesn't exist - _, err = s3.StatObject(context.Background(), bucket, 
"nonexistent", minio.StatObjectOptions{}) - if err == nil || !strings.Contains(err.Error(), "The specified key does not exist") { - t.Fatal(err) - } + info, err = cluster.S3.HeadObject("nonexistent", objPath) + tt.AssertContains(err, "NotFound") // add another bucket - tt.OK(s3.MakeBucket(context.Background(), bucket+"2", minio.MakeBucketOptions{})) + bucket2 := "bucket2" + tt.OKAll(cluster.S3.CreateBucket(bucket2)) // copy our object into the new bucket. - res, err := s3.CopyObject(context.Background(), minio.CopyDestOptions{ - Bucket: bucket + "2", - Object: objPath, - }, minio.CopySrcOptions{ - Bucket: bucket, - Object: objPath, - }) + res, err := cluster.S3.CopyObject(bucket, bucket2, objPath, objPath, putObjectOptions{}) tt.OK(err) - if res.LastModified.IsZero() { + if res.lastModified.IsZero() { t.Fatal("expected LastModified to be non-zero") - } else if !res.LastModified.After(start.UTC()) { + } else if !res.lastModified.After(start.UTC()) { t.Fatal("expected LastModified to be after the start of our test") - } else if res.ETag == "" { - t.Fatal("expected ETag to be set") + } else if res.etag != uploadInfo.etag { + t.Fatal("expected correct ETag to be set") } // get copied object - obj, err = s3.GetObject(context.Background(), bucket+"2", objPath, minio.GetObjectOptions{}) + obj, err = cluster.S3.GetObject(bucket2, objPath, getObjectOptions{}) tt.OK(err) - if b, err := io.ReadAll(obj); err != nil { + if b, err := io.ReadAll(obj.body); err != nil { t.Fatal(err) } else if !bytes.Equal(b, data) { t.Fatal("data mismatch") } // assert deleting the bucket fails because it's not empty - err = s3.RemoveBucket(context.Background(), bucket) + err = cluster.S3.DeleteBucket(bucket) tt.AssertIs(err, gofakes3.ErrBucketNotEmpty) // assert deleting the bucket fails because it doesn't exist - err = s3.RemoveBucket(context.Background(), bucket+"nonexistent") + err = cluster.S3.DeleteBucket("nonexistent") tt.AssertIs(err, errBucketNotExists) // remove the object - 
tt.OK(s3.RemoveObject(context.Background(), bucket, objPath, minio.RemoveObjectOptions{})) + tt.OKAll(cluster.S3.DeleteObject(bucket, objPath)) // try to get object - obj, err = s3.GetObject(context.Background(), bucket, objPath, minio.GetObjectOptions{}) - tt.OK(err) - _, err = io.ReadAll(obj) - tt.AssertContains(err, "The specified key does not exist") + obj, err = cluster.S3.GetObject(bucket, objPath, getObjectOptions{}) + tt.AssertContains(err, "NoSuchKey") // add a few objects to the bucket. - tt.OKAll(s3.PutObject(context.Background(), bucket, "dir/", bytes.NewReader(frand.Bytes(10)), 10, minio.PutObjectOptions{})) - tt.OKAll(s3.PutObject(context.Background(), bucket, "dir/file", bytes.NewReader(frand.Bytes(10)), 10, minio.PutObjectOptions{})) + tmpObj1 := "dir/" + body := frand.Bytes(10) + tt.OKAll(cluster.S3.PutObject(bucket, tmpObj1, bytes.NewReader(body), putObjectOptions{})) + tmpObj2 := "dir/file" + tt.OKAll(cluster.S3.PutObject(bucket, tmpObj2, bytes.NewReader(body), putObjectOptions{})) // delete them using the multi delete endpoint. 
- objectsCh := make(chan minio.ObjectInfo, 3) - objectsCh <- minio.ObjectInfo{Key: "dir/file"} - objectsCh <- minio.ObjectInfo{Key: "dir/"} - close(objectsCh) - results := s3.RemoveObjects(context.Background(), bucket, objectsCh, minio.RemoveObjectsOptions{}) - for res := range results { - tt.OK(res.Err) - } + tt.OKAll(cluster.S3.DeleteObject(bucket, tmpObj1)) + tt.OKAll(cluster.S3.DeleteObject(bucket, tmpObj2)) // delete bucket - tt.OK(s3.RemoveBucket(context.Background(), bucket)) - exists, err = s3.BucketExists(context.Background(), bucket) + err = cluster.S3.DeleteBucket(bucket) tt.OK(err) - if exists { - t.Fatal("expected bucket to not exist") - } + err = cluster.S3.HeadBucket(bucket) + tt.AssertContains(err, "NotFound") } func TestS3ObjectMetadata(t *testing.T) { @@ -200,7 +179,6 @@ func TestS3ObjectMetadata(t *testing.T) { defer cluster.Shutdown() // convenience variables - s3 := cluster.S3 tt := cluster.tt // create dummy metadata @@ -210,11 +188,11 @@ func TestS3ObjectMetadata(t *testing.T) { } // add object to the bucket - _, err := s3.PutObject(context.Background(), api.DefaultBucketName, t.Name(), bytes.NewReader([]byte(t.Name())), int64(len([]byte(t.Name()))), minio.PutObjectOptions{UserMetadata: metadata}) + _, err := cluster.S3.PutObject(api.DefaultBucketName, t.Name(), bytes.NewReader([]byte(t.Name())), putObjectOptions{metadata: metadata}) tt.OK(err) // create helper to assert metadata is present - assertMetadata := func(want map[string]string, got minio.StringMap) { + assertMetadata := func(want map[string]string, got map[string]string) { t.Helper() for k, wantt := range want { if gott, ok := got[k]; !ok || gott != wantt { @@ -224,84 +202,85 @@ func TestS3ObjectMetadata(t *testing.T) { } // perform GET request - obj, err := s3.GetObject(context.Background(), api.DefaultBucketName, t.Name(), minio.GetObjectOptions{}) + obj, err := cluster.S3.GetObject(api.DefaultBucketName, t.Name(), getObjectOptions{}) tt.OK(err) + assertMetadata(metadata, 
obj.metadata) - // assert metadata is set - get, err := obj.Stat() + // assert metadata is set on HEAD request + get, err := cluster.S3.HeadObject(api.DefaultBucketName, t.Name()) tt.OK(err) - assertMetadata(metadata, get.UserMetadata) + assertMetadata(metadata, get.metadata) // perform HEAD request - head, err := s3.StatObject(context.Background(), api.DefaultBucketName, t.Name(), minio.StatObjectOptions{}) + head, err := cluster.S3.HeadObject(api.DefaultBucketName, t.Name()) tt.OK(err) - assertMetadata(metadata, head.UserMetadata) + assertMetadata(metadata, head.metadata) // perform metadata update (same src/dst copy) metadata["Baz"] = "updated" - _, err = s3.CopyObject( - context.Background(), - minio.CopyDestOptions{Bucket: api.DefaultBucketName, Object: t.Name(), UserMetadata: metadata, ReplaceMetadata: true}, - minio.CopySrcOptions{Bucket: api.DefaultBucketName, Object: t.Name()}, + _, err = cluster.S3.CopyObject( + api.DefaultBucketName, + api.DefaultBucketName, + t.Name(), + t.Name(), + putObjectOptions{metadata: metadata}, ) tt.OK(err) // perform HEAD request - head, err = s3.StatObject(context.Background(), api.DefaultBucketName, t.Name(), minio.StatObjectOptions{}) + head, err = cluster.S3.HeadObject(api.DefaultBucketName, t.Name()) tt.OK(err) - assertMetadata(metadata, head.UserMetadata) + assertMetadata(metadata, head.metadata) // perform copy metadata["Baz"] = "copied" - _, err = s3.CopyObject( - context.Background(), - minio.CopyDestOptions{Bucket: api.DefaultBucketName, Object: t.Name() + "copied", UserMetadata: metadata, ReplaceMetadata: true}, - minio.CopySrcOptions{Bucket: api.DefaultBucketName, Object: t.Name()}, + _, err = cluster.S3.CopyObject( + api.DefaultBucketName, + api.DefaultBucketName, + t.Name(), + t.Name()+"copied", + putObjectOptions{ + metadata: metadata, + }, ) tt.OK(err) // perform HEAD request - head, err = s3.StatObject(context.Background(), api.DefaultBucketName, t.Name()+"copied", minio.StatObjectOptions{}) + head, err = 
cluster.S3.HeadObject(api.DefaultBucketName, t.Name()+"copied") tt.OK(err) - assertMetadata(metadata, head.UserMetadata) + assertMetadata(metadata, metadata) // assert the original object's metadata is unchanged metadata["Baz"] = "updated" - head, err = s3.StatObject(context.Background(), api.DefaultBucketName, t.Name(), minio.StatObjectOptions{}) + head, err = cluster.S3.HeadObject(api.DefaultBucketName, t.Name()) tt.OK(err) - assertMetadata(metadata, head.UserMetadata) + assertMetadata(metadata, metadata) // upload a file using multipart upload - core := cluster.S3Core - uid, err := core.NewMultipartUpload(context.Background(), api.DefaultBucketName, "multi", minio.PutObjectOptions{ - UserMetadata: map[string]string{ + uid, err := cluster.S3.NewMultipartUpload(api.DefaultBucketName, "multi", putObjectOptions{ + metadata: map[string]string{ "New": "1", }, }) tt.OK(err) data := frand.Bytes(3) - part, err := core.PutObjectPart(context.Background(), api.DefaultBucketName, "foo", uid, 1, bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{}) + part, err := cluster.S3.PutObjectPart(api.DefaultBucketName, "foo", uid, 1, bytes.NewReader(data), putObjectPartOptions{}) tt.OK(err) - _, err = core.CompleteMultipartUpload(context.Background(), api.DefaultBucketName, "multi", uid, []minio.CompletePart{ + _, err = cluster.S3.CompleteMultipartUpload(api.DefaultBucketName, "multi", uid, []completePart{ { - PartNumber: part.PartNumber, - ETag: part.ETag, - }, - }, minio.PutObjectOptions{ - UserMetadata: map[string]string{ - "Complete": "2", + partNumber: 1, + etag: part.etag, }, - }) + }, putObjectOptions{}) tt.OK(err) // check metadata - head, err = s3.StatObject(context.Background(), api.DefaultBucketName, "multi", minio.StatObjectOptions{}) + head, err = cluster.S3.HeadObject(api.DefaultBucketName, "multi") tt.OK(err) assertMetadata(map[string]string{ - "New": "1", - "Complete": "2", - }, head.UserMetadata) + "New": "1", + }, head.metadata) } func 
TestS3Authentication(t *testing.T) { @@ -313,33 +292,38 @@ func TestS3Authentication(t *testing.T) { defer cluster.Shutdown() tt := cluster.tt - assertAuth := func(c *minio.Core, shouldWork bool) { + assertAuth := func(c *s3aws.S3, shouldWork bool) { t.Helper() - _, err := c.ListObjectsV2(api.DefaultBucketName, "/", "", "", "", 100) + bucket := api.DefaultBucketName + _, err := c.ListObjectsV2(&s3aws.ListObjectsV2Input{ + Bucket: &bucket, + }) if shouldWork && err != nil { t.Fatal(err) } else if !shouldWork && err == nil { t.Fatal("expected error") } else if !shouldWork && err != nil && !strings.Contains(err.Error(), "AccessDenied") { - t.Fatal("wrong error") + t.Fatal("wrong error", shouldWork, err) } } - // Create client. - url := cluster.S3.EndpointURL().Host - s3Unauthenticated, err := minio.NewCore(url, &minio.Options{ - Creds: nil, // no authentication + // Create client that is not authenticated + cfg := cluster.S3.Config() + cfg.Credentials = credentials.NewCredentials(&credentials.StaticProvider{ + Value: credentials.Value{ + AccessKeyID: "unknownkey", + SecretAccessKey: "somesecret", + }, }) - tt.OK(err) + + mySession := session.Must(session.NewSession()) + s3Unauthenticated := s3aws.New(mySession, &cfg) // List bucket. Shouldn't work. assertAuth(s3Unauthenticated, false) - // Create client with credentials and try again.. - s3Authenticated, err := minio.NewCore(url, &minio.Options{ - Creds: test.S3Credentials, - }) - tt.OK(err) + // Use authenticated client + s3Authenticated := cluster.S3.s3 // List buckets. Should work. assertAuth(s3Authenticated, true) @@ -382,20 +366,18 @@ func TestS3List(t *testing.T) { }) defer cluster.Shutdown() - s3 := cluster.S3 - core := cluster.S3Core tt := cluster.tt // create bucket - tt.OK(s3.MakeBucket(context.Background(), "bucket", minio.MakeBucketOptions{})) + tt.OK(cluster.S3.CreateBucket("bucket")) // manually create the 'a/' object as a directory. It should also be // possible to call StatObject on it without errors. 
- tt.OKAll(s3.PutObject(context.Background(), "bucket", "a/", bytes.NewReader(nil), 0, minio.PutObjectOptions{})) - so, err := s3.StatObject(context.Background(), "bucket", "a/", minio.StatObjectOptions{}) + tt.OKAll(cluster.S3.PutObject("bucket", "a/", bytes.NewReader(nil), putObjectOptions{})) + so, err := cluster.S3.HeadObject("bucket", "a/") tt.OK(err) - if so.Key != "a/" { - t.Fatal("unexpected key:", so.Key) + if so.key != "a/" { + t.Fatal("unexpected key:", so.key) } objects := []string{ @@ -410,19 +392,19 @@ func TestS3List(t *testing.T) { } for _, object := range objects { data := frand.Bytes(10) - tt.OKAll(s3.PutObject(context.Background(), "bucket", object, bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})) + tt.OKAll(cluster.S3.PutObject("bucket", object, bytes.NewReader(data), putObjectOptions{})) } - flatten := func(res minio.ListBucketResult) []string { + flatten := func(res listObjectsResponse) []string { var objs []string - for _, obj := range res.Contents { - if !strings.HasSuffix(obj.Key, "/") && obj.LastModified.IsZero() { - t.Fatal("expected non-zero LastModified", obj.Key) + for _, obj := range res.contents { + if !strings.HasSuffix(obj.key, "/") && obj.lastModified.IsZero() { + t.Fatal("expected non-zero LastModified", obj.key) } - objs = append(objs, obj.Key) + objs = append(objs, obj.key) } - for _, cp := range res.CommonPrefixes { - objs = append(objs, cp.Prefix) + for _, cp := range res.commonPrefixes { + objs = append(objs, cp) } return objs } @@ -495,7 +477,12 @@ func TestS3List(t *testing.T) { }, } for i, test := range tests { - result, err := core.ListObjects("bucket", test.prefix, test.marker, test.delimiter, 1000) + result, err := cluster.S3.ListObjects("bucket", listObjectsOptions{ + prefix: test.prefix, + marker: test.marker, + delimiter: test.delimiter, + maxKeys: 1000, + }) if err != nil { t.Fatal(err) } @@ -503,10 +490,10 @@ func TestS3List(t *testing.T) { if !cmp.Equal(test.want, got) { t.Errorf("test %d: 
unexpected response, want %v got %v", i, test.want, got) } - for _, obj := range result.Contents { - if obj.ETag == "" { + for _, obj := range result.contents { + if obj.etag == "" { t.Fatal("expected non-empty ETag") - } else if obj.LastModified.IsZero() { + } else if obj.lastModified.IsZero() { t.Fatal("expected non-zero LastModified") } } @@ -517,16 +504,19 @@ func TestS3List(t *testing.T) { expectedOrder := []string{"a/", "a/a/a", "a/b", "ab", "b", "c/a", "d", "y/", "y/y/y/y"} hasMore := true for i := 0; hasMore; i++ { - result, err := core.ListObjectsV2("bucket", "", "", marker, "", 1) + result, err := cluster.S3.ListObjects("bucket", listObjectsOptions{ + marker: marker, + maxKeys: 1, + }) if err != nil { t.Fatal(err) - } else if len(result.Contents) != 1 { - t.Fatalf("unexpected number of objects, %d != 1", len(result.Contents)) - } else if result.Contents[0].Key != expectedOrder[i] { - t.Errorf("unexpected object, %s != %s", result.Contents[0].Key, expectedOrder[i]) + } else if len(result.contents) != 1 { + t.Fatalf("unexpected number of objects, %d != 1", len(result.contents)) + } else if result.contents[0].key != expectedOrder[i] { + t.Errorf("unexpected object, %s != %s", result.contents[0].key, expectedOrder[i]) } - marker = result.NextContinuationToken - hasMore = result.IsTruncated + marker = result.nextMarker + hasMore = result.truncated } } @@ -540,15 +530,13 @@ func TestS3MultipartUploads(t *testing.T) { uploadPacking: true, }) defer cluster.Shutdown() - s3 := cluster.S3 - core := cluster.S3Core tt := cluster.tt // Create bucket. - tt.OK(s3.MakeBucket(context.Background(), "multipart", minio.MakeBucketOptions{})) + tt.OK(cluster.S3.CreateBucket("multipart")) // Start a new multipart upload. 
- uploadID, err := core.NewMultipartUpload(context.Background(), "multipart", "foo", minio.PutObjectOptions{}) + uploadID, err := cluster.S3.NewMultipartUpload("multipart", "foo", putObjectOptions{}) tt.OK(err) if uploadID == "" { t.Fatal("expected non-empty upload ID") @@ -556,15 +544,15 @@ func TestS3MultipartUploads(t *testing.T) { // Start another one in the default bucket. This should not show up when // listing the uploads in the 'multipart' bucket. - tt.OKAll(core.NewMultipartUpload(context.Background(), api.DefaultBucketName, "foo", minio.PutObjectOptions{})) + tt.OKAll(cluster.S3.NewMultipartUpload(api.DefaultBucketName, "foo", putObjectOptions{})) // List uploads - lmu, err := core.ListMultipartUploads(context.Background(), "multipart", "", "", "", "", 0) + uploads, err := cluster.S3.ListMultipartUploads("multipart") tt.OK(err) - if len(lmu.Uploads) != 1 { - t.Fatal("expected 1 upload", len(lmu.Uploads)) - } else if upload := lmu.Uploads[0]; upload.UploadID != uploadID || upload.Key != "foo" { - t.Fatal("unexpected upload:", upload.UploadID, upload.Key) + if len(uploads) != 1 { + t.Fatal("expected 1 upload", len(uploads)) + } else if upload := uploads[0]; upload.uploadID != uploadID || upload.key != "foo" { + t.Fatal("unexpected upload:", upload.uploadID, upload.key) } // delete default bucket for the remainder of the test. This makes sure we @@ -573,101 +561,99 @@ func TestS3MultipartUploads(t *testing.T) { // Add 3 parts out of order to make sure the object is reconstructed // correctly. 
- putPart := func(partNum int, data []byte) string { + putPart := func(partNum int64, data []byte) string { t.Helper() - part, err := core.PutObjectPart(context.Background(), "multipart", "foo", uploadID, partNum, bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{}) + part, err := cluster.S3.PutObjectPart("multipart", "foo", uploadID, partNum, bytes.NewReader(data), putObjectPartOptions{}) tt.OK(err) - if part.ETag == "" { + if part.etag == "" { t.Fatal("expected non-empty ETag") } - return part.ETag + return part.etag } etag2 := putPart(2, []byte("world")) etag1 := putPart(1, []byte("hello")) etag3 := putPart(3, []byte("!")) // List parts - lop, err := core.ListObjectParts(context.Background(), "multipart", "foo", uploadID, 0, 0) + lop, err := cluster.S3.ListObjectParts("multipart", "foo", uploadID) tt.OK(err) - if lop.Bucket != "multipart" || lop.Key != "foo" || lop.UploadID != uploadID || len(lop.ObjectParts) != 3 { + if lop.bucket != "multipart" || lop.key != "foo" || lop.uploadId != uploadID || len(lop.objectParts) != 3 { t.Fatal("unexpected response:", lop) - } else if part1 := lop.ObjectParts[0]; part1.PartNumber != 1 || part1.Size != 5 || part1.ETag == "" { + } else if part1 := lop.objectParts[0]; part1.partNumber != 1 || part1.size != 5 || part1.etag == "" { t.Fatal("unexpected part:", part1) - } else if part2 := lop.ObjectParts[1]; part2.PartNumber != 2 || part2.Size != 5 || part2.ETag == "" { + } else if part2 := lop.objectParts[1]; part2.partNumber != 2 || part2.size != 5 || part2.etag == "" { t.Fatal("unexpected part:", part2) - } else if part3 := lop.ObjectParts[2]; part3.PartNumber != 3 || part3.Size != 1 || part3.ETag == "" { + } else if part3 := lop.objectParts[2]; part3.partNumber != 3 || part3.size != 1 || part3.etag == "" { t.Fatal("unexpected part:", part3) } // Complete upload - ui, err := core.CompleteMultipartUpload(context.Background(), "multipart", "foo", uploadID, []minio.CompletePart{ + ui, err := 
cluster.S3.CompleteMultipartUpload("multipart", "foo", uploadID, []completePart{ { - PartNumber: 1, - ETag: etag1, + partNumber: 1, + etag: etag1, }, { - PartNumber: 2, - ETag: etag2, + partNumber: 2, + etag: etag2, }, { - PartNumber: 3, - ETag: etag3, + partNumber: 3, + etag: etag3, }, - }, minio.PutObjectOptions{}) + }, putObjectOptions{}) tt.OK(err) - if ui.Bucket != "multipart" || ui.Key != "foo" || ui.ETag == "" { + if ui.bucket != "multipart" || ui.key != "foo" || ui.etag == "" { t.Fatal("unexpected response:", ui) } // Download object expectedData := []byte("helloworld!") - downloadedObj, err := s3.GetObject(context.Background(), "multipart", "foo", minio.GetObjectOptions{}) + downloadedObj, err := cluster.S3.GetObject("multipart", "foo", getObjectOptions{}) tt.OK(err) - if data, err := io.ReadAll(downloadedObj); err != nil { + if data, err := io.ReadAll(downloadedObj.body); err != nil { t.Fatal(err) } else if !bytes.Equal(data, expectedData) { t.Fatal("unexpected data:", string(data)) - } else if info, err := downloadedObj.Stat(); err != nil { - t.Fatal(err) - } else if info.ETag != ui.ETag { - t.Fatal("unexpected ETag:", info.ETag) - } else if info.Size != int64(len(expectedData)) { - t.Fatal("unexpected size:", info.Size) + } else if downloadedObj.etag != ui.etag { + t.Fatal("unexpected ETag:", downloadedObj.etag) } // Stat object - if info, err := s3.StatObject(context.Background(), "multipart", "foo", minio.StatObjectOptions{}); err != nil { + if info, err := cluster.S3.HeadObject("multipart", "foo"); err != nil { t.Fatal(err) - } else if info.ETag != ui.ETag { - t.Fatal("unexpected ETag:", info.ETag) - } else if info.Size != int64(len(expectedData)) { - t.Fatal("unexpected size:", info.Size) + } else if info.etag != ui.etag { + t.Fatal("unexpected ETag:", info.etag) + } else if info.contentLength != int64(len(expectedData)) { + t.Fatal("unexpected size:", info.contentLength) } // Download again with range request. 
- b := make([]byte, 5) - downloadedObj, err = s3.GetObject(context.Background(), "multipart", "foo", minio.GetObjectOptions{}) + downloadedObj, err = cluster.S3.GetObject("multipart", "foo", getObjectOptions{ + offset: 5, + length: 5, + }) tt.OK(err) - if _, err = downloadedObj.ReadAt(b, 5); err != nil { + if data, err := io.ReadAll(downloadedObj.body); err != nil { t.Fatal(err) - } else if !bytes.Equal(b, []byte("world")) { - t.Fatal("unexpected data:", string(b)) + } else if !bytes.Equal(data, []byte("world")) { + t.Fatal("unexpected data:", string(data)) } // Start a second multipart upload. - uploadID, err = core.NewMultipartUpload(context.Background(), "multipart", "bar", minio.PutObjectOptions{}) + uploadID, err = cluster.S3.NewMultipartUpload("multipart", "bar", putObjectOptions{}) tt.OK(err) // Add a part. putPart(1, []byte("bar")) // Abort upload - tt.OK(core.AbortMultipartUpload(context.Background(), "multipart", "bar", uploadID)) + tt.OK(cluster.S3.AbortMultipartUpload("multipart", "bar", uploadID)) // List it. - res, err := core.ListMultipartUploads(context.Background(), "multipart", "", "", "", "", 0) + uploads, err = cluster.S3.ListMultipartUploads("multipart") tt.OK(err) - if len(res.Uploads) != 0 { + if len(uploads) != 0 { t.Fatal("expected 0 uploads") } } @@ -688,8 +674,6 @@ func TestS3MultipartPruneSlabs(t *testing.T) { }) defer cluster.Shutdown() - s3 := cluster.S3 - core := cluster.S3Core bucket := "multipart" tt := cluster.tt @@ -697,10 +681,10 @@ func TestS3MultipartPruneSlabs(t *testing.T) { tt.OK(cluster.Bus.DeleteBucket(context.Background(), api.DefaultBucketName)) // Create bucket. - tt.OK(s3.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{})) + tt.OK(cluster.S3.CreateBucket(bucket)) // Start a new multipart upload. 
- uploadID, err := core.NewMultipartUpload(context.Background(), bucket, "foo", minio.PutObjectOptions{}) + uploadID, err := cluster.S3.NewMultipartUpload(bucket, "foo", putObjectOptions{}) tt.OK(err) if uploadID == "" { t.Fatal("expected non-empty upload ID") @@ -708,12 +692,12 @@ func TestS3MultipartPruneSlabs(t *testing.T) { // Add 1 part to the upload. data := frand.Bytes(5) - tt.OKAll(core.PutObjectPart(context.Background(), bucket, "foo", uploadID, 1, bytes.NewReader(data), int64(len(data)), minio.PutObjectPartOptions{})) + tt.OKAll(cluster.S3.PutObjectPart(bucket, "foo", uploadID, 1, bytes.NewReader(data), putObjectPartOptions{})) // Upload 1 regular object. It will share the same packed slab, cause the // packed slab to be complete and start a new one. data = frand.Bytes(test.RedundancySettings.MinShards*rhpv2.SectorSize - 1) - tt.OKAll(s3.PutObject(context.Background(), bucket, "bar", bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})) + tt.OKAll(cluster.S3.PutObject(bucket, "bar", bytes.NewReader(data), putObjectOptions{})) // Block until the buffer is uploaded. tt.Retry(100, 100*time.Millisecond, func() error { @@ -728,7 +712,7 @@ func TestS3MultipartPruneSlabs(t *testing.T) { // Upload another object that overwrites the first one, triggering a call to // 'pruneSlabs'. data = frand.Bytes(5) - tt.OKAll(s3.PutObject(context.Background(), bucket, "bar", bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{})) + tt.OKAll(cluster.S3.PutObject(bucket, "bar", bytes.NewReader(data), putObjectOptions{})) } func TestS3SpecialChars(t *testing.T) { @@ -741,35 +725,36 @@ func TestS3SpecialChars(t *testing.T) { uploadPacking: true, }) defer cluster.Shutdown() - s3 := cluster.S3 tt := cluster.tt // manually create the 'a/' object as a directory. It should also be // possible to call StatObject on it without errors. 
objectKey := "foo/höst (1).log" - tt.OKAll(s3.PutObject(context.Background(), api.DefaultBucketName, objectKey, bytes.NewReader([]byte("bar")), 0, minio.PutObjectOptions{})) - so, err := s3.StatObject(context.Background(), api.DefaultBucketName, objectKey, minio.StatObjectOptions{}) + tt.OKAll(cluster.S3.PutObject(api.DefaultBucketName, objectKey, bytes.NewReader([]byte("bar")), putObjectOptions{})) + so, err := cluster.S3.HeadObject(api.DefaultBucketName, objectKey) tt.OK(err) - if so.Key != objectKey { - t.Fatal("unexpected key:", so.Key) + if so.key != objectKey { + t.Fatal("unexpected key:", so.key) } - for res := range s3.ListObjects(context.Background(), api.DefaultBucketName, minio.ListObjectsOptions{Prefix: "foo/"}) { - tt.OK(res.Err) - if res.Key != objectKey { - t.Fatal("unexpected key:", res.Key) + lor, err := cluster.S3.ListObjects(api.DefaultBucketName, listObjectsOptions{prefix: "foo/"}) + tt.OK(err) + for _, res := range lor.contents { + if res.key != objectKey { + t.Fatal("unexpected key:", res.key) } } // delete it and verify its gone. 
- tt.OK(s3.RemoveObject(context.Background(), api.DefaultBucketName, objectKey, minio.RemoveObjectOptions{})) - so, err = s3.StatObject(context.Background(), api.DefaultBucketName, objectKey, minio.StatObjectOptions{}) + tt.OK(cluster.S3.DeleteObject(api.DefaultBucketName, objectKey)) + so, err = cluster.S3.HeadObject(api.DefaultBucketName, objectKey) if err == nil { t.Fatal("shouldn't exist", err) } - for res := range s3.ListObjects(context.Background(), api.DefaultBucketName, minio.ListObjectsOptions{Prefix: "foo/"}) { - tt.OK(res.Err) - if res.Key == objectKey { - t.Fatal("unexpected key:", res.Key) + lor, err = cluster.S3.ListObjects(api.DefaultBucketName, listObjectsOptions{prefix: "foo/"}) + tt.OK(err) + for _, res := range lor.contents { + if res.key == objectKey { + t.Fatal("unexpected key:", res.key) } } } diff --git a/internal/test/e2e/s3client.go b/internal/test/e2e/s3client.go new file mode 100644 index 000000000..d8271c929 --- /dev/null +++ b/internal/test/e2e/s3client.go @@ -0,0 +1,430 @@ +package e2e + +import ( + "encoding/base64" + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws" + s3aws "github.com/aws/aws-sdk-go/service/s3" +) + +type ( + s3TestClient struct { + s3 *s3aws.S3 + } +) + +// request and response types +type ( + bucketInfo struct { + creationDate time.Time + name string + } + + copyObjectResponse struct { + lastModified time.Time + etag string + } + + listBucketResponse struct { + buckets []bucketInfo + } + + listObjectsOptions struct { + prefix string + marker string + delimiter string + maxKeys int64 + } + + listObjectsResponse struct { + contents []headObjectResponse + commonPrefixes []string + nextMarker string + truncated bool + } + + completePart struct { + partNumber int64 + etag string + } + + getObjectOptions struct { + offset int64 + length int64 + } + + getObjectResponse struct { + body io.ReadCloser + etag string + lastModified time.Time + metadata map[string]string + } + + headObjectResponse struct { + 
contentLength int64 + etag string + key string + lastModified time.Time + metadata map[string]string + } + + multipartUploadInfo struct { + bucket string + key string + uploadID string + } + + putObjectOptions struct { + metadata map[string]string + } + + putObjectPartOptions struct { + } + + putObjectResponse struct { + etag string + } + + putObjectPartResponse struct { + etag string + } + + uploadInfo struct { + bucket string + etag string + key string + uploadID string + } +) + +func (c *s3TestClient) Config() aws.Config { + return c.s3.Config +} + +func (c *s3TestClient) AbortMultipartUpload(bucket, key string, uploadID string) error { + var input s3aws.AbortMultipartUploadInput + input.SetBucket(bucket) + input.SetKey(key) + input.SetUploadId(uploadID) + _, err := c.s3.AbortMultipartUpload(&input) + return err +} + +func (c *s3TestClient) CompleteMultipartUpload(bucket, object, uploadID string, parts []completePart, opts putObjectOptions) (uploadInfo, error) { + var input s3aws.CompleteMultipartUploadInput + input.SetBucket(bucket) + input.SetKey(object) + input.SetUploadId(uploadID) + var upload s3aws.CompletedMultipartUpload + var inputParts []*s3aws.CompletedPart + for i := range parts { + inputParts = append(inputParts, &s3aws.CompletedPart{ + ETag: &parts[i].etag, PartNumber: &parts[i].partNumber, + }) + upload.SetParts(inputParts) + } + input.SetMultipartUpload(&upload) + + resp, err := c.s3.CompleteMultipartUpload(&input) + if err != nil { + return uploadInfo{}, err + } + return uploadInfo{ + bucket: *resp.Bucket, + etag: *resp.ETag, + key: *resp.Key, + uploadID: uploadID, + }, nil +} + +func (c *s3TestClient) CopyObject(srcBucket, dstBucket, srcKey, dstKey string, opts putObjectOptions) (copyObjectResponse, error) { + var input s3aws.CopyObjectInput + input.SetCopySource(fmt.Sprintf("%s/%s", srcBucket, srcKey)) + input.SetBucket(dstBucket) + input.SetKey(dstKey) + if opts.metadata != nil { + md := make(map[string]*string) + for k := range opts.metadata { + v := 
opts.metadata[k] + md[k] = &v + } + input.SetMetadata(md) + } + resp, err := c.s3.CopyObject(&input) + if err != nil { + return copyObjectResponse{}, err + } + return copyObjectResponse{ + lastModified: *resp.CopyObjectResult.LastModified, + etag: *resp.CopyObjectResult.ETag, + }, nil +} + +func (c *s3TestClient) CreateBucket(bucket string) error { + var input s3aws.CreateBucketInput + input.SetBucket(bucket) + _, err := c.s3.CreateBucket(&input) + return err +} + +func (c *s3TestClient) DeleteBucket(bucket string) error { + var input s3aws.DeleteBucketInput + input.SetBucket(bucket) + _, err := c.s3.DeleteBucket(&input) + return err +} + +func (c *s3TestClient) DeleteObject(bucket, objKey string) error { + var input s3aws.DeleteObjectInput + input.SetBucket(bucket) + input.SetKey(objKey) + _, err := c.s3.DeleteObject(&input) + return err +} + +func (c *s3TestClient) GetObject(bucket, objKey string, opts getObjectOptions) (getObjectResponse, error) { + var input s3aws.GetObjectInput + input.SetBucket(bucket) + input.SetKey(objKey) + if hasOffset, hasLength := opts.offset > 0, opts.length > 0; hasOffset || hasLength { + if hasLength { + input.SetRange(fmt.Sprintf("bytes=%d-%d", opts.offset, opts.offset+opts.length-1)) + } else { + input.SetRange(fmt.Sprintf("bytes=%d-", opts.offset)) + } + } + resp, err := c.s3.GetObject(&input) + if err != nil { + return getObjectResponse{}, err + } + md := make(map[string]string) + for k, v := range resp.Metadata { + if v != nil { + md[k] = *v + } + } + return getObjectResponse{ + etag: *resp.ETag, + body: resp.Body, + lastModified: *resp.LastModified, + metadata: md, + }, nil +} + +func (c *s3TestClient) HeadBucket(bucket string) error { + var input s3aws.HeadBucketInput + input.SetBucket(bucket) + _, err := c.s3.HeadBucket(&input) + if err != nil { + return err + } + return nil +} + +func (c *s3TestClient) HeadObject(bucket, objKey string) (headObjectResponse, error) { + var input s3aws.HeadObjectInput + input.SetBucket(bucket) 
+ input.SetKey(objKey) + resp, err := c.s3.HeadObject(&input) + if err != nil { + return headObjectResponse{}, err + } + md := make(map[string]string) + for k, v := range resp.Metadata { + if v != nil { + md[k] = *v + } + } + return headObjectResponse{ + etag: *resp.ETag, + contentLength: *resp.ContentLength, + key: objKey, + lastModified: *resp.LastModified, + metadata: md, + }, nil +} + +func (c *s3TestClient) ListBuckets() (lbr listBucketResponse, err error) { + resp, err := c.s3.ListBuckets(&s3aws.ListBucketsInput{}) + if err != nil { + return listBucketResponse{}, err + } + for _, b := range resp.Buckets { + lbr.buckets = append(lbr.buckets, bucketInfo{ + name: *b.Name, + creationDate: *b.CreationDate, + }) + } + return lbr, nil +} + +func (c *s3TestClient) ListMultipartUploads(bucket string) ([]multipartUploadInfo, error) { + var input s3aws.ListMultipartUploadsInput + input.SetBucket(bucket) + resp, err := c.s3.ListMultipartUploads(&input) + if err != nil { + return nil, err + } + var uploads []multipartUploadInfo + for _, u := range resp.Uploads { + uploads = append(uploads, multipartUploadInfo{ + bucket: bucket, + key: *u.Key, + uploadID: *u.UploadId, + }) + } + return uploads, nil +} + +type listObjectPartsResponse struct { + bucket string + key string + uploadId string + objectParts []objectPart +} + +type objectPart struct { + partNumber int64 + size int64 + etag string +} + +func (c *s3TestClient) ListObjectParts(bucket, objKey, uploadID string) (lopr listObjectPartsResponse, err error) { + var input s3aws.ListPartsInput + input.SetBucket(bucket) + input.SetKey(objKey) + input.SetUploadId(uploadID) + resp, err := c.s3.ListParts(&input) + if err != nil { + return listObjectPartsResponse{}, err + } + lopr.bucket = *resp.Bucket + lopr.key = *resp.Key + lopr.uploadId = *resp.UploadId + for _, p := range resp.Parts { + lopr.objectParts = append(lopr.objectParts, objectPart{ + partNumber: *p.PartNumber, + size: *p.Size, + etag: *p.ETag, + }) + } + return 
lopr, err +} + +func (c *s3TestClient) ListObjects(bucket string, opts listObjectsOptions) (lor listObjectsResponse, err error) { + var input s3aws.ListObjectsV2Input + input.SetBucket(bucket) + if opts.prefix != "" { + input.SetPrefix(opts.prefix) + } + if opts.marker != "" { + opts.marker = base64.URLEncoding.EncodeToString([]byte(opts.marker)) + input.SetContinuationToken(opts.marker) + } + if opts.delimiter != "" { + input.SetDelimiter(opts.delimiter) + } + if opts.maxKeys != 0 { + input.SetMaxKeys(opts.maxKeys) + } + resp, err := c.s3.ListObjectsV2(&input) + if err != nil { + return listObjectsResponse{}, err + } + for _, content := range resp.Contents { + lor.contents = append(lor.contents, headObjectResponse{ + contentLength: *content.Size, + etag: *content.ETag, + key: *content.Key, + lastModified: *content.LastModified, + }) + } + for _, prefix := range resp.CommonPrefixes { + lor.commonPrefixes = append(lor.commonPrefixes, *prefix.Prefix) + } + lor.truncated = *resp.IsTruncated + if resp.NextContinuationToken != nil { + m, err := base64.URLEncoding.DecodeString(*resp.NextContinuationToken) + if err != nil { + return listObjectsResponse{}, err + } + lor.nextMarker = string(m) + } + return lor, nil +} + +func (c *s3TestClient) NewMultipartUpload(bucket, objKey string, opts putObjectOptions) (string, error) { + var input s3aws.CreateMultipartUploadInput + input.SetBucket(bucket) + input.SetKey(objKey) + if opts.metadata != nil { + md := make(map[string]*string) + for k := range opts.metadata { + v := opts.metadata[k] // copy to avoid reference to loop variable + md[k] = &v + } + input.SetMetadata(md) + } + resp, err := c.s3.CreateMultipartUpload(&input) + if err != nil { + return "", err + } + return *resp.UploadId, nil +} + +func (c *s3TestClient) PutObject(bucket, objKey string, body io.ReadSeeker, opts putObjectOptions) (putObjectResponse, error) { + contentLength, err := body.Seek(0, io.SeekEnd) + if err != nil { + return putObjectResponse{}, err + } 
else if _, err := body.Seek(0, io.SeekStart); err != nil { + return putObjectResponse{}, err + } + var input s3aws.PutObjectInput + input.SetBucket(bucket) + input.SetBody(body) + input.SetKey(objKey) + input.SetContentLength(contentLength) + + if opts.metadata != nil { + md := make(map[string]*string) + for k := range opts.metadata { + v := opts.metadata[k] // copy to avoid reference to loop variable + md[k] = &v + } + input.SetMetadata(md) + } + + resp, err := c.s3.PutObject(&input) + if err != nil { + return putObjectResponse{}, err + } + return putObjectResponse{ + etag: *resp.ETag, + }, nil +} + +func (c *s3TestClient) PutObjectPart(bucket, objKey, uploadID string, partNum int64, body io.ReadSeeker, opts putObjectPartOptions) (putObjectPartResponse, error) { + contentLength, err := body.Seek(0, io.SeekEnd) + if err != nil { + return putObjectPartResponse{}, err + } else if _, err := body.Seek(0, io.SeekStart); err != nil { + return putObjectPartResponse{}, err + } + var input s3aws.UploadPartInput + input.SetBucket(bucket) + input.SetKey(objKey) + input.SetUploadId(uploadID) + input.SetPartNumber(partNum) + input.SetBody(body) + input.SetContentLength(contentLength) + part, err := c.s3.UploadPart(&input) + if err != nil { + return putObjectPartResponse{}, err + } + return putObjectPartResponse{ + etag: *part.ETag, + }, nil +} diff --git a/internal/test/e2e/uploads_test.go b/internal/test/e2e/uploads_test.go index 3f83fd7e4..c601becd6 100644 --- a/internal/test/e2e/uploads_test.go +++ b/internal/test/e2e/uploads_test.go @@ -135,7 +135,7 @@ func TestUploadingSectorsCache(t *testing.T) { } } - cr, err := w.RHPContractRoots(context.Background(), id) + cr, err := cluster.ContractRoots(context.Background(), id) tt.OK(err) expected := make(map[types.Hash256]struct{}) for _, root := range cr { diff --git a/internal/utils/keys.go b/internal/utils/keys.go new file mode 100644 index 000000000..0122491a0 --- /dev/null +++ b/internal/utils/keys.go @@ -0,0 +1,66 @@ +package 
utils + +import ( + "fmt" + + "go.sia.tech/core/types" + "golang.org/x/crypto/blake2b" +) + +type ( + MasterKey [32]byte + AccountsKey types.PrivateKey +) + +// DeriveAccountsKey derives an accounts key from a masterkey which is used +// to derive individual account keys from. +func (key *MasterKey) DeriveAccountsKey(workerID string) AccountsKey { + keyPath := fmt.Sprintf("accounts/%s", workerID) + return AccountsKey(key.deriveSubKey(keyPath)) +} + +// DeriveContractKey derives a contract key from a masterkey which is used to +// form, renew and revise contracts. +func (key *MasterKey) DeriveContractKey(hostKey types.PublicKey) types.PrivateKey { + seed := blake2b.Sum256(append(key.deriveSubKey("renterkey"), hostKey[:]...)) + pk := types.NewPrivateKeyFromSeed(seed[:]) + for i := range seed { + seed[i] = 0 + } + return pk +} + +// deriveSubKey can be used to derive a sub-masterkey from the worker's +// masterkey to use for a specific purpose. Such as deriving more keys for +// ephemeral accounts. +func (key *MasterKey) deriveSubKey(purpose string) types.PrivateKey { + seed := blake2b.Sum256(append(key[:], []byte(purpose)...)) + pk := types.NewPrivateKeyFromSeed(seed[:]) + for i := range seed { + seed[i] = 0 + } + return pk +} + +// DeriveAccountKey derives an account plus key for a given host and worker. +// Each worker has its own account for a given host. That makes concurrency +// around keeping track of an accounts balance and refilling it a lot easier in +// a multi-worker setup. +func (key *AccountsKey) DeriveAccountKey(hk types.PublicKey) types.PrivateKey { + index := byte(0) // not used yet but can be used to derive more than 1 account per host + + // Append the host for which to create it and the index to the + // corresponding sub-key. + subKey := *key + data := make([]byte, 0, len(subKey)+len(hk)+1) + data = append(data, subKey[:]...) + data = append(data, hk[:]...) 
+ data = append(data, index) + + seed := types.HashBytes(data) + pk := types.NewPrivateKeyFromSeed(seed[:]) + for i := range seed { + seed[i] = 0 + } + return pk +} diff --git a/internal/utils/web.go b/internal/utils/web.go index 6f0caa571..471270deb 100644 --- a/internal/utils/web.go +++ b/internal/utils/web.go @@ -1,8 +1,10 @@ package utils import ( + "encoding/json" "errors" "fmt" + "io" "net" "net/http" _ "net/http/pprof" @@ -80,3 +82,21 @@ func OpenBrowser(url string) error { return fmt.Errorf("unsupported platform %q", runtime.GOOS) } } + +func DoRequest(req *http.Request, resp interface{}) (http.Header, int, error) { + r, err := http.DefaultClient.Do(req) + if err != nil { + return nil, 0, err + } + defer r.Body.Close() + defer io.Copy(io.Discard, r.Body) + + if r.StatusCode < 200 || r.StatusCode >= 300 { + lr := io.LimitReader(r.Body, 1<<20) // 1MiB + errMsg, _ := io.ReadAll(lr) + return http.Header{}, 0, fmt.Errorf("HTTP error: %s (status: %d)", string(errMsg), r.StatusCode) + } else if resp != nil { + return http.Header{}, 0, json.NewDecoder(r.Body).Decode(resp) + } + return r.Header, r.StatusCode, nil +} diff --git a/internal/worker/accounts.go b/internal/worker/accounts.go new file mode 100644 index 000000000..1022075f1 --- /dev/null +++ b/internal/worker/accounts.go @@ -0,0 +1,614 @@ +package worker + +import ( + "context" + "errors" + "fmt" + "math/big" + "sync" + "time" + + rhpv3 "go.sia.tech/core/rhp/v3" + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" + "go.sia.tech/renterd/api" + rhp3 "go.sia.tech/renterd/internal/rhp/v3" + "go.sia.tech/renterd/internal/utils" + "go.uber.org/zap" +) + +var ( + ErrAccountNotFound = errors.New("account doesn't exist") + + errMaxDriftExceeded = errors.New("drift on account is too large") +) + +var ( + minBalance = types.Siacoins(1).Div64(2).Big() + maxBalance = types.Siacoins(1) + maxNegDrift = new(big.Int).Neg(types.Siacoins(10).Big()) + + alertAccountRefillID = alerts.RandomAlertID() // constant until 
restarted +) + +type ( + AccountFunder interface { + FundAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, desired types.Currency) error + } + + AccountSyncer interface { + SyncAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, siamuxAddr string) error + } + + AccountStore interface { + Accounts(context.Context, string) ([]api.Account, error) + UpdateAccounts(context.Context, []api.Account) error + } + + ConsensusState interface { + ConsensusState(ctx context.Context) (api.ConsensusState, error) + } + + DownloadContracts interface { + DownloadContracts(ctx context.Context) ([]api.ContractMetadata, error) + } +) + +type ( + AccountMgr struct { + alerts alerts.Alerter + funder AccountFunder + syncer AccountSyncer + dc DownloadContracts + cs ConsensusState + s AccountStore + key utils.AccountsKey + logger *zap.SugaredLogger + owner string + refillInterval time.Duration + revisionSubmissionBuffer uint64 + shutdownCtx context.Context + shutdownCancel context.CancelFunc + wg sync.WaitGroup + + mu sync.Mutex + byID map[rhpv3.Account]*Account + inProgressRefills map[types.PublicKey]struct{} + lastLoggedRefillErr map[types.PublicKey]time.Time + } + + Account struct { + key types.PrivateKey + logger *zap.SugaredLogger + + rwmu sync.RWMutex + + mu sync.Mutex + requiresSyncTime time.Time + acc api.Account + } +) + +// NewAccountManager creates a new account manager. It will load all accounts +// from the given store and mark the shutdown as unclean. When Shutdown is +// called it will save all accounts. 
+func NewAccountManager(key utils.AccountsKey, owner string, alerter alerts.Alerter, funder AccountFunder, syncer AccountSyncer, cs ConsensusState, dc DownloadContracts, s AccountStore, refillInterval time.Duration, l *zap.Logger) (*AccountMgr, error) { + logger := l.Named("accounts").Sugar() + + shutdownCtx, shutdownCancel := context.WithCancel(context.Background()) + a := &AccountMgr{ + alerts: alerter, + funder: funder, + syncer: syncer, + cs: cs, + dc: dc, + s: s, + key: key, + logger: logger, + owner: owner, + + inProgressRefills: make(map[types.PublicKey]struct{}), + lastLoggedRefillErr: make(map[types.PublicKey]time.Time), + refillInterval: refillInterval, + shutdownCtx: shutdownCtx, + shutdownCancel: shutdownCancel, + + byID: make(map[rhpv3.Account]*Account), + } + a.wg.Add(1) + go func() { + a.run() + a.wg.Done() + }() + return a, nil +} + +// Account returns the account with the given id. +func (a *AccountMgr) Account(hostKey types.PublicKey) api.Account { + acc := a.account(hostKey) + return acc.convert() +} + +// Accounts returns all accounts. +func (a *AccountMgr) Accounts() []api.Account { + a.mu.Lock() + defer a.mu.Unlock() + accounts := make([]api.Account, 0, len(a.byID)) + for _, acc := range a.byID { + accounts = append(accounts, acc.convert()) + } + return accounts +} + +// ResetDrift resets the drift on an account. 
+func (a *AccountMgr) ResetDrift(id rhpv3.Account) error { + a.mu.Lock() + account, exists := a.byID[id] + if !exists { + a.mu.Unlock() + return ErrAccountNotFound + } + a.mu.Unlock() + + account.resetDrift() + return nil +} + +func (a *AccountMgr) Shutdown(ctx context.Context) error { + accounts := a.Accounts() + err := a.s.UpdateAccounts(ctx, accounts) + if err != nil { + a.logger.Errorf("failed to save %v accounts: %v", len(accounts), err) + return err + } + a.logger.Infof("successfully saved %v accounts", len(accounts)) + + a.shutdownCancel() + + done := make(chan struct{}) + go func() { + a.wg.Wait() + close(done) + }() + select { + case <-ctx.Done(): + return fmt.Errorf("accountMgrShutdown interrupted: %w", context.Cause(ctx)) + case <-done: + } + return nil +} + +func (a *AccountMgr) account(hk types.PublicKey) *Account { + a.mu.Lock() + defer a.mu.Unlock() + + // Derive account key. + accKey := a.key.DeriveAccountKey(hk) + accID := rhpv3.Account(accKey.PublicKey()) + + // Create account if it doesn't exist. + acc, exists := a.byID[accID] + if !exists { + acc = &Account{ + key: accKey, + logger: a.logger.Named(accID.String()), + acc: api.Account{ + ID: accID, + CleanShutdown: false, + HostKey: hk, + Balance: big.NewInt(0), + Drift: big.NewInt(0), + Owner: a.owner, + RequiresSync: true, // force sync on new account + }, + } + a.byID[accID] = acc + } + return acc +} + +// ForHost returns an account to use for a given host. If the account +// doesn't exist, a new one is created. 
+func (a *AccountMgr) ForHost(hk types.PublicKey) *Account { + return a.account(hk) +} + +func (a *AccountMgr) run() { + // wait for store to become available + var saved []api.Account + var err error + ticker := time.NewTicker(5 * time.Second) + for { + aCtx, cancel := context.WithTimeout(a.shutdownCtx, 30*time.Second) + saved, err = a.s.Accounts(aCtx, a.owner) + cancel() + if err == nil { + break + } + + a.logger.Warn("failed to fetch accounts from bus - retrying in a few seconds", zap.Error(err)) + select { + case <-a.shutdownCtx.Done(): + return + case <-ticker.C: + } + } + + // stop ticker + ticker.Stop() + select { + case <-ticker.C: + default: + } + + // add accounts + a.mu.Lock() + accounts := make(map[rhpv3.Account]*Account, len(saved)) + for _, acc := range saved { + accKey := a.key.DeriveAccountKey(acc.HostKey) + if rhpv3.Account(accKey.PublicKey()) != acc.ID { + a.logger.Errorf("account key derivation mismatch %v != %v", accKey.PublicKey(), acc.ID) + continue + } + acc.RequiresSync = true // force sync on reboot + account := &Account{ + acc: acc, + key: accKey, + logger: a.logger.Named(acc.ID.String()), + requiresSyncTime: time.Now(), + } + accounts[account.acc.ID] = account + } + a.mu.Unlock() + + // mark the shutdown as unclean, this will be overwritten on shutdown + uncleanAccounts := append([]api.Account(nil), saved...) 
+ for i := range uncleanAccounts { + uncleanAccounts[i].CleanShutdown = false + } + err = a.s.UpdateAccounts(a.shutdownCtx, uncleanAccounts) + if err != nil { + a.logger.Error("failed to mark account shutdown as unclean", zap.Error(err)) + } + + ticker = time.NewTicker(a.refillInterval) + for { + select { + case <-a.shutdownCtx.Done(): + return // shutdown + case <-ticker.C: + } + a.refillAccounts() + } +} + +func (a *AccountMgr) markRefillInProgress(hk types.PublicKey) bool { + a.mu.Lock() + defer a.mu.Unlock() + _, inProgress := a.inProgressRefills[hk] + if inProgress { + return false + } + a.inProgressRefills[hk] = struct{}{} + return true +} + +func (a *AccountMgr) markRefillDone(hk types.PublicKey) { + a.mu.Lock() + defer a.mu.Unlock() + _, inProgress := a.inProgressRefills[hk] + if !inProgress { + panic("releasing a refill that hasn't been in progress") + } + delete(a.inProgressRefills, hk) +} + +// refillWorkerAccounts refills all accounts on a worker that require a refill. +// To avoid slow hosts preventing refills for fast hosts, a separate goroutine +// is used for every host. If a slow host's account is still being refilled by a +// goroutine from a previous call, refillWorkerAccounts will skip that account +// until the previously launched goroutine returns. 
+func (a *AccountMgr) refillAccounts() { + // fetch config + cs, err := a.cs.ConsensusState(a.shutdownCtx) + if err != nil { + a.logger.Errorw(fmt.Sprintf("failed to fetch consensus state for refill: %v", err)) + return + } + + // fetch all contracts + contracts, err := a.dc.DownloadContracts(a.shutdownCtx) + if err != nil { + a.logger.Errorw(fmt.Sprintf("failed to fetch contracts for refill: %v", err)) + return + } else if len(contracts) == 0 { + return + } + + // refill accounts in separate goroutines + for _, c := range contracts { + // launch refill if not already in progress + if a.markRefillInProgress(c.HostKey) { + go func(contract api.ContractMetadata) { + defer a.markRefillDone(contract.HostKey) + + rCtx, cancel := context.WithTimeout(a.shutdownCtx, 5*time.Minute) + defer cancel() + + // refill + err := a.refillAccount(rCtx, c, cs.BlockHeight, a.revisionSubmissionBuffer) + + // determine whether to log something + shouldLog := true + a.mu.Lock() + if t, exists := a.lastLoggedRefillErr[contract.HostKey]; !exists || err == nil { + a.lastLoggedRefillErr[contract.HostKey] = time.Now() + } else if time.Since(t) < time.Hour { + // only log error once per hour per account + shouldLog = false + } + a.mu.Unlock() + + if err != nil && shouldLog { + a.logger.Error("failed to refill account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) + } else { + a.logger.Infow("successfully refilled account for host", zap.Stringer("hostKey", contract.HostKey), zap.Error(err)) + } + }(c) + } + } +} + +func (a *AccountMgr) refillAccount(ctx context.Context, contract api.ContractMetadata, bh, revisionSubmissionBuffer uint64) error { + // fetch the account + account := a.Account(contract.HostKey) + + // check if the contract is too close to the proof window to be revised, + // trying to refill the account would result in the host not returning the + // revision and returning an obfuscated error + if (bh + revisionSubmissionBuffer) > contract.WindowStart { + 
return fmt.Errorf("contract %v is too close to the proof window to be revised", contract.ID) + } + + // check if a host is potentially cheating before refilling. + // We only check against the max drift if the account's drift is + // negative because we don't care if we have more money than + // expected. + if account.Drift.Cmp(maxNegDrift) < 0 { + alert := newAccountRefillAlert(account.ID, contract, errMaxDriftExceeded, + "accountID", account.ID.String(), + "hostKey", contract.HostKey.String(), + "balance", account.Balance.String(), + "drift", account.Drift.String(), + ) + _ = a.alerts.RegisterAlert(a.shutdownCtx, alert) + return fmt.Errorf("not refilling account since host is potentially cheating: %w", errMaxDriftExceeded) + } else { + _ = a.alerts.DismissAlerts(a.shutdownCtx, alerts.IDForAccount(alertAccountRefillID, account.ID)) + } + + // check if a resync is needed + if account.RequiresSync { + // sync the account + err := a.syncer.SyncAccount(ctx, contract.ID, contract.HostKey, contract.SiamuxAddr) + if err != nil { + return fmt.Errorf("failed to sync account's balance: %w", err) + } + + // refetch the account after syncing + account = a.Account(contract.HostKey) + } + + // check if refill is needed + if account.Balance.Cmp(minBalance) >= 0 { + return nil + } + + // fund the account + err := a.funder.FundAccount(ctx, contract.ID, contract.HostKey, maxBalance) + if err != nil { + return fmt.Errorf("failed to fund account: %w", err) + } + return nil +} + +// WithSync syncs an accounts balance with the bus. To do so, the account is +// locked while the balance is fetched through balanceFn. 
+func (a *Account) WithSync(balanceFn func() (types.Currency, error)) error { + a.rwmu.Lock() + defer a.rwmu.Unlock() + + balance, err := balanceFn() + if err != nil { + return err + } + + a.setBalance(balance.Big()) + return nil +} + +func (a *Account) ID() rhpv3.Account { + return a.acc.ID +} + +func (a *Account) Key() types.PrivateKey { + return a.key +} + +// WithDeposit increases the balance of an account by the amount returned by +// amtFn if amtFn doesn't return an error. +func (a *Account) WithDeposit(amtFn func(types.Currency) (types.Currency, error)) error { + a.rwmu.RLock() + defer a.rwmu.RUnlock() + + a.mu.Lock() + balance := types.NewCurrency(a.acc.Balance.Uint64(), new(big.Int).Rsh(a.acc.Balance, 64).Uint64()) + a.mu.Unlock() + + amt, err := amtFn(balance) + if err != nil { + return err + } + a.addAmount(amt.Big()) + return nil +} + +// WithWithdrawal decreases the balance of an account by the amount returned by +// amtFn. The amount is still withdrawn if amtFn returns an error since some +// costs are non-refundable. +func (a *Account) WithWithdrawal(amtFn func() (types.Currency, error)) error { + a.rwmu.RLock() + defer a.rwmu.RUnlock() + + // return early if the account needs to sync + a.mu.Lock() + if a.acc.RequiresSync { + a.mu.Unlock() + return fmt.Errorf("%w; account requires resync", rhp3.ErrBalanceInsufficient) + } + + // return early if our account is not funded + if a.acc.Balance.Cmp(big.NewInt(0)) <= 0 { + a.mu.Unlock() + return rhp3.ErrBalanceInsufficient + } + a.mu.Unlock() + + // execute amtFn + amt, err := amtFn() + + // in case of an insufficient balance, we schedule a sync + if rhp3.IsBalanceInsufficient(err) { + a.ScheduleSync() + } + + // if an amount was returned, we withdraw it + if !amt.IsZero() { + a.addAmount(new(big.Int).Neg(amt.Big())) + } + return err +} + +// AddAmount applies the provided amount to an account through addition. 
So the +// input can be both a positive or negative number depending on whether a +// withdrawal or deposit is recorded. If the account doesn't exist, it is +// created. +func (a *Account) addAmount(amt *big.Int) { + a.mu.Lock() + defer a.mu.Unlock() + + // Update balance. + balanceBefore := a.acc.Balance + a.acc.Balance.Add(a.acc.Balance, amt) + + // Log deposits. + if amt.Cmp(big.NewInt(0)) > 0 { + a.logger.Infow("account balance was increased", + "account", a.acc.ID, + "host", a.acc.HostKey.String(), + "amt", amt.String(), + "balanceBefore", balanceBefore, + "balanceAfter", a.acc.Balance.String()) + } +} + +func (a *Account) convert() api.Account { + a.mu.Lock() + defer a.mu.Unlock() + return api.Account{ + ID: a.acc.ID, + CleanShutdown: a.acc.CleanShutdown, + HostKey: a.acc.HostKey, + Balance: new(big.Int).Set(a.acc.Balance), + Drift: new(big.Int).Set(a.acc.Drift), + Owner: a.acc.Owner, + RequiresSync: a.acc.RequiresSync, + } +} + +func (a *Account) resetDrift() { + a.mu.Lock() + a.acc.Drift.SetInt64(0) + a.mu.Unlock() +} + +// scheduleSync sets the requiresSync flag of an account. +func (a *Account) ScheduleSync() { + a.mu.Lock() + defer a.mu.Unlock() + + // Only update the sync flag to 'true' if some time has passed since the + // last time it was set. That way we avoid multiple workers setting it after + // failing at the same time, causing multiple syncs in the process. + if time.Since(a.requiresSyncTime) < 30*time.Second { + a.logger.Warn("not scheduling account sync since it was scheduled too recently", zap.Stringer("account", a.acc.ID)) + return + } + a.acc.RequiresSync = true + a.requiresSyncTime = time.Now() + + // Log scheduling a sync. + a.logger.Infow("account sync was scheduled", + "account", a.ID, + "host", a.acc.HostKey.String(), + "balance", a.acc.Balance.String(), + "drift", a.acc.Drift.String()) +} + +// setBalance sets the balance of a given account to the provided amount. If the +// account doesn't exist, it is created. 
+// If an account hasn't been saved successfully upon the last shutdown, no drift +// will be added upon the first call to SetBalance. +func (a *Account) setBalance(balance *big.Int) { + a.mu.Lock() + defer a.mu.Unlock() + + // save previous values + prevBalance := new(big.Int).Set(a.acc.Balance) + prevDrift := new(big.Int).Set(a.acc.Drift) + + // update balance + a.acc.Balance.Set(balance) + + // update drift + drift := new(big.Int).Sub(balance, prevBalance) + if a.acc.CleanShutdown { + a.acc.Drift = a.acc.Drift.Add(a.acc.Drift, drift) + } + + // reset fields + a.acc.CleanShutdown = true + a.acc.RequiresSync = false + + // log account changes + a.logger.Infow("account balance was reset", + zap.Stringer("account", a.acc.ID), + zap.Stringer("host", a.acc.HostKey), + zap.Stringer("balanceBefore", prevBalance), + zap.Stringer("balanceAfter", balance), + zap.Stringer("driftBefore", prevDrift), + zap.Stringer("driftAfter", a.acc.Drift), + zap.Bool("firstDrift", a.acc.Drift.Cmp(big.NewInt(0)) != 0 && prevDrift.Cmp(big.NewInt(0)) == 0), + zap.Bool("cleanshutdown", a.acc.CleanShutdown), + zap.Stringer("drift", drift)) +} + +func newAccountRefillAlert(id rhpv3.Account, contract api.ContractMetadata, err error, keysAndValues ...string) alerts.Alert { + data := map[string]interface{}{ + "error": err.Error(), + "accountID": id.String(), + "contractID": contract.ID.String(), + "hostKey": contract.HostKey.String(), + } + for i := 0; i < len(keysAndValues); i += 2 { + data[keysAndValues[i]] = keysAndValues[i+1] + } + + return alerts.Alert{ + ID: alerts.IDForAccount(alertAccountRefillID, id), + Severity: alerts.SeverityError, + Message: "Ephemeral account refill failed", + Data: data, + Timestamp: time.Now(), + } +} diff --git a/internal/worker/accounts_test.go b/internal/worker/accounts_test.go new file mode 100644 index 000000000..d33e67200 --- /dev/null +++ b/internal/worker/accounts_test.go @@ -0,0 +1,161 @@ +package worker + +import ( + "context" + "math/big" + "testing" + 
"time" + + "github.com/google/go-cmp/cmp" + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" + "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" + "go.uber.org/zap" +) + +type mockAccountMgrBackend struct { + contracts []api.ContractMetadata +} + +func (b *mockAccountMgrBackend) Alerts(context.Context, alerts.AlertsOpts) (alerts.AlertsResponse, error) { + return alerts.AlertsResponse{}, nil +} + +func (b *mockAccountMgrBackend) DismissAlerts(context.Context, ...types.Hash256) error { + return nil +} + +func (b *mockAccountMgrBackend) RegisterAlert(context.Context, alerts.Alert) error { + return nil +} + +func (b *mockAccountMgrBackend) FundAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, balance types.Currency) error { + return nil +} +func (b *mockAccountMgrBackend) SyncAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, siamuxAddr string) error { + return nil +} +func (b *mockAccountMgrBackend) Accounts(context.Context, string) ([]api.Account, error) { + return []api.Account{}, nil +} +func (b *mockAccountMgrBackend) UpdateAccounts(context.Context, []api.Account) error { + return nil +} +func (b *mockAccountMgrBackend) ConsensusState(ctx context.Context) (api.ConsensusState, error) { + return api.ConsensusState{}, nil +} +func (b *mockAccountMgrBackend) DownloadContracts(ctx context.Context) ([]api.ContractMetadata, error) { + return nil, nil +} + +func TestAccounts(t *testing.T) { + // create a manager with an account for a single host + hk := types.PublicKey{1} + b := &mockAccountMgrBackend{ + contracts: []api.ContractMetadata{ + { + ID: types.FileContractID{1}, + HostKey: hk, + }, + }, + } + mgr, err := NewAccountManager(utils.AccountsKey(types.GeneratePrivateKey()), "test", b, b, b, b, b, b, time.Second, zap.NewNop()) + if err != nil { + t.Fatal(err) + } + + // create account + account := mgr.ForHost(hk) + + // assert account exists + accounts := mgr.Accounts() + if len(accounts) != 1 { 
+ t.Fatalf("expected 1 account but got %v", len(accounts)) + } + + comparer := cmp.Comparer(func(i1, i2 *big.Int) bool { + return i1.Cmp(i2) == 0 + }) + + // Newly created accounts are !cleanShutdown and require a sync. Simulate a + // sync to change that. + for _, acc := range accounts { + if expected := (api.Account{ + CleanShutdown: false, + RequiresSync: true, + ID: account.ID(), + HostKey: hk, + Balance: types.ZeroCurrency.Big(), + Drift: types.ZeroCurrency.Big(), + Owner: "test", + }); !cmp.Equal(acc, expected, comparer) { + t.Fatal("account doesn't match expectation", cmp.Diff(acc, expected, comparer)) + } + } + + // set balance to 0SC to simulate a sync + account.setBalance(types.ZeroCurrency.Big()) + + acc := mgr.Account(hk) + if expected := (api.Account{ + CleanShutdown: true, + RequiresSync: false, + ID: account.ID(), + HostKey: hk, + Balance: types.ZeroCurrency.Big(), + Drift: types.ZeroCurrency.Big(), + Owner: "test", + }); !cmp.Equal(acc, expected, comparer) { + t.Fatal("account doesn't match expectation", cmp.Diff(acc, expected, comparer)) + } + + // fund with 1 SC + account.addAmount(types.Siacoins(1).Big()) + + acc = mgr.Account(hk) + if expected := (api.Account{ + CleanShutdown: true, + RequiresSync: false, + ID: account.ID(), + HostKey: hk, + Balance: types.Siacoins(1).Big(), + Drift: types.ZeroCurrency.Big(), + Owner: "test", + }); !cmp.Equal(acc, expected, comparer) { + t.Fatal("account doesn't match expectation", cmp.Diff(acc, expected, comparer)) + } + + // schedule a sync + account.ScheduleSync() + + acc = mgr.Account(hk) + if expected := (api.Account{ + CleanShutdown: true, + RequiresSync: true, + ID: account.ID(), + HostKey: hk, + Balance: types.Siacoins(1).Big(), + Drift: types.ZeroCurrency.Big(), + Owner: "test", + }); !cmp.Equal(acc, expected, comparer) { + t.Fatal("account doesn't match expectation", cmp.Diff(acc, expected, comparer)) + } + + // update the balance to create some drift, sync should be reset + newBalance := 
types.Siacoins(1).Div64(2).Big() + newDrift := new(big.Int).Neg(newBalance) + account.setBalance(newBalance) + acc = mgr.Account(hk) + if expected := (api.Account{ + CleanShutdown: true, + RequiresSync: false, + ID: account.ID(), + HostKey: hk, + Balance: newBalance, + Drift: newDrift, + Owner: "test", + }); !cmp.Equal(acc, expected, comparer) { + t.Fatal("account doesn't match expectation", cmp.Diff(acc, expected, comparer)) + } +} diff --git a/internal/worker/events_test.go b/internal/worker/events_test.go index 76c89bbfb..95a74da91 100644 --- a/internal/worker/events_test.go +++ b/internal/worker/events_test.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "io" "net/http" "net/http/httptest" "sync" @@ -16,6 +15,7 @@ import ( "go.sia.tech/jape" "go.sia.tech/renterd/alerts" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/webhooks" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" @@ -117,7 +117,7 @@ func TestEventSubscriber(t *testing.T) { } // setup a server - mux := jape.Mux(map[string]jape.Handler{"POST /events": func(jc jape.Context) { + mux := jape.Mux(map[string]jape.Handler{"POST /event": func(jc jape.Context) { var event webhooks.Event if jc.Decode(&event) != nil { return @@ -132,7 +132,7 @@ func TestEventSubscriber(t *testing.T) { defer srv.Close() // register the subscriber - eventsURL := fmt.Sprintf("http://%v/events", srv.Listener.Addr().String()) + eventsURL := fmt.Sprintf("http://%v/event", srv.Listener.Addr().String()) go func() { if err := s.Register(context.Background(), eventsURL); err != nil { t.Error(err) @@ -202,18 +202,7 @@ func sendEvent(url string, event webhooks.Event) error { if err != nil { return err } - defer io.ReadAll(req.Body) // always drain body - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - errStr, err := io.ReadAll(req.Body) - if err != nil { - return 
fmt.Errorf("failed to read response body: %w", err) - } - return fmt.Errorf("Webhook returned unexpected status %v: %v", resp.StatusCode, string(errStr)) - } - return nil + _, _, err = utils.DoRequest(req, nil) + return err } diff --git a/stores/accounts.go b/stores/accounts.go index 183582b8b..ca5b70c7b 100644 --- a/stores/accounts.go +++ b/stores/accounts.go @@ -8,24 +8,14 @@ import ( ) // Accounts returns all accounts from the db. -func (s *SQLStore) Accounts(ctx context.Context) (accounts []api.Account, err error) { +func (s *SQLStore) Accounts(ctx context.Context, owner string) (accounts []api.Account, err error) { err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - accounts, err = tx.Accounts(ctx) + accounts, err = tx.Accounts(ctx, owner) return err }) return } -// SetUncleanShutdown sets the clean shutdown flag on the accounts to 'false' -// and also sets the 'requires_sync' flag. That way, the autopilot will know to -// sync all accounts after an unclean shutdown and the bus will know not to -// apply drift. -func (s *SQLStore) SetUncleanShutdown(ctx context.Context) error { - return s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { - return tx.SetUncleanShutdown(ctx) - }) -} - // SaveAccounts saves the given accounts in the db, overwriting any existing // ones. 
func (s *SQLStore) SaveAccounts(ctx context.Context, accounts []api.Account) error { diff --git a/stores/autopilot_test.go b/stores/autopilot_test.go index 1ed78370c..9559b2dbe 100644 --- a/stores/autopilot_test.go +++ b/stores/autopilot_test.go @@ -37,9 +37,9 @@ func TestAutopilotStore(t *testing.T) { Set: testContractSet, }, Hosts: api.HostsConfig{ - MaxDowntimeHours: 10, - MinRecentScanFailures: 10, - AllowRedundantIPs: true, // allow for integration tests by default + MaxDowntimeHours: 10, + MaxConsecutiveScanFailures: 10, + AllowRedundantIPs: true, // allow for integration tests by default }, } diff --git a/stores/bench_test.go b/stores/bench_test.go new file mode 100644 index 000000000..60f75b52f --- /dev/null +++ b/stores/bench_test.go @@ -0,0 +1,167 @@ +package stores + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "testing" + "time" + + "go.sia.tech/core/types" + isql "go.sia.tech/renterd/internal/sql" + "go.sia.tech/renterd/object" + "go.sia.tech/renterd/stores/sql" + "go.sia.tech/renterd/stores/sql/sqlite" + "go.uber.org/zap" + "lukechampine.com/frand" +) + +// BenchmarkPrunableContractRoots benchmarks diffing the roots of a contract +// with a given set of roots to determine which roots are prunable. 
+// +// 15.32 MB/s | M1 Max | cd32fad7 (diff ~2TiB of contract data per second) +func BenchmarkPrunableContractRoots(b *testing.B) { + // define parameters + batchSize := int64(25600) // 100GiB of contract data + contractSize := 1 << 40 // 1TiB contract + sectorSize := 4 << 20 // 4MiB sector + numSectors := contractSize / sectorSize + + // create database + db, err := newTestDB(context.Background(), b.TempDir()) + if err != nil { + b.Fatal(err) + } + + // prepare database + fcid := types.FileContractID{1} + roots, err := prepareDB(db.DB(), fcid, numSectors) + if err != nil { + b.Fatal(err) + } + + // prepare batch + frand.Shuffle(len(roots), func(i, j int) { + roots[i], roots[j] = roots[j], roots[i] + }) + batch := roots[:batchSize] + + // start benchmark + b.ResetTimer() + b.SetBytes(batchSize * 32) + for i := 0; i < b.N; i++ { + if err := db.Transaction(context.Background(), func(tx sql.DatabaseTx) error { + indices, err := tx.PrunableContractRoots(context.Background(), fcid, batch) + if err != nil { + return err + } else if len(indices) != 0 { + return errors.New("expected no prunable roots") + } + return nil + }); err != nil { + b.Fatal(err) + } + } +} + +func prepareDB(db *isql.DB, fcid types.FileContractID, n int) (roots []types.Hash256, _ error) { + // insert host + hk := types.PublicKey{1} + res, err := db.Exec(context.Background(), ` +INSERT INTO hosts (public_key) VALUES (?)`, sql.PublicKey(hk)) + if err != nil { + return nil, err + } + hostID, err := res.LastInsertId() + if err != nil { + return nil, err + } + + // insert contract + res, err = db.Exec(context.Background(), ` +INSERT INTO contracts (host_id, fcid,start_height) VALUES (?, ?, ?)`, hostID, sql.FileContractID(fcid), 0) + if err != nil { + return nil, err + } + contractID, err := res.LastInsertId() + if err != nil { + return nil, err + } + + // insert slab + key := object.GenerateEncryptionKey() + res, err = db.Exec(context.Background(), ` +INSERT INTO slabs (created_at, `+"`key`"+`) VALUES 
(?, ?)`, time.Now(), sql.EncryptionKey(key)) + if err != nil { + return nil, err + } + slabID, err := res.LastInsertId() + if err != nil { + return nil, err + } + + // insert sectors + insertSectorStmt, err := db.Prepare(context.Background(), ` +INSERT INTO sectors (db_slab_id, slab_index, latest_host, root) VALUES (?, ?, ?, ?) RETURNING id`) + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to insert sector: %w", err) + } + defer insertSectorStmt.Close() + var sectorIDs []int64 + for i := 0; i < n; i++ { + var sectorID int64 + roots = append(roots, frand.Entropy256()) + err := insertSectorStmt.QueryRow(context.Background(), slabID, i, sql.PublicKey(hk), sql.Hash256(roots[i])).Scan(§orID) + if err != nil { + return nil, fmt.Errorf("failed to insert sector: %w", err) + } + sectorIDs = append(sectorIDs, sectorID) + } + + // insert contract sectors + insertLinkStmt, err := db.Prepare(context.Background(), ` +INSERT INTO contract_sectors (db_contract_id, db_sector_id) VALUES (?, ?)`) + if err != nil { + return nil, fmt.Errorf("failed to prepare statement to insert contract sectors: %w", err) + } + defer insertLinkStmt.Close() + for _, sectorID := range sectorIDs { + if _, err := insertLinkStmt.Exec(context.Background(), contractID, sectorID); err != nil { + return nil, fmt.Errorf("failed to insert contract sector: %w", err) + } + } + + // sanity check + var cnt int + err = db.QueryRow(context.Background(), ` +SELECT COUNT(s.root) +FROM contracts c +INNER JOIN contract_sectors cs ON cs.db_contract_id = c.id +INNER JOIN sectors s ON cs.db_sector_id = s.id +WHERE c.fcid = ?`, sql.FileContractID(fcid)).Scan(&cnt) + if cnt != n { + return nil, fmt.Errorf("expected %v sectors, got %v", n, cnt) + } + + return +} + +func newTestDB(ctx context.Context, dir string) (*sqlite.MainDatabase, error) { + db, err := sqlite.Open(filepath.Join(dir, "db.sqlite")) + if err != nil { + return nil, err + } + + dbMain, err := sqlite.NewMainDatabase(db, zap.NewNop(), 
100*time.Millisecond, 100*time.Millisecond) + if err != nil { + return nil, err + } + + err = dbMain.Migrate(ctx) + if err != nil { + return nil, err + } + + return dbMain, nil +} diff --git a/stores/hostdb_test.go b/stores/hostdb_test.go index b4bba0dc1..ba3b2c1ac 100644 --- a/stores/hostdb_test.go +++ b/stores/hostdb_test.go @@ -430,7 +430,7 @@ func TestRecordScan(t *testing.T) { t.Fatal(err) } if host.Interactions != (api.HostInteractions{}) { - t.Fatal("mismatch") + t.Fatal("mismatch", cmp.Diff(host.Interactions, api.HostInteractions{})) } if host.Settings != (rhpv2.HostSettings{}) { t.Fatal("mismatch") @@ -487,7 +487,7 @@ func TestRecordScan(t *testing.T) { // We expect no uptime or downtime from only a single scan. uptime := time.Duration(0) downtime := time.Duration(0) - if host.Interactions.LastScan.UnixNano() != firstScanTime.UnixNano() { + if host.Interactions.LastScan.UnixMilli() != firstScanTime.UnixMilli() { t.Fatal("wrong time") } host.Interactions.LastScan = time.Time{} @@ -517,7 +517,7 @@ func TestRecordScan(t *testing.T) { host, err = ss.Host(ctx, hk) if err != nil { t.Fatal(err) - } else if host.Interactions.LastScan.UnixNano() != secondScanTime.UnixNano() { + } else if host.Interactions.LastScan.UnixMilli() != secondScanTime.UnixMilli() { t.Fatal("wrong time") } else if time.Now().After(host.PriceTable.Expiry) { t.Fatal("invalid expiry") @@ -555,7 +555,7 @@ func TestRecordScan(t *testing.T) { if err != nil { t.Fatal(err) } - if host.Interactions.LastScan.UnixNano() != thirdScanTime.UnixNano() { + if host.Interactions.LastScan.UnixMilli() != thirdScanTime.UnixMilli() { t.Fatal("wrong time") } host.Interactions.LastScan = time.Time{} diff --git a/stores/metadata.go b/stores/metadata.go index fe26bc29e..2a7723142 100644 --- a/stores/metadata.go +++ b/stores/metadata.go @@ -237,12 +237,15 @@ func (s *SQLStore) ContractSize(ctx context.Context, id types.FileContractID) (c return cs, err } -func (s *SQLStore) SetContractSet(ctx context.Context, name 
string, contractIds []types.FileContractID) error { - wanted := make(map[types.FileContractID]struct{}) - for _, fcid := range contractIds { - wanted[types.FileContractID(fcid)] = struct{}{} +func (s *SQLStore) UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error { + toAddMap := make(map[types.FileContractID]struct{}) + for _, fcid := range toAdd { + toAddMap[fcid] = struct{}{} + } + toRemoveMap := make(map[types.FileContractID]struct{}) + for _, fcid := range toRemove { + toRemoveMap[fcid] = struct{}{} } - var diff []types.FileContractID var nContractsAfter int err := s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { @@ -251,19 +254,20 @@ func (s *SQLStore) SetContractSet(ctx context.Context, name string, contractIds if err != nil && !errors.Is(err, api.ErrContractSetNotFound) { return fmt.Errorf("failed to fetch contracts: %w", err) } + diff = nil // reset for _, c := range prevContracts { - if _, exists := wanted[c.ID]; !exists { + if _, exists := toAddMap[c.ID]; exists { + delete(toAddMap, c.ID) // already exists + } else if _, exists := toRemoveMap[c.ID]; exists { diff = append(diff, c.ID) // removal - } else { - delete(wanted, c.ID) } } - for fcid := range wanted { + for _, fcid := range toAdd { diff = append(diff, fcid) // addition } // update contract set - if err := tx.SetContractSet(ctx, name, contractIds); err != nil { + if err := tx.UpdateContractSet(ctx, name, toAdd, toRemove); err != nil { return fmt.Errorf("failed to set contract set: %w", err) } // fetch contracts after update @@ -669,6 +673,14 @@ func (s *SQLStore) ObjectsBySlabKey(ctx context.Context, bucket string, slabKey return } +func (s *SQLStore) PrunableContractRoots(ctx context.Context, fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) { + err = s.db.Transaction(ctx, func(tx sql.DatabaseTx) error { + indices, err = tx.PrunableContractRoots(ctx, fcid, roots) + return err + }) + return +} + // 
MarkPackedSlabsUploaded marks the given slabs as uploaded and deletes them // from the buffer. func (s *SQLStore) MarkPackedSlabsUploaded(ctx context.Context, slabs []api.UploadedPackedSlab) error { diff --git a/stores/metadata_test.go b/stores/metadata_test.go index 6d0639e5d..40242f53f 100644 --- a/stores/metadata_test.go +++ b/stores/metadata_test.go @@ -126,6 +126,82 @@ SET health = ( return err } +func TestPrunableContractRoots(t *testing.T) { + // create a SQL store + ss := newTestSQLStore(t, defaultTestSQLStoreConfig) + defer ss.Close() + + // add a contract + hks, err := ss.addTestHosts(1) + if err != nil { + t.Fatal(err) + } + fcids, _, err := ss.addTestContracts(hks) + if err != nil { + t.Fatal(err) + } + + // add 4 objects + for i := 1; i <= 4; i++ { + if _, err := ss.addTestObject(fmt.Sprintf("%s_%d", t.Name(), i), object.Object{ + Key: object.GenerateEncryptionKey(), + Slabs: []object.SlabSlice{ + { + Slab: object.Slab{ + Key: object.GenerateEncryptionKey(), + MinShards: 1, + Shards: newTestShards(hks[0], fcids[0], types.Hash256{byte(i)}), + }, + }, + }, + }); err != nil { + t.Fatal(err) + } + } + + // assert there's 4 roots in the database + roots, err := ss.ContractRoots(context.Background(), fcids[0]) + if err != nil { + t.Fatal(err) + } else if len(roots) != 4 { + t.Fatal("unexpected number of roots", len(roots)) + } + + // diff the roots - should be empty + indices, err := ss.PrunableContractRoots(context.Background(), fcids[0], roots) + if err != nil { + t.Fatal(err) + } else if len(indices) != 0 { + t.Fatal("unexpected number of indices", len(indices)) + } + + // delete every other object + if err := ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, fmt.Sprintf("%s_1", t.Name())); err != nil { + t.Fatal(err) + } + if err := ss.RemoveObjectBlocking(context.Background(), api.DefaultBucketName, fmt.Sprintf("%s_3", t.Name())); err != nil { + t.Fatal(err) + } + + // assert there's 2 roots left + updated, err := 
ss.ContractRoots(context.Background(), fcids[0]) + if err != nil { + t.Fatal(err) + } else if len(updated) != 2 { + t.Fatal("unexpected number of roots", len(updated)) + } + + // diff the roots again, should return indices 0 and 2 + indices, err = ss.PrunableContractRoots(context.Background(), fcids[0], roots) + if err != nil { + t.Fatal(err) + } else if len(indices) != 2 { + t.Fatal("unexpected number of indices", len(indices)) + } else if indices[0] != 0 || indices[1] != 2 { + t.Fatal("unexpected indices", indices) + } +} + // TestObjectBasic tests the hydration of raw objects works when we fetch // objects from the metadata store. func TestObjectBasic(t *testing.T) { @@ -418,7 +494,7 @@ func TestSQLContractStore(t *testing.T) { } // Add a contract set with our contract and assert we can fetch it using the set name - if err := ss.SetContractSet(ctx, "foo", []types.FileContractID{contracts[0].ID}); err != nil { + if err := ss.UpdateContractSet(ctx, "foo", []types.FileContractID{contracts[0].ID}, nil); err != nil { t.Fatal(err) } if contracts, err := ss.Contracts(ctx, api.ContractsOpts{ContractSet: "foo"}); err != nil { @@ -431,7 +507,7 @@ func TestSQLContractStore(t *testing.T) { } // Add another contract set. - if err := ss.SetContractSet(ctx, "foo2", []types.FileContractID{contracts[0].ID}); err != nil { + if err := ss.UpdateContractSet(ctx, "foo2", []types.FileContractID{contracts[0].ID}, nil); err != nil { t.Fatal(err) } @@ -605,7 +681,7 @@ func TestRenewedContract(t *testing.T) { } // create a contract set with both contracts. 
- if err := ss.SetContractSet(context.Background(), "test", []types.FileContractID{fcid1, fcid2}); err != nil { + if err := ss.UpdateContractSet(context.Background(), "test", []types.FileContractID{fcid1, fcid2}, nil); err != nil { t.Fatal(err) } @@ -1309,7 +1385,7 @@ func TestObjectHealth(t *testing.T) { } // all contracts are good - if err := ss.SetContractSet(context.Background(), testContractSet, fcids); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, fcids, nil); err != nil { t.Fatal(err) } @@ -1362,7 +1438,7 @@ func TestObjectHealth(t *testing.T) { } // update contract to impact the object's health - if err := ss.SetContractSet(context.Background(), testContractSet, []types.FileContractID{fcids[0], fcids[2], fcids[3], fcids[4]}); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, []types.FileContractID{fcids[0], fcids[2], fcids[3], fcids[4]}, []types.FileContractID{fcids[1]}); err != nil { t.Fatal(err) } if err := ss.RefreshHealth(context.Background()); err != nil { @@ -1375,7 +1451,7 @@ func TestObjectHealth(t *testing.T) { if err != nil { t.Fatal(err) } else if obj.Health != expectedHealth { - t.Fatal("wrong health", obj.Health) + t.Fatal("wrong health", obj.Health, expectedHealth) } // assert health is returned correctly by ObjectEntries @@ -1399,7 +1475,7 @@ func TestObjectHealth(t *testing.T) { } // update contract set again to make sure the 2nd slab has even worse health - if err := ss.SetContractSet(context.Background(), testContractSet, []types.FileContractID{fcids[0], fcids[2], fcids[3]}); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, []types.FileContractID{fcids[0], fcids[2], fcids[3]}, []types.FileContractID{fcids[4]}); err != nil { t.Fatal(err) } if err := ss.RefreshHealth(context.Background()); err != nil { @@ -1675,7 +1751,7 @@ func TestSearchObjects(t *testing.T) { assertEqual := func(got []api.ObjectMetadata, want []api.ObjectMetadata) 
{ t.Helper() if len(got) != len(want) { - t.Fatalf("unexpected result, we want %d items and we got %d items \ndiff: %v", len(want), len(got), cmp.Diff(got, want)) + t.Fatalf("unexpected result, we want %d items and we got %d items \ndiff: %v", len(want), len(got), cmp.Diff(got, want, cmp.Comparer(api.CompareTimeRFC3339))) } for i := range got { if !metadataEquals(got[i], want[i]) { @@ -1733,7 +1809,7 @@ func TestUnhealthySlabs(t *testing.T) { // update the contract set goodContracts := []types.FileContractID{fcid1, fcid2, fcid3} - if err := ss.SetContractSet(context.Background(), testContractSet, goodContracts); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, goodContracts, nil); err != nil { t.Fatal(err) } @@ -1899,7 +1975,7 @@ func TestUnhealthySlabsNegHealth(t *testing.T) { fcid1 := fcids[0] // add it to the contract set - if err := ss.SetContractSet(context.Background(), testContractSet, fcids); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, fcids, nil); err != nil { t.Fatal(err) } @@ -1958,7 +2034,7 @@ func TestUnhealthySlabsNoContracts(t *testing.T) { fcid1 := fcids[0] // add it to the contract set - if err := ss.SetContractSet(context.Background(), testContractSet, fcids); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, fcids, nil); err != nil { t.Fatal(err) } @@ -2038,7 +2114,7 @@ func TestUnhealthySlabsNoRedundancy(t *testing.T) { // select the first two contracts as good contracts goodContracts := []types.FileContractID{fcid1, fcid2} - if err := ss.SetContractSet(context.Background(), testContractSet, goodContracts); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, goodContracts, nil); err != nil { t.Fatal(err) } @@ -2258,7 +2334,7 @@ func TestUpdateSlab(t *testing.T) { // select contracts h1 and h3 as good contracts (h2 is bad) goodContracts := []types.FileContractID{fcid1, fcid3} - if err := 
ss.SetContractSet(ctx, testContractSet, goodContracts); err != nil { + if err := ss.UpdateContractSet(ctx, testContractSet, goodContracts, nil); err != nil { t.Fatal(err) } @@ -2333,7 +2409,7 @@ func TestUpdateSlab(t *testing.T) { } // update the slab to change its contract set. - if err := ss.SetContractSet(ctx, "other", nil); err != nil { + if err := ss.UpdateContractSet(ctx, "other", nil, nil); err != nil { t.Fatal(err) } err = ss.UpdateSlab(ctx, slab, "other") @@ -3986,7 +4062,7 @@ func TestSlabHealthInvalidation(t *testing.T) { refreshHealth(s1, s2) // add 2 contracts to the contract set - if err := ss.SetContractSet(context.Background(), testContractSet, fcids[:2]); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, fcids[:2], nil); err != nil { t.Fatal(err) } assertHealthValid(s1, false) @@ -3996,7 +4072,7 @@ func TestSlabHealthInvalidation(t *testing.T) { refreshHealth(s1, s2) // switch out the contract set with two new contracts - if err := ss.SetContractSet(context.Background(), testContractSet, fcids[2:]); err != nil { + if err := ss.UpdateContractSet(context.Background(), testContractSet, fcids[2:], fcids[:2]); err != nil { t.Fatal(err) } assertHealthValid(s1, false) @@ -4083,7 +4159,7 @@ func TestRefreshHealth(t *testing.T) { if err != nil { t.Fatal(err) } - err = ss.SetContractSet(context.Background(), testContractSet, fcids) + err = ss.UpdateContractSet(context.Background(), testContractSet, fcids, nil) if err != nil { t.Fatal(err) } @@ -4128,7 +4204,7 @@ func TestRefreshHealth(t *testing.T) { } // update contract set to not contain the first contract - err = ss.SetContractSet(context.Background(), testContractSet, fcids[1:]) + err = ss.UpdateContractSet(context.Background(), testContractSet, fcids[1:], fcids[:1]) if err != nil { t.Fatal(err) } @@ -4144,7 +4220,7 @@ func TestRefreshHealth(t *testing.T) { // update contract set again to increase health of o1 again and lower health // of o2 - err = 
ss.SetContractSet(context.Background(), testContractSet, fcids[:6]) + err = ss.UpdateContractSet(context.Background(), testContractSet, fcids[:6], fcids[6:]) if err != nil { t.Fatal(err) } @@ -4182,7 +4258,7 @@ func TestSlabCleanup(t *testing.T) { // create contract set err := ss.db.Transaction(context.Background(), func(tx sql.DatabaseTx) error { - return tx.SetContractSet(context.Background(), testContractSet, nil) + return tx.UpdateContractSet(context.Background(), testContractSet, nil, nil) }) if err != nil { t.Fatal(err) diff --git a/stores/metrics.go b/stores/metrics.go index 62dbde8ce..2c5dd314b 100644 --- a/stores/metrics.go +++ b/stores/metrics.go @@ -40,14 +40,6 @@ func (s *SQLStore) ContractSetMetrics(ctx context.Context, start time.Time, n ui return } -func (s *SQLStore) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) (metrics []api.PerformanceMetric, err error) { - err = s.dbMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) (txErr error) { - metrics, txErr = tx.PerformanceMetrics(ctx, start, n, interval, opts) - return - }) - return -} - func (s *SQLStore) RecordContractMetric(ctx context.Context, metrics ...api.ContractMetric) error { return s.dbMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { return tx.RecordContractMetric(ctx, metrics...) @@ -72,12 +64,6 @@ func (s *SQLStore) RecordContractSetMetric(ctx context.Context, metrics ...api.C }) } -func (s *SQLStore) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { - return s.dbMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { - return tx.RecordPerformanceMetric(ctx, metrics...) - }) -} - func (s *SQLStore) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { return s.dbMetrics.Transaction(ctx, func(tx sql.MetricsDatabaseTx) error { return tx.RecordWalletMetric(ctx, metrics...) 
diff --git a/stores/metrics_test.go b/stores/metrics_test.go index 9a9f7b71b..76852e3ed 100644 --- a/stores/metrics_test.go +++ b/stores/metrics_test.go @@ -431,91 +431,6 @@ func TestNormaliseTimestamp(t *testing.T) { } } -func TestPerformanceMetrics(t *testing.T) { - ss := newTestSQLStore(t, defaultTestSQLStoreConfig) - defer ss.Close() - - // Create metrics to query. - actions := []string{"download", "upload"} - hosts := []types.PublicKey{types.GeneratePrivateKey().PublicKey(), types.GeneratePrivateKey().PublicKey()} - origins := []string{"worker1", "worker2"} - durations := []time.Duration{time.Second, time.Hour} - times := []time.Time{time.UnixMilli(3), time.UnixMilli(1), time.UnixMilli(2)} - var i byte - for _, action := range actions { - for _, host := range hosts { - for _, origin := range origins { - for _, duration := range durations { - for _, recordedTime := range times { - if err := ss.RecordPerformanceMetric(context.Background(), api.PerformanceMetric{ - Action: action, - Timestamp: api.TimeRFC3339(recordedTime), - Duration: duration, - HostKey: host, - Origin: origin, - }); err != nil { - t.Fatal(err) - } - i++ - } - } - } - } - } - - assertMetrics := func(start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts, expected int, cmp func(api.PerformanceMetric)) { - t.Helper() - metrics, err := ss.PerformanceMetrics(context.Background(), start, n, interval, opts) - if err != nil { - t.Fatal(err) - } - if len(metrics) != expected { - t.Fatalf("expected %v metrics, got %v", expected, len(metrics)) - } else if !sort.SliceIsSorted(metrics, func(i, j int) bool { - return time.Time(metrics[i].Timestamp).Before(time.Time(metrics[j].Timestamp)) - }) { - t.Fatal("expected metrics to be sorted by time") - } - for _, m := range metrics { - cmp(m) - } - } - - // Query without any filters. 
- start := time.UnixMilli(1) - assertMetrics(start, 3, time.Millisecond, api.PerformanceMetricsQueryOpts{}, 3, func(m api.PerformanceMetric) {}) - - // Filter by actions. - assertMetrics(start, 3, time.Millisecond, api.PerformanceMetricsQueryOpts{Action: actions[0]}, 3, func(m api.PerformanceMetric) { - if m.Action != actions[0] { - t.Fatalf("expected action to be %v, got %v", actions[0], m.Action) - } - }) - - // Filter by hosts. - assertMetrics(start, 3, time.Millisecond, api.PerformanceMetricsQueryOpts{HostKey: hosts[0]}, 3, func(m api.PerformanceMetric) { - if m.HostKey != hosts[0] { - t.Fatalf("expected hosts to be %v, got %v", hosts[0], m.HostKey) - } - }) - - // Filter by reporters. - assertMetrics(start, 3, time.Millisecond, api.PerformanceMetricsQueryOpts{Origin: origins[0]}, 3, func(m api.PerformanceMetric) { - if m.Origin != origins[0] { - t.Fatalf("expected origin to be %v, got %v", origins[0], m.Origin) - } - }) - - // Prune metrics - if err := ss.PruneMetrics(context.Background(), api.MetricPerformance, time.UnixMilli(3)); err != nil { - t.Fatal(err) - } else if metrics, err := ss.PerformanceMetrics(context.Background(), time.UnixMilli(1), 3, time.Millisecond, api.PerformanceMetricsQueryOpts{}); err != nil { - t.Fatal(err) - } else if len(metrics) != 1 { - t.Fatalf("expected 1 metric, got %v", len(metrics)) - } -} - func TestWalletMetrics(t *testing.T) { ss := newTestSQLStore(t, defaultTestSQLStoreConfig) defer ss.Close() diff --git a/stores/sql/database.go b/stores/sql/database.go index cc2aab0df..79fe789e9 100644 --- a/stores/sql/database.go +++ b/stores/sql/database.go @@ -52,7 +52,7 @@ type ( AbortMultipartUpload(ctx context.Context, bucket, key string, uploadID string) error // Accounts returns all accounts from the db. - Accounts(ctx context.Context) ([]api.Account, error) + Accounts(ctx context.Context, owner string) ([]api.Account, error) // AddMultipartPart adds a part to an unfinished multipart upload. 
AddMultipartPart(ctx context.Context, bucket, key, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error @@ -243,6 +243,10 @@ type ( // ProcessChainUpdate applies the given chain update to the database. ProcessChainUpdate(ctx context.Context, applyFn func(ChainUpdateTx) error) error + // PrunableContractRoots returns the indices of roots that are not in + // the contract. + PrunableContractRoots(ctx context.Context, fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) + // PruneEmptydirs prunes any directories that are empty. PruneEmptydirs(ctx context.Context) error @@ -307,7 +311,7 @@ type ( ResetLostSectors(ctx context.Context, hk types.PublicKey) error // SaveAccounts saves the given accounts in the db, overwriting any - // existing ones and setting the clean shutdown flag. + // existing ones. SaveAccounts(ctx context.Context, accounts []api.Account) error // SearchHosts returns a list of hosts that match the provided filters @@ -317,13 +321,10 @@ type ( // substring. SearchObjects(ctx context.Context, bucket, substring string, offset, limit int) ([]api.ObjectMetadata, error) - // SetUncleanShutdown sets the clean shutdown flag on the accounts to - // 'false' and also marks them as requiring a resync. - SetUncleanShutdown(ctx context.Context) error - - // SetContractSet creates the contract set with the given name and - // associates it with the provided contract IDs. - SetContractSet(ctx context.Context, name string, contractIds []types.FileContractID) error + // UpdateContractSet adds/removes the provided contract ids to/from + // the contract set. The contract set is created in the process if + // it doesn't exist already. + UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error // Setting returns the setting with the given key from the database. Setting(ctx context.Context, key string) (string, error) @@ -426,9 +427,6 @@ type ( // time range and options. 
ContractSetMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.ContractSetMetricsQueryOpts) ([]api.ContractSetMetric, error) - // PerformanceMetrics returns performance metrics for the given time range - PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) - // PruneMetrics deletes metrics of a certain type older than the given // cutoff time. PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error @@ -445,9 +443,6 @@ type ( // RecordContractSetMetric records contract set metrics. RecordContractSetMetric(ctx context.Context, metrics ...api.ContractSetMetric) error - // RecordPerformanceMetric records performance metrics. - RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error - // RecordWalletMetric records wallet metrics. RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error diff --git a/stores/sql/main.go b/stores/sql/main.go index bb03bd86d..557a5f666 100644 --- a/stores/sql/main.go +++ b/stores/sql/main.go @@ -97,8 +97,15 @@ func AbortMultipartUpload(ctx context.Context, tx sql.Tx, bucket, key string, up return errors.New("failed to delete multipart upload for unknown reason") } -func Accounts(ctx context.Context, tx sql.Tx) ([]api.Account, error) { - rows, err := tx.Query(ctx, "SELECT account_id, clean_shutdown, host, balance, drift, requires_sync FROM ephemeral_accounts") +func Accounts(ctx context.Context, tx sql.Tx, owner string) ([]api.Account, error) { + var whereExpr string + var args []any + if owner != "" { + whereExpr = "WHERE owner = ?" + args = append(args, owner) + } + rows, err := tx.Query(ctx, fmt.Sprintf("SELECT account_id, clean_shutdown, host, balance, drift, requires_sync, owner FROM ephemeral_accounts %s", whereExpr), + args...) 
if err != nil { return nil, fmt.Errorf("failed to fetch accounts: %w", err) } @@ -107,7 +114,7 @@ func Accounts(ctx context.Context, tx sql.Tx) ([]api.Account, error) { var accounts []api.Account for rows.Next() { a := api.Account{Balance: new(big.Int), Drift: new(big.Int)} // init big.Int - if err := rows.Scan((*PublicKey)(&a.ID), &a.CleanShutdown, (*PublicKey)(&a.HostKey), (*BigInt)(a.Balance), (*BigInt)(a.Drift), &a.RequiresSync); err != nil { + if err := rows.Scan((*PublicKey)(&a.ID), &a.CleanShutdown, (*PublicKey)(&a.HostKey), (*BigInt)(a.Balance), (*BigInt)(a.Drift), &a.RequiresSync, &a.Owner); err != nil { return nil, fmt.Errorf("failed to scan account: %w", err) } accounts = append(accounts, a) @@ -117,7 +124,7 @@ func Accounts(ctx context.Context, tx sql.Tx) ([]api.Account, error) { func AncestorContracts(ctx context.Context, tx sql.Tx, fcid types.FileContractID, startHeight uint64) ([]api.ArchivedContract, error) { rows, err := tx.Query(ctx, ` - WITH RECURSIVE ancestors AS + WITH RECURSIVE ancestors AS ( SELECT * FROM archived_contracts @@ -663,7 +670,7 @@ func HostsForScanning(ctx context.Context, tx sql.Tx, maxLastScan time.Time, off } rows, err := tx.Query(ctx, "SELECT public_key, net_address FROM hosts WHERE last_scan < ? LIMIT ? 
OFFSET ?", - maxLastScan.UnixNano(), limit, offset) + UnixTimeMS(maxLastScan), limit, offset) if err != nil { return nil, fmt.Errorf("failed to fetch hosts for scanning: %w", err) } @@ -715,7 +722,7 @@ func InsertContract(ctx context.Context, tx sql.Tx, rev rhpv2.ContractRevision, res, err := tx.Exec(ctx, ` INSERT INTO contracts (created_at, host_id, fcid, renewed_from, contract_price, state, total_cost, proof_height, - revision_height, revision_number, size, start_height, window_start, window_end, upload_spending, download_spending, + revision_height, revision_number, size, start_height, window_start, window_end, upload_spending, download_spending, fund_account_spending, delete_spending, list_spending) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `, time.Now(), hostID, FileContractID(rev.ID()), FileContractID(renewedFrom), Currency(contractPrice), @@ -1744,7 +1751,7 @@ func RecordHostScans(ctx context.Context, tx sql.Tx, scans []api.HostScan) error now := time.Now() for _, scan := range scans { - scanTime := scan.Timestamp.UnixNano() + scanTime := scan.Timestamp.UnixMilli() _, err = stmt.Exec(ctx, scan.Success, // scanned scan.Success, // last_scan_success @@ -1820,7 +1827,7 @@ func RemoveOfflineHosts(ctx context.Context, tx sql.Tx, minRecentFailures uint64 FROM contracts INNER JOIN hosts h ON h.id = contracts.host_id WHERE recent_downtime >= ? AND recent_scan_failures >= ? - `, maxDownTime, minRecentFailures) + `, DurationMS(maxDownTime), minRecentFailures) if err != nil { return 0, fmt.Errorf("failed to fetch contracts: %w", err) } @@ -1844,7 +1851,7 @@ func RemoveOfflineHosts(ctx context.Context, tx sql.Tx, minRecentFailures uint64 // delete hosts res, err := tx.Exec(ctx, "DELETE FROM hosts WHERE recent_downtime >= ? 
AND recent_scan_failures >= ?", - maxDownTime, minRecentFailures) + DurationMS(maxDownTime), minRecentFailures) if err != nil { return 0, fmt.Errorf("failed to delete hosts: %w", err) } @@ -1902,7 +1909,7 @@ func QueryContracts(ctx context.Context, tx sql.Tx, whereExprs []string, whereAr SELECT c.fcid, c.renewed_from, c.contract_price, c.state, c.total_cost, c.proof_height, c.revision_height, c.revision_number, c.size, c.start_height, c.window_start, c.window_end, c.upload_spending, c.download_spending, c.fund_account_spending, c.delete_spending, c.list_spending, - COALESCE(cs.name, ""), h.net_address, h.public_key, h.settings->>'$.siamuxport' AS siamux_port + COALESCE(cs.name, ""), h.net_address, h.public_key, COALESCE(h.settings->>'$.siamuxport', "") AS siamux_port FROM contracts AS c INNER JOIN hosts h ON h.id = c.host_id LEFT JOIN contract_set_contracts csc ON csc.db_contract_id = c.id @@ -2127,8 +2134,8 @@ func SearchHosts(ctx context.Context, tx sql.Tx, autopilot, filterMode, usabilit var resolvedAddresses string err := rows.Scan(&hostID, &h.KnownSince, &h.LastAnnouncement, (*PublicKey)(&h.PublicKey), &h.NetAddress, (*PriceTable)(&h.PriceTable.HostPriceTable), &pte, - (*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeNS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess, - &h.Interactions.SecondToLastScanSuccess, &h.Interactions.Uptime, &h.Interactions.Downtime, + (*HostSettings)(&h.Settings), &h.Interactions.TotalScans, (*UnixTimeMS)(&h.Interactions.LastScan), &h.Interactions.LastScanSuccess, + &h.Interactions.SecondToLastScanSuccess, (*DurationMS)(&h.Interactions.Uptime), (*DurationMS)(&h.Interactions.Downtime), &h.Interactions.SuccessfulInteractions, &h.Interactions.FailedInteractions, &h.Interactions.LostSectors, &h.Scanned, &resolvedAddresses, &h.Blocked, ) @@ -2229,20 +2236,12 @@ func Settings(ctx context.Context, tx sql.Tx) ([]string, error) { return settings, nil } -func SetUncleanShutdown(ctx context.Context, tx sql.Tx) 
error { - _, err := tx.Exec(ctx, "UPDATE ephemeral_accounts SET clean_shutdown = 0, requires_sync = 1") - if err != nil { - return fmt.Errorf("failed to set unclean shutdown: %w", err) - } - return err -} - func Slab(ctx context.Context, tx sql.Tx, key object.EncryptionKey) (object.Slab, error) { // fetch slab var slabID int64 slab := object.Slab{Key: key} err := tx.QueryRow(ctx, ` - SELECT id, health, min_shards + SELECT id, health, min_shards FROM slabs sla WHERE sla.key = ? `, EncryptionKey(key)).Scan(&slabID, &slab.Health, &slab.MinShards) @@ -2548,7 +2547,7 @@ func scanWalletEvent(s Scanner) (wallet.Event, error) { var inflow, outflow Currency var edata []byte var etype string - var ts UnixTimeNS + var ts UnixTimeMS if err := s.Scan( &eventID, &blockID, diff --git a/stores/sql/metrics.go b/stores/sql/metrics.go index 6f6e5420f..966b091a4 100644 --- a/stores/sql/metrics.go +++ b/stores/sql/metrics.go @@ -89,7 +89,7 @@ func ContractPruneMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uin &m.HostVersion, (*Unsigned64)(&m.Pruned), (*Unsigned64)(&m.Remaining), - &m.Duration, + (*DurationMS)(&m.Duration), ) if err != nil { err = fmt.Errorf("failed to scan contract prune metric: %w", err) @@ -144,29 +144,6 @@ func ContractSetMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint6 }) } -func PerformanceMetrics(ctx context.Context, tx sql.Tx, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { - return queryPeriods(ctx, tx, start, n, interval, opts, func(rows *sql.LoggedRows) (m api.PerformanceMetric, err error) { - var placeHolder int64 - var placeHolderTime time.Time - var timestamp UnixTimeMS - err = rows.Scan( - &placeHolder, - &placeHolderTime, - ×tamp, - &m.Action, - (*PublicKey)(&m.HostKey), - &m.Origin, - &m.Duration, - ) - if err != nil { - err = fmt.Errorf("failed to scan contract set metric: %w", err) - return - } - m.Timestamp = 
api.TimeRFC3339(normaliseTimestamp(start, interval, timestamp)) - return - }) -} - func PruneMetrics(ctx context.Context, tx sql.Tx, metric string, cutoff time.Time) error { if metric == "" { return errors.New("metric must be set") @@ -269,7 +246,7 @@ func RecordContractPruneMetric(ctx context.Context, tx sql.Tx, metrics ...api.Co metric.HostVersion, Unsigned64(metric.Pruned), Unsigned64(metric.Remaining), - metric.Duration, + (DurationMS)(metric.Duration), ) if err != nil { return fmt.Errorf("failed to insert contract prune metric: %w", err) @@ -337,34 +314,6 @@ func RecordContractSetMetric(ctx context.Context, tx sql.Tx, metrics ...api.Cont return nil } -func RecordPerformanceMetric(ctx context.Context, tx sql.Tx, metrics ...api.PerformanceMetric) error { - insertStmt, err := tx.Prepare(ctx, "INSERT INTO performance (created_at, timestamp, action, host, origin, duration) VALUES (?, ?, ?, ?, ?, ?)") - if err != nil { - return fmt.Errorf("failed to prepare statement to insert performance metric: %w", err) - } - defer insertStmt.Close() - - for _, metric := range metrics { - res, err := insertStmt.Exec(ctx, - time.Now().UTC(), - UnixTimeMS(metric.Timestamp), - metric.Action, - PublicKey(metric.HostKey), - metric.Origin, - metric.Duration, - ) - if err != nil { - return fmt.Errorf("failed to insert performance metric: %w", err) - } else if n, err := res.RowsAffected(); err != nil { - return fmt.Errorf("failed to get rows affected: %w", err) - } else if n == 0 { - return fmt.Errorf("failed to insert performance metric: no rows affected") - } - } - - return nil -} - func RecordWalletMetric(ctx context.Context, tx sql.Tx, metrics ...api.WalletMetric) error { insertStmt, err := tx.Prepare(ctx, "INSERT INTO wallets (created_at, timestamp, confirmed_lo, confirmed_hi, spendable_lo, spendable_hi, unconfirmed_lo, unconfirmed_hi, immature_hi, immature_lo) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") if err != nil { diff --git a/stores/sql/mysql/chain.go b/stores/sql/mysql/chain.go 
index 56d5ba340..4e5720c9e 100644 --- a/stores/sql/mysql/chain.go +++ b/stores/sql/mysql/chain.go @@ -102,7 +102,7 @@ func (c chainUpdateTx) WalletApplyIndex(index types.ChainIndex, created, spent [ e.Type, data, e.MaturityHeight, - ssql.UnixTimeNS(e.Timestamp), + ssql.UnixTimeMS(e.Timestamp), ); err != nil { return fmt.Errorf("failed to insert new event: %w", err) } diff --git a/stores/sql/mysql/main.go b/stores/sql/mysql/main.go index 08ff0010e..4d2b730c8 100644 --- a/stores/sql/mysql/main.go +++ b/stores/sql/mysql/main.go @@ -26,6 +26,10 @@ import ( "go.uber.org/zap" ) +const ( + batchSizeInsertSectors = 500 +) + type ( MainDatabase struct { db *sql.DB @@ -95,8 +99,8 @@ func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path return ssql.AbortMultipartUpload(ctx, tx, bucket, path, uploadID) } -func (tx *MainDatabaseTx) Accounts(ctx context.Context) ([]api.Account, error) { - return ssql.Accounts(ctx, tx) +func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Account, error) { + return ssql.Accounts(ctx, tx, owner) } func (tx *MainDatabaseTx) AddMultipartPart(ctx context.Context, bucket, path, contractSet, eTag, uploadID string, partNumber int, slices object.SlabSlices) error { @@ -566,6 +570,62 @@ func (tx *MainDatabaseTx) ProcessChainUpdate(ctx context.Context, fn func(ssql.C }) } +func (tx *MainDatabaseTx) PrunableContractRoots(ctx context.Context, fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) { + // build tmp table name + tmpTable := strings.ReplaceAll(fmt.Sprintf("tmp_host_roots_%s", fcid.String()[:8]), ":", "_") + + // create temporary table + _, err = tx.Exec(ctx, fmt.Sprintf(` +DROP TABLE IF EXISTS %s; +CREATE TEMPORARY TABLE %s (idx INT, root varbinary(32)) ENGINE=MEMORY; +CREATE INDEX %s_idx ON %s (root(32));`, tmpTable, tmpTable, tmpTable, tmpTable)) + if err != nil { + return nil, fmt.Errorf("failed to create temporary table: %w", err) + } + + // defer removal + defer 
func() { + if _, err := tx.Exec(ctx, fmt.Sprintf(`DROP TABLE %s;`, tmpTable)); err != nil { + tx.log.Warnw("failed to drop temporary table", zap.Error(err)) + } + }() + + // insert roots in batches + for i := 0; i < len(roots); i += batchSizeInsertSectors { + end := i + batchSizeInsertSectors + if end > len(roots) { + end = len(roots) + } + + var params []interface{} + for i, r := range roots[i:end] { + params = append(params, uint64(i), ssql.Hash256(r)) + } + + _, err = tx.Exec(ctx, fmt.Sprintf(`INSERT INTO %s (idx, root) VALUES %s`, tmpTable, strings.TrimSuffix(strings.Repeat("(?, ?), ", end-i), ", ")), params...) + if err != nil { + return nil, fmt.Errorf("failed to insert into roots into temporary table: %w", err) + } + } + + // execute query + rows, err := tx.Query(ctx, fmt.Sprintf(`SELECT idx FROM %s tmp LEFT JOIN sectors s ON s.root = tmp.root WHERE s.root IS NULL`, tmpTable)) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract roots: %w", err) + } + defer rows.Close() + + // fetch indices + for rows.Next() { + var idx uint64 + if err := rows.Scan(&idx); err != nil { + return nil, fmt.Errorf("failed to scan root index: %w", err) + } + indices = append(indices, idx) + } + return +} + func (tx *MainDatabaseTx) PruneEmptydirs(ctx context.Context) error { stmt, err := tx.Prepare(ctx, ` DELETE @@ -719,11 +779,11 @@ func (tx *MainDatabaseTx) ResetLostSectors(ctx context.Context, hk types.PublicK func (tx MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Account) error { // clean_shutdown = 1 after save stmt, err := tx.Prepare(ctx, ` - INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync) - VAlUES (?, ?, 1, ?, ?, ?, ?) + INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync, owner) + VAlUES (?, ?, ?, ?, ?, ?, ?, ?) 
ON DUPLICATE KEY UPDATE account_id = VALUES(account_id), - clean_shutdown = 1, + clean_shutdown = VALUES(clean_shutdown), host = VALUES(host), balance = VALUES(balance), drift = VALUES(drift), @@ -735,7 +795,7 @@ func (tx MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accoun defer stmt.Close() for _, acc := range accounts { - res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync) + res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), acc.CleanShutdown, (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync, acc.Owner) if err != nil { return fmt.Errorf("failed to insert account %v: %w", acc.ID, err) } else if n, err := res.RowsAffected(); err != nil { @@ -768,57 +828,6 @@ func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, o.created_at, o.etag" } -func (tx *MainDatabaseTx) SetContractSet(ctx context.Context, name string, contractIds []types.FileContractID) error { - res, err := tx.Exec(ctx, "INSERT INTO contract_sets (name) VALUES (?) ON DUPLICATE KEY UPDATE id = last_insert_id(id)", name) - if err != nil { - return fmt.Errorf("failed to insert contract set: %w", err) - } - - csID, err := res.LastInsertId() - if err != nil { - return fmt.Errorf("failed to fetch contract set id: %w", err) - } - - // handle empty set - if len(contractIds) == 0 { - _, err := tx.Exec(ctx, "DELETE FROM contract_set_contracts WHERE db_contract_set_id = ?", csID) - return err - } - - // prepare fcid args and query - fcidQuery := strings.Repeat("?, ", len(contractIds)-1) + "?" 
- fcidArgs := make([]interface{}, len(contractIds)) - for i, fcid := range contractIds { - fcidArgs[i] = ssql.FileContractID(fcid) - } - - // remove unwanted contracts - _, err = tx.Exec(ctx, fmt.Sprintf(` - DELETE csc - FROM contract_set_contracts csc - INNER JOIN contracts c ON c.id = csc.db_contract_id - WHERE c.fcid NOT IN (%s) - `, fcidQuery), fcidArgs...) - if err != nil { - return fmt.Errorf("failed to delete contract set contracts: %w", err) - } - - // add missing contracts - args := []interface{}{csID} - args = append(args, fcidArgs...) - _, err = tx.Exec(ctx, fmt.Sprintf(` - INSERT INTO contract_set_contracts (db_contract_set_id, db_contract_id) - SELECT ?, c.id - FROM contracts c - WHERE c.fcid IN (%s) - ON DUPLICATE KEY UPDATE db_contract_set_id = VALUES(db_contract_set_id) - `, fcidQuery), args...) - if err != nil { - return fmt.Errorf("failed to add contract set contracts: %w", err) - } - return nil -} - func (tx *MainDatabaseTx) Setting(ctx context.Context, key string) (string, error) { return ssql.Setting(ctx, tx, key) } @@ -827,10 +836,6 @@ func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { return ssql.Settings(ctx, tx) } -func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { - return ssql.SetUncleanShutdown(ctx, tx) -} - func (tx *MainDatabaseTx) Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) { return ssql.Slab(ctx, tx, key) } @@ -866,6 +871,62 @@ func (tx *MainDatabaseTx) UpdateBucketPolicy(ctx context.Context, bucket string, return ssql.UpdateBucketPolicy(ctx, tx, bucket, bp) } +func (tx *MainDatabaseTx) UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error { + res, err := tx.Exec(ctx, "INSERT INTO contract_sets (name) VALUES (?) 
ON DUPLICATE KEY UPDATE id = last_insert_id(id)", name) + if err != nil { + return fmt.Errorf("failed to insert contract set: %w", err) + } + + // if no changes are needed, return after creating the set + if len(toAdd)+len(toRemove) == 0 { + return nil + } + + csID, err := res.LastInsertId() + if err != nil { + return fmt.Errorf("failed to fetch contract set id: %w", err) + } + + prepareQuery := func(fcids []types.FileContractID) (string, []any) { + args := []any{csID} + query := strings.Repeat("?, ", len(fcids)-1) + "?" + for _, fcid := range fcids { + args = append(args, ssql.FileContractID(fcid)) + } + return query, args + } + + // remove unwanted contracts first + if len(toRemove) > 0 { + query, args := prepareQuery(toRemove) + _, err = tx.Exec(ctx, fmt.Sprintf(` + DELETE csc + FROM contract_set_contracts csc + INNER JOIN contracts c ON c.id = csc.db_contract_id + WHERE csc.db_contract_set_id = ? AND c.fcid IN (%s) + `, query), args...) + if err != nil { + return fmt.Errorf("failed to remove contracts: %w", err) + } + } + + // add new contracts + if len(toAdd) > 0 { + query, args := prepareQuery(toAdd) + _, err = tx.Exec(ctx, fmt.Sprintf(` + INSERT INTO contract_set_contracts (db_contract_set_id, db_contract_id) + SELECT ?, c.id + FROM contracts c + WHERE c.fcid IN (%s) + ON DUPLICATE KEY UPDATE db_contract_set_id = VALUES(db_contract_set_id) + `, query), args...) + if err != nil { + return fmt.Errorf("failed to add contract set contracts: %w", err) + } + } + return nil +} + func (tx *MainDatabaseTx) UpdateHostAllowlistEntries(ctx context.Context, add, remove []types.PublicKey, clear bool) error { if clear { if _, err := tx.Exec(ctx, "DELETE FROM host_allowlist_entries"); err != nil { diff --git a/stores/sql/mysql/metrics.go b/stores/sql/mysql/metrics.go index e7ef23813..8a64fa155 100644 --- a/stores/sql/mysql/metrics.go +++ b/stores/sql/mysql/metrics.go @@ -89,10 +89,6 @@ func (tx *MetricsDatabaseTx) ContractSetMetrics(ctx context.Context, start time. 
return ssql.ContractSetMetrics(ctx, tx, start, n, interval, opts) } -func (tx *MetricsDatabaseTx) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { - return ssql.PerformanceMetrics(ctx, tx, start, n, interval, opts) -} - func (tx *MetricsDatabaseTx) PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error { return ssql.PruneMetrics(ctx, tx, metric, cutoff) } @@ -113,10 +109,6 @@ func (tx *MetricsDatabaseTx) RecordContractSetMetric(ctx context.Context, metric return ssql.RecordContractSetMetric(ctx, tx, metrics...) } -func (tx *MetricsDatabaseTx) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { - return ssql.RecordPerformanceMetric(ctx, tx, metrics...) -} - func (tx *MetricsDatabaseTx) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { return ssql.RecordWalletMetric(ctx, tx, metrics...) } diff --git a/stores/sql/mysql/migrations/main/migration_00016_account_owner.sql b/stores/sql/mysql/migrations/main/migration_00016_account_owner.sql new file mode 100644 index 000000000..8f188ae7a --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00016_account_owner.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS ephemeral_accounts; + +CREATE TABLE `ephemeral_accounts` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT, + `created_at` datetime(3) DEFAULT NULL, + `account_id` varbinary(32) NOT NULL, + `clean_shutdown` tinyint(1) DEFAULT '0', + `host` longblob NOT NULL, + `balance` longtext, + `drift` longtext, + `requires_sync` tinyint(1) DEFAULT NULL, + `owner` varchar(128) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE KEY `account_id` (`account_id`), + KEY `idx_ephemeral_accounts_requires_sync` (`requires_sync`), + KEY `idx_ephemeral_accounts_owner` (`owner`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; \ No newline at end of file diff --git 
a/stores/sql/mysql/migrations/main/migration_00017_unix_ms.sql b/stores/sql/mysql/migrations/main/migration_00017_unix_ms.sql new file mode 100644 index 000000000..dee60ba92 --- /dev/null +++ b/stores/sql/mysql/migrations/main/migration_00017_unix_ms.sql @@ -0,0 +1,4 @@ +UPDATE hosts SET hosts.last_scan = CAST(hosts.last_scan / 1000000 AS SIGNED); +UPDATE hosts SET hosts.uptime = CAST(hosts.uptime / 1000000 AS SIGNED); +UPDATE hosts SET hosts.downtime = CAST(hosts.downtime / 1000000 AS SIGNED); +UPDATE wallet_events SET wallet_events.timestamp = CAST(wallet_events.timestamp / 1000000 AS SIGNED); diff --git a/stores/sql/mysql/migrations/main/schema.sql b/stores/sql/mysql/migrations/main/schema.sql index 51a5c5629..96008942a 100644 --- a/stores/sql/mysql/migrations/main/schema.sql +++ b/stores/sql/mysql/migrations/main/schema.sql @@ -225,9 +225,11 @@ CREATE TABLE `ephemeral_accounts` ( `balance` longtext, `drift` longtext, `requires_sync` tinyint(1) DEFAULT NULL, + `owner` varchar(128) NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `account_id` (`account_id`), - KEY `idx_ephemeral_accounts_requires_sync` (`requires_sync`) + KEY `idx_ephemeral_accounts_requires_sync` (`requires_sync`), + KEY `idx_ephemeral_accounts_owner` (`owner`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; -- dbAllowlistEntry diff --git a/stores/sql/mysql/migrations/metrics/migration_00003_unix_ms.sql b/stores/sql/mysql/migrations/metrics/migration_00003_unix_ms.sql new file mode 100644 index 000000000..0960761d3 --- /dev/null +++ b/stores/sql/mysql/migrations/metrics/migration_00003_unix_ms.sql @@ -0,0 +1,2 @@ +UPDATE contract_prunes SET contract_prunes.timestamp = CAST(contract_prunes.timestamp / 1000000 AS SIGNED); +DROP TABLE IF EXISTS performance; diff --git a/stores/sql/mysql/migrations/metrics/schema.sql b/stores/sql/mysql/migrations/metrics/schema.sql index 7c4c27d6c..8dcb97769 100644 --- a/stores/sql/mysql/migrations/metrics/schema.sql +++ 
b/stores/sql/mysql/migrations/metrics/schema.sql @@ -86,23 +86,6 @@ CREATE TABLE `contracts` ( KEY `idx_contracts_fcid_timestamp` (`fcid`,`timestamp`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; --- dbPerformanceMetric -CREATE TABLE `performance` ( - `id` bigint unsigned NOT NULL AUTO_INCREMENT, - `created_at` datetime(3) DEFAULT NULL, - `timestamp` bigint NOT NULL, - `action` varchar(191) NOT NULL, - `host` varbinary(32) NOT NULL, - `origin` varchar(191) NOT NULL, - `duration` bigint NOT NULL, - PRIMARY KEY (`id`), - KEY `idx_performance_host` (`host`), - KEY `idx_performance_origin` (`origin`), - KEY `idx_performance_duration` (`duration`), - KEY `idx_performance_timestamp` (`timestamp`), - KEY `idx_performance_action` (`action`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; - -- dbWalletMetric CREATE TABLE `wallets` ( `id` bigint unsigned NOT NULL AUTO_INCREMENT, @@ -122,4 +105,4 @@ CREATE TABLE `wallets` ( KEY `idx_spendable` (`spendable_lo`,`spendable_hi`), KEY `idx_unconfirmed` (`unconfirmed_lo`,`unconfirmed_hi`), KEY `idx_wallets_immature` (`immature_lo`,`immature_hi`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; \ No newline at end of file +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci; diff --git a/stores/sql/sqlite/chain.go b/stores/sql/sqlite/chain.go index 74a35d7b8..0ec937b73 100644 --- a/stores/sql/sqlite/chain.go +++ b/stores/sql/sqlite/chain.go @@ -105,7 +105,7 @@ func (c chainUpdateTx) WalletApplyIndex(index types.ChainIndex, created, spent [ e.Type, data, e.MaturityHeight, - ssql.UnixTimeNS(e.Timestamp), + ssql.UnixTimeMS(e.Timestamp), ); err != nil { return fmt.Errorf("failed to insert new event: %w", err) } diff --git a/stores/sql/sqlite/main.go b/stores/sql/sqlite/main.go index b72ec5e8c..d70d9b438 100644 --- a/stores/sql/sqlite/main.go +++ b/stores/sql/sqlite/main.go @@ -25,6 +25,10 @@ import ( "go.uber.org/zap" ) +const ( + batchSizeInsertSectors = 500 
+) + type ( MainDatabase struct { db *sql.DB @@ -90,8 +94,8 @@ func (b *MainDatabase) wrapTxn(tx sql.Tx) *MainDatabaseTx { return &MainDatabaseTx{tx, b.log.Named(hex.EncodeToString(frand.Bytes(16)))} } -func (tx *MainDatabaseTx) Accounts(ctx context.Context) ([]api.Account, error) { - return ssql.Accounts(ctx, tx) +func (tx *MainDatabaseTx) Accounts(ctx context.Context, owner string) ([]api.Account, error) { + return ssql.Accounts(ctx, tx, owner) } func (tx *MainDatabaseTx) AbortMultipartUpload(ctx context.Context, bucket, path string, uploadID string) error { @@ -447,7 +451,6 @@ func (tx *MainDatabaseTx) InvalidateSlabHealthByFCID(ctx context.Context, fcids ) `, strings.Repeat("?, ", len(fcids)-1)+"?"), args...) if err != nil { - fmt.Println(strings.Repeat("?, ", len(fcids)-1) + "?") return 0, err } return res.RowsAffected() @@ -563,6 +566,76 @@ func (tx *MainDatabaseTx) ProcessChainUpdate(ctx context.Context, fn func(ssql.C }) } +func (tx *MainDatabaseTx) PrunableContractRoots(ctx context.Context, fcid types.FileContractID, roots []types.Hash256) (indices []uint64, err error) { + // build tmp table name + tmpTable := strings.ReplaceAll(fmt.Sprintf("tmp_host_roots_%s", fcid.String()[:8]), ":", "_") + + // create temporary table + _, err = tx.Exec(ctx, fmt.Sprintf(` +DROP TABLE IF EXISTS %s; +CREATE TEMPORARY TABLE %s (idx INT, root blob); +CREATE INDEX %s_idx ON %s (root);`, tmpTable, tmpTable, tmpTable, tmpTable)) + if err != nil { + return nil, fmt.Errorf("failed to create temporary table: %w", err) + } + + // defer removal + defer func() { + if _, err := tx.Exec(ctx, fmt.Sprintf(`DROP TABLE %s;`, tmpTable)); err != nil { + tx.log.Warnw("failed to drop temporary table", zap.Error(err)) + } + }() + + // prepare insert statement + insertStmt, err := tx.Prepare(ctx, fmt.Sprintf(`INSERT INTO %s (idx, root) VALUES %s`, tmpTable, strings.TrimSuffix(strings.Repeat("(?, ?), ", batchSizeInsertSectors), ", "))) + if err != nil { + return nil, fmt.Errorf("failed to prepare 
statement to insert contract roots: %w", err) + } + defer insertStmt.Close() + + // insert roots in batches + for i := 0; i < len(roots); i += batchSizeInsertSectors { + end := i + batchSizeInsertSectors + if end > len(roots) { + end = len(roots) + } + + var params []interface{} + for i, r := range roots[i:end] { + params = append(params, uint64(i), ssql.Hash256(r)) + } + + if len(params) == batchSizeInsertSectors { + _, err := insertStmt.Exec(ctx, params...) + if err != nil { + return nil, fmt.Errorf("failed to insert into roots into temporary table: %w", err) + } + } else { + _, err = tx.Exec(ctx, fmt.Sprintf(`INSERT INTO %s (idx, root) VALUES %s`, tmpTable, strings.TrimSuffix(strings.Repeat("(?, ?), ", end-i), ", ")), params...) + if err != nil { + return nil, fmt.Errorf("failed to insert into roots into temporary table: %w", err) + } + } + } + + // execute query + rows, err := tx.Query(ctx, fmt.Sprintf(`SELECT idx FROM %s tmp LEFT JOIN sectors s ON s.root = tmp.root WHERE s.root IS NULL`, tmpTable)) + if err != nil { + return nil, fmt.Errorf("failed to fetch contract roots: %w", err) + } + defer rows.Close() + + // fetch indices + for rows.Next() { + var idx uint64 + if err := rows.Scan(&idx); err != nil { + return nil, fmt.Errorf("failed to scan root index: %w", err) + } + indices = append(indices, idx) + } + return +} + func (tx *MainDatabaseTx) PruneEmptydirs(ctx context.Context) error { stmt, err := tx.Prepare(ctx, ` DELETE @@ -717,11 +790,11 @@ func (tx *MainDatabaseTx) ResetLostSectors(ctx context.Context, hk types.PublicK func (tx *MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Account) error { // clean_shutdown = 1 after save stmt, err := tx.Prepare(ctx, ` - INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync) - VAlUES (?, ?, 1, ?, ?, ?, ?) 
+ INSERT INTO ephemeral_accounts (created_at, account_id, clean_shutdown, host, balance, drift, requires_sync, owner) + VAlUES (?, ?, ?, ?, ?, ?, ?, ?) ON CONFLICT(account_id) DO UPDATE SET account_id = EXCLUDED.account_id, - clean_shutdown = 1, + clean_shutdown = EXCLUDED.clean_shutdown, host = EXCLUDED.host, balance = EXCLUDED.balance, drift = EXCLUDED.drift, @@ -733,7 +806,7 @@ func (tx *MainDatabaseTx) SaveAccounts(ctx context.Context, accounts []api.Accou defer stmt.Close() for _, acc := range accounts { - res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync) + res, err := stmt.Exec(ctx, time.Now(), (ssql.PublicKey)(acc.ID), acc.CleanShutdown, (ssql.PublicKey)(acc.HostKey), (*ssql.BigInt)(acc.Balance), (*ssql.BigInt)(acc.Drift), acc.RequiresSync, acc.Owner) if err != nil { return fmt.Errorf("failed to insert account %v: %w", acc.ID, err) } else if n, err := res.RowsAffected(); err != nil { @@ -769,51 +842,56 @@ func (tx *MainDatabaseTx) SelectObjectMetadataExpr() string { return "o.object_id, o.size, o.health, o.mime_type, DATETIME(o.created_at), o.etag" } -func (tx *MainDatabaseTx) SetContractSet(ctx context.Context, name string, contractIds []types.FileContractID) error { +func (tx *MainDatabaseTx) UpdateContractSet(ctx context.Context, name string, toAdd, toRemove []types.FileContractID) error { var csID int64 err := tx.QueryRow(ctx, "INSERT INTO contract_sets (name) VALUES (?) ON CONFLICT(name) DO UPDATE SET id = id RETURNING id", name).Scan(&csID) if err != nil { return fmt.Errorf("failed to fetch contract set id: %w", err) } - // handle empty set - if len(contractIds) == 0 { - _, err := tx.Exec(ctx, "DELETE FROM contract_set_contracts WHERE db_contract_set_id = ?", csID) - return err - } - - // prepare fcid args and query - fcidQuery := strings.Repeat("?, ", len(contractIds)-1) + "?" 
- fcidArgs := make([]interface{}, len(contractIds)) - for i, fcid := range contractIds { - fcidArgs[i] = ssql.FileContractID(fcid) + // if no changes are needed, return after creating the set + if len(toAdd)+len(toRemove) == 0 { + return nil } - // remove unwanted contracts - args := []interface{}{csID} - args = append(args, fcidArgs...) - _, err = tx.Exec(ctx, fmt.Sprintf(` - DELETE FROM contract_set_contracts - WHERE db_contract_set_id = ? AND db_contract_id NOT IN ( - SELECT id - FROM contracts - WHERE contracts.fcid IN (%s) - ) - `, fcidQuery), args...) - if err != nil { - return fmt.Errorf("failed to delete contract set contracts: %w", err) + prepareQuery := func(fcids []types.FileContractID) (string, []any) { + args := []any{csID} + query := strings.Repeat("?, ", len(fcids)-1) + "?" + for _, fcid := range fcids { + args = append(args, ssql.FileContractID(fcid)) + } + return query, args + } + + // remove unwanted contracts first + if len(toRemove) > 0 { + query, args := prepareQuery(toRemove) + _, err = tx.Exec(ctx, fmt.Sprintf(` + DELETE FROM contract_set_contracts + WHERE db_contract_set_id = ? AND db_contract_id IN ( + SELECT id + FROM contracts + WHERE contracts.fcid IN (%s) + ) + `, query), args...) + if err != nil { + return fmt.Errorf("failed to delete contract set contracts: %w", err) + } } - // add missing contracts - _, err = tx.Exec(ctx, fmt.Sprintf(` - INSERT INTO contract_set_contracts (db_contract_set_id, db_contract_id) - SELECT ?, c.id - FROM contracts c - WHERE c.fcid IN (%s) - ON CONFLICT(db_contract_set_id, db_contract_id) DO NOTHING - `, fcidQuery), args...) 
- if err != nil { - return fmt.Errorf("failed to add contract set contracts: %w", err) + // add new contracts + if len(toAdd) > 0 { + query, args := prepareQuery(toAdd) + _, err = tx.Exec(ctx, fmt.Sprintf(` + INSERT INTO contract_set_contracts (db_contract_set_id, db_contract_id) + SELECT ?, c.id + FROM contracts c + WHERE c.fcid IN (%s) + ON CONFLICT(db_contract_set_id, db_contract_id) DO NOTHING + `, query), args...) + if err != nil { + return fmt.Errorf("failed to add contract set contracts: %w", err) + } } return nil } @@ -826,10 +904,6 @@ func (tx *MainDatabaseTx) Settings(ctx context.Context) ([]string, error) { return ssql.Settings(ctx, tx) } -func (tx *MainDatabaseTx) SetUncleanShutdown(ctx context.Context) error { - return ssql.SetUncleanShutdown(ctx, tx) -} - func (tx *MainDatabaseTx) Slab(ctx context.Context, key object.EncryptionKey) (object.Slab, error) { return ssql.Slab(ctx, tx, key) } diff --git a/stores/sql/sqlite/metrics.go b/stores/sql/sqlite/metrics.go index df912d7c7..70281b114 100644 --- a/stores/sql/sqlite/metrics.go +++ b/stores/sql/sqlite/metrics.go @@ -88,10 +88,6 @@ func (tx *MetricsDatabaseTx) ContractSetMetrics(ctx context.Context, start time. return ssql.ContractSetMetrics(ctx, tx, start, n, interval, opts) } -func (tx *MetricsDatabaseTx) PerformanceMetrics(ctx context.Context, start time.Time, n uint64, interval time.Duration, opts api.PerformanceMetricsQueryOpts) ([]api.PerformanceMetric, error) { - return ssql.PerformanceMetrics(ctx, tx, start, n, interval, opts) -} - func (tx *MetricsDatabaseTx) PruneMetrics(ctx context.Context, metric string, cutoff time.Time) error { return ssql.PruneMetrics(ctx, tx, metric, cutoff) } @@ -112,10 +108,6 @@ func (tx *MetricsDatabaseTx) RecordContractSetMetric(ctx context.Context, metric return ssql.RecordContractSetMetric(ctx, tx, metrics...) 
} -func (tx *MetricsDatabaseTx) RecordPerformanceMetric(ctx context.Context, metrics ...api.PerformanceMetric) error { - return ssql.RecordPerformanceMetric(ctx, tx, metrics...) -} - func (tx *MetricsDatabaseTx) RecordWalletMetric(ctx context.Context, metrics ...api.WalletMetric) error { return ssql.RecordWalletMetric(ctx, tx, metrics...) } diff --git a/stores/sql/sqlite/migrations/main/migration_00016_account_owner.sql b/stores/sql/sqlite/migrations/main/migration_00016_account_owner.sql new file mode 100644 index 000000000..359830ba0 --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00016_account_owner.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS ephemeral_accounts; + +CREATE TABLE `ephemeral_accounts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`account_id` blob NOT NULL UNIQUE,`clean_shutdown` numeric DEFAULT false,`host` blob NOT NULL,`balance` text,`drift` text,`requires_sync` numeric, `owner` text NOT NULL); +CREATE INDEX `idx_ephemeral_accounts_requires_sync` ON `ephemeral_accounts`(`requires_sync`); +CREATE INDEX `idx_ephemeral_accounts_owner` ON `ephemeral_accounts`(`owner`); \ No newline at end of file diff --git a/stores/sql/sqlite/migrations/main/migration_00017_unix_ms.sql b/stores/sql/sqlite/migrations/main/migration_00017_unix_ms.sql new file mode 100644 index 000000000..992897d7e --- /dev/null +++ b/stores/sql/sqlite/migrations/main/migration_00017_unix_ms.sql @@ -0,0 +1,4 @@ +UPDATE hosts SET last_scan = CAST(last_scan / 1000000 AS SIGNED); +UPDATE hosts SET uptime = CAST(uptime / 1000000 AS SIGNED); +UPDATE hosts SET downtime = CAST(downtime / 1000000 AS SIGNED); +UPDATE wallet_events SET timestamp = CAST(timestamp / 1000000 AS SIGNED); diff --git a/stores/sql/sqlite/migrations/main/schema.sql b/stores/sql/sqlite/migrations/main/schema.sql index 647e6cfdd..6d8d0ee6c 100644 --- a/stores/sql/sqlite/migrations/main/schema.sql +++ b/stores/sql/sqlite/migrations/main/schema.sql @@ -130,8 +130,9 @@ CREATE TABLE `settings` 
(`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` dat CREATE INDEX `idx_settings_key` ON `settings`(`key`); -- dbAccount -CREATE TABLE `ephemeral_accounts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`account_id` blob NOT NULL UNIQUE,`clean_shutdown` numeric DEFAULT false,`host` blob NOT NULL,`balance` text,`drift` text,`requires_sync` numeric); +CREATE TABLE `ephemeral_accounts` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`account_id` blob NOT NULL UNIQUE,`clean_shutdown` numeric DEFAULT false,`host` blob NOT NULL,`balance` text,`drift` text,`requires_sync` numeric, `owner` text NOT NULL); CREATE INDEX `idx_ephemeral_accounts_requires_sync` ON `ephemeral_accounts`(`requires_sync`); +CREATE INDEX `idx_ephemeral_accounts_owner` ON `ephemeral_accounts`(`owner`); -- dbAutopilot CREATE TABLE `autopilots` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`identifier` text NOT NULL UNIQUE,`config` text,`current_period` integer DEFAULT 0); diff --git a/stores/sql/sqlite/migrations/metrics/migration_00003_unix_ms.sql b/stores/sql/sqlite/migrations/metrics/migration_00003_unix_ms.sql new file mode 100644 index 000000000..3538731fc --- /dev/null +++ b/stores/sql/sqlite/migrations/metrics/migration_00003_unix_ms.sql @@ -0,0 +1,2 @@ +UPDATE contract_prunes SET timestamp = CAST(timestamp / 1000000 AS SIGNED); +DROP TABLE IF EXISTS performance; diff --git a/stores/sql/sqlite/migrations/metrics/schema.sql b/stores/sql/sqlite/migrations/metrics/schema.sql index dfb8e3cf1..6741c72ac 100644 --- a/stores/sql/sqlite/migrations/metrics/schema.sql +++ b/stores/sql/sqlite/migrations/metrics/schema.sql @@ -37,14 +37,6 @@ CREATE INDEX `idx_contract_sets_churn_fc_id` ON `contract_sets_churn`(`fc_id`); CREATE INDEX `idx_contract_sets_churn_name` ON `contract_sets_churn`(`name`); CREATE INDEX `idx_contract_sets_churn_timestamp` ON `contract_sets_churn`(`timestamp`); --- dbPerformanceMetric -CREATE TABLE `performance` (`id` integer PRIMARY KEY 
AUTOINCREMENT,`created_at` datetime,`timestamp` BIGINT NOT NULL,`action` text NOT NULL,`host` blob NOT NULL,`origin` text NOT NULL,`duration` integer NOT NULL); -CREATE INDEX `idx_performance_duration` ON `performance`(`duration`); -CREATE INDEX `idx_performance_origin` ON `performance`(`origin`); -CREATE INDEX `idx_performance_host` ON `performance`(`host`); -CREATE INDEX `idx_performance_action` ON `performance`(`action`); -CREATE INDEX `idx_performance_timestamp` ON `performance`(`timestamp`); - -- dbWalletMetric CREATE TABLE `wallets` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`timestamp` BIGINT NOT NULL,`confirmed_lo` BIGINT NOT NULL,`confirmed_hi` BIGINT NOT NULL,`spendable_lo` BIGINT NOT NULL,`spendable_hi` BIGINT NOT NULL,`unconfirmed_lo` BIGINT NOT NULL,`unconfirmed_hi` BIGINT NOT NULL,`immature_lo` BIGINT NOT NULL,`immature_hi` BIGINT NOT NULL); CREATE INDEX `idx_unconfirmed` ON `wallets`(`unconfirmed_lo`,`unconfirmed_hi`); diff --git a/stores/sql/types.go b/stores/sql/types.go index 10cf76e42..ae71e16dd 100644 --- a/stores/sql/types.go +++ b/stores/sql/types.go @@ -43,7 +43,7 @@ type ( EncryptionKey object.EncryptionKey Uint64Str uint64 UnixTimeMS time.Time - UnixTimeNS time.Time + DurationMS time.Duration Unsigned64 uint64 ) @@ -66,7 +66,7 @@ var ( _ scannerValuer = (*PublicKey)(nil) _ scannerValuer = (*EncryptionKey)(nil) _ scannerValuer = (*UnixTimeMS)(nil) - _ scannerValuer = (*UnixTimeNS)(nil) + _ scannerValuer = (*DurationMS)(nil) _ scannerValuer = (*Unsigned64)(nil) ) @@ -204,6 +204,9 @@ func (hs *HostSettings) Scan(value interface{}) error { // Value returns a HostSettings value, implements driver.Valuer interface. func (hs HostSettings) Value() (driver.Value, error) { + if hs == (HostSettings{}) { + return []byte("{}"), nil + } return json.Marshal(hs) } @@ -218,6 +221,9 @@ func (pt *PriceTable) Scan(value interface{}) error { // Value returns a PriceTable value, implements driver.Valuer interface. 
func (pt PriceTable) Value() (driver.Value, error) { + if pt == (PriceTable{}) { + return []byte("{}"), nil + } return json.Marshal(pt) } @@ -331,8 +337,10 @@ func (u *UnixTimeMS) Scan(value interface{}) error { default: return fmt.Errorf("failed to unmarshal unixTimeMS value: %v %T", value, value) } - - *u = UnixTimeMS(time.UnixMilli(msec)) + *u = UnixTimeMS(time.Time{}) + if msec > 0 { + *u = UnixTimeMS(time.UnixMilli(msec)) + } return nil } @@ -342,34 +350,30 @@ func (u UnixTimeMS) Value() (driver.Value, error) { return time.Time(u).UnixMilli(), nil } -// Scan scan value into UnixTimeNS, implements sql.Scanner interface. -func (u *UnixTimeNS) Scan(value interface{}) error { - var nsec int64 +// Scan scan value into DurationMS, implements sql.Scanner interface. +func (d *DurationMS) Scan(value interface{}) error { + var msec int64 var err error switch value := value.(type) { case int64: - nsec = value + msec = value case []uint8: - nsec, err = strconv.ParseInt(string(value), 10, 64) + msec, err = strconv.ParseInt(string(value), 10, 64) if err != nil { - return fmt.Errorf("failed to unmarshal UnixTimeNS value: %v %T", value, value) + return fmt.Errorf("failed to unmarshal DurationMS value: %v %T", value, value) } default: - return fmt.Errorf("failed to unmarshal UnixTimeNS value: %v %T", value, value) + return fmt.Errorf("failed to unmarshal DurationMS value: %v %T", value, value) } - if nsec == 0 { - *u = UnixTimeNS{} - } else { - *u = UnixTimeNS(time.Unix(0, nsec)) - } + *d = DurationMS(msec) * DurationMS(time.Millisecond) return nil } -// Value returns a int64 value representing a unix timestamp in milliseconds, +// Value returns a int64 value representing a duration in milliseconds, // implements driver.Valuer interface. 
-func (u UnixTimeNS) Value() (driver.Value, error) { - return time.Time(u).UnixNano(), nil +func (d DurationMS) Value() (driver.Value, error) { + return time.Duration(d).Milliseconds(), nil } // Scan scan value into Uint64, implements sql.Scanner interface. diff --git a/stores/sql_test.go b/stores/sql_test.go index 0846254cb..7eb33d07b 100644 --- a/stores/sql_test.go +++ b/stores/sql_test.go @@ -185,7 +185,7 @@ func newTestSQLStore(t *testing.T, cfg testSQLStoreConfig) *testSQLStore { } if !cfg.skipContractSet { - err = sqlStore.SetContractSet(context.Background(), testContractSet, []types.FileContractID{}) + err = sqlStore.UpdateContractSet(context.Background(), testContractSet, []types.FileContractID{}, nil) if err != nil { t.Fatal("failed to set contract set", err) } diff --git a/webhooks/webhooks.go b/webhooks/webhooks.go index 0f1eb636f..ce643835c 100644 --- a/webhooks/webhooks.go +++ b/webhooks/webhooks.go @@ -7,11 +7,11 @@ import ( "encoding/json" "errors" "fmt" - "io" "net/http" "sync" "time" + "go.sia.tech/renterd/internal/utils" "go.uber.org/zap" ) @@ -268,18 +268,7 @@ func sendEvent(ctx context.Context, url string, headers map[string]string, actio for k, v := range headers { req.Header.Set(k, v) } - defer io.ReadAll(req.Body) // always drain body - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - errStr, err := io.ReadAll(req.Body) - if err != nil { - return fmt.Errorf("failed to read response body: %w", err) - } - return fmt.Errorf("Webhook returned unexpected status %v: %v", resp.StatusCode, string(errStr)) - } - return nil + _, _, err = utils.DoRequest(req, nil) + return err } diff --git a/worker/accounts.go b/worker/accounts.go deleted file mode 100644 index 76a18d37e..000000000 --- a/worker/accounts.go +++ /dev/null @@ -1,167 +0,0 @@ -package worker - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - rhpv3 
"go.sia.tech/core/rhp/v3" - "go.sia.tech/core/types" - "go.sia.tech/renterd/api" - rhp3 "go.sia.tech/renterd/internal/rhp/v3" -) - -const ( - // accountLockingDuration is the time for which an account lock remains - // reserved on the bus after locking it. - accountLockingDuration = 30 * time.Second -) - -type ( - // accounts stores the balance and other metrics of accounts that the - // worker maintains with a host. - accounts struct { - as AccountStore - key types.PrivateKey - } - - // account contains information regarding a specific account of the - // worker. - account struct { - as AccountStore - id rhpv3.Account - key types.PrivateKey - host types.PublicKey - } -) - -// ForHost returns an account to use for a given host. If the account -// doesn't exist, a new one is created. -func (a *accounts) ForHost(hk types.PublicKey) *account { - accountID := rhpv3.Account(a.deriveAccountKey(hk).PublicKey()) - return &account{ - as: a.as, - id: accountID, - key: a.key, - host: hk, - } -} - -// deriveAccountKey derives an account plus key for a given host and worker. -// Each worker has its own account for a given host. That makes concurrency -// around keeping track of an accounts balance and refilling it a lot easier in -// a multi-worker setup. -func (a *accounts) deriveAccountKey(hostKey types.PublicKey) types.PrivateKey { - index := byte(0) // not used yet but can be used to derive more than 1 account per host - - // Append the host for which to create it and the index to the - // corresponding sub-key. - subKey := a.key - data := make([]byte, 0, len(subKey)+len(hostKey)+1) - data = append(data, subKey[:]...) - data = append(data, hostKey[:]...) - data = append(data, index) - - seed := types.HashBytes(data) - pk := types.NewPrivateKeyFromSeed(seed[:]) - for i := range seed { - seed[i] = 0 - } - return pk -} - -// Balance returns the account balance. 
-func (a *account) Balance(ctx context.Context) (balance types.Currency, err error) { - err = withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { - balance = types.NewCurrency(account.Balance.Uint64(), new(big.Int).Rsh(account.Balance, 64).Uint64()) - return nil - }) - return -} - -// WithDeposit increases the balance of an account by the amount returned by -// amtFn if amtFn doesn't return an error. -func (a *account) WithDeposit(ctx context.Context, amtFn func() (types.Currency, error)) error { - return withAccountLock(ctx, a.as, a.id, a.host, false, func(_ api.Account) error { - amt, err := amtFn() - if err != nil { - return err - } - return a.as.AddBalance(ctx, a.id, a.host, amt.Big()) - }) -} - -// WithSync syncs an accounts balance with the bus. To do so, the account is -// locked while the balance is fetched through balanceFn. -func (a *account) WithSync(ctx context.Context, balanceFn func() (types.Currency, error)) error { - return withAccountLock(ctx, a.as, a.id, a.host, true, func(_ api.Account) error { - balance, err := balanceFn() - if err != nil { - return err - } - return a.as.SetBalance(ctx, a.id, a.host, balance.Big()) - }) -} - -// WithWithdrawal decreases the balance of an account by the amount returned by -// amtFn. The amount is still withdrawn if amtFn returns an error since some -// costs are non-refundable. 
-func (a *account) WithWithdrawal(ctx context.Context, amtFn func() (types.Currency, error)) error { - return withAccountLock(ctx, a.as, a.id, a.host, false, func(account api.Account) error { - // return early if the account needs to sync - if account.RequiresSync { - return fmt.Errorf("%w; account requires resync", rhp3.ErrBalanceInsufficient) - } - - // return early if our account is not funded - if account.Balance.Cmp(big.NewInt(0)) <= 0 { - return rhp3.ErrBalanceInsufficient - } - - // execute amtFn - amt, err := amtFn() - - // in case of an insufficient balance, we schedule a sync - if rhp3.IsBalanceInsufficient(err) { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - err = errors.Join(err, a.as.ScheduleSync(ctx, a.id, a.host)) - cancel() - } - - // if an amount was returned, we withdraw it - if !amt.IsZero() { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - err = errors.Join(err, a.as.AddBalance(ctx, a.id, a.host, new(big.Int).Neg(amt.Big()))) - cancel() - } - - return err - }) -} - -func (w *Worker) initAccounts(as AccountStore) { - if w.accounts != nil { - panic("accounts already initialized") // developer error - } - w.accounts = &accounts{ - as: as, - key: w.deriveSubKey("accountkey"), - } -} - -func withAccountLock(ctx context.Context, as AccountStore, id rhpv3.Account, hk types.PublicKey, exclusive bool, fn func(a api.Account) error) error { - acc, lockID, err := as.LockAccount(ctx, id, hk, exclusive, accountLockingDuration) - if err != nil { - return err - } - err = fn(acc) - - // unlock account - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - _ = as.UnlockAccount(ctx, acc.ID, lockID) // ignore error - cancel() - - return err -} diff --git a/worker/alerts.go b/worker/alerts.go index fd3657e05..664698fb6 100644 --- a/worker/alerts.go +++ b/worker/alerts.go @@ -1,6 +1,7 @@ package worker import ( + "errors" "time" "go.sia.tech/core/types" @@ -12,7 +13,7 @@ func randomAlertID() 
types.Hash256 { return frand.Entropy256() } -func newDownloadFailedAlert(bucket, path, prefix, marker string, offset, length, contracts int64, err error) alerts.Alert { +func newDownloadFailedAlert(bucket, path string, offset, length, contracts int64, err error) alerts.Alert { return alerts.Alert{ ID: randomAlertID(), Severity: alerts.SeverityError, @@ -20,8 +21,6 @@ func newDownloadFailedAlert(bucket, path, prefix, marker string, offset, length, Data: map[string]any{ "bucket": bucket, "path": path, - "prefix": prefix, - "marker": marker, "offset": offset, "length": length, "contracts": contracts, @@ -49,6 +48,18 @@ func newUploadFailedAlert(bucket, path, contractSet, mimeType string, minShards, data["multipart"] = true } + hostErr := err + for errors.Unwrap(hostErr) != nil { + hostErr = errors.Unwrap(hostErr) + } + if set, ok := hostErr.(HostErrorSet); ok { + hostErrors := make(map[string]string, len(set)) + for hk, err := range set { + hostErrors[hk.String()] = err.Error() + } + data["hosts"] = hostErrors + } + return alerts.Alert{ ID: randomAlertID(), Severity: alerts.SeverityError, diff --git a/worker/alerts_test.go b/worker/alerts_test.go new file mode 100644 index 000000000..137838a39 --- /dev/null +++ b/worker/alerts_test.go @@ -0,0 +1,48 @@ +package worker + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.sia.tech/core/types" + "go.sia.tech/renterd/alerts" +) + +// TestUploadFailedAlertErrorSet is a test to verify that an upload failing with a HostErrorSet error registers an alert with all the individual errors of any host in the payload. 
+func TestUploadFailedAlertErrorSet(t *testing.T) { + hostErrSet := HostErrorSet{ + types.PublicKey{1, 1, 1}: errors.New("test"), + } + wrapped := fmt.Errorf("wrapped error: %w", hostErrSet) + + alert := newUploadFailedAlert("bucket", "path", "set", "mimeType", 1, 2, 3, true, false, wrapped) + + alert.ID = types.Hash256{1, 2, 3} + alert.Timestamp = time.Time{} + + expectedAlert := alerts.Alert{ + ID: types.Hash256{1, 2, 3}, + Severity: alerts.SeverityError, + Message: "Upload failed", + Data: map[string]any{ + "bucket": "bucket", + "contractSet": "set", + "contracts": 3, + "error": wrapped.Error(), + "hosts": map[string]string{ + types.PublicKey{1, 1, 1}.String(): "test", + }, + "mimeType": "mimeType", + "minShards": 1, + "packing": true, + "path": "path", + "totalShards": 2, + }, + } + if !cmp.Equal(alert, expectedAlert) { + t.Fatal(cmp.Diff(alert, expectedAlert)) + } +} diff --git a/worker/client/client.go b/worker/client/client.go index 9abac4d0e..2bac4f99f 100644 --- a/worker/client/client.go +++ b/worker/client/client.go @@ -15,6 +15,7 @@ import ( "go.sia.tech/core/types" "go.sia.tech/jape" "go.sia.tech/renterd/api" + "go.sia.tech/renterd/internal/utils" "go.sia.tech/renterd/object" "go.sia.tech/renterd/webhooks" ) @@ -38,6 +39,18 @@ func (c *Client) Account(ctx context.Context, hostKey types.PublicKey) (account return } +// Accounts returns all accounts. +func (c *Client) Accounts(ctx context.Context) (accounts []api.Account, err error) { + err = c.c.WithContext(ctx).GET(fmt.Sprintf("/accounts"), &accounts) + return +} + +// ResetDrift resets the drift of an account to zero. +func (c *Client) ResetDrift(ctx context.Context, id rhpv3.Account) (err error) { + err = c.c.WithContext(ctx).POST(fmt.Sprintf("/account/%s/resetdrift", id), nil, nil) + return +} + // Contracts returns all contracts from the worker. These contracts decorate a // bus contract with the contract's latest revision. 
func (c *Client) Contracts(ctx context.Context, hostTimeout time.Duration) (resp api.ContractsResponse, err error) { @@ -96,21 +109,14 @@ func (c *Client) HeadObject(ctx context.Context, bucket, path string, opts api.H req.SetBasicAuth("", c.c.WithContext(ctx).Password) opts.ApplyHeaders(req.Header) - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != 200 && resp.StatusCode != 206 { - _ = resp.Body.Close() - switch resp.StatusCode { - case http.StatusNotFound: - return nil, api.ErrObjectNotFound - default: - return nil, errors.New(http.StatusText(resp.StatusCode)) - } + headers, statusCode, err := utils.DoRequest(req, nil) + if err != nil && statusCode == http.StatusNotFound { + return nil, api.ErrObjectNotFound + } else if err != nil { + return nil, errors.New(http.StatusText(statusCode)) } - head, err := parseObjectResponseHeaders(resp.Header) + head, err := parseObjectResponseHeaders(headers) if err != nil { return nil, err } @@ -213,17 +219,11 @@ func (c *Client) UploadMultipartUploadPart(ctx context.Context, r io.Reader, buc } else if req.ContentLength, err = sizeFromSeeker(r); err != nil { return nil, fmt.Errorf("failed to get content length from seeker: %w", err) } - resp, err := http.DefaultClient.Do(req) + header, _, err := utils.DoRequest(req, nil) if err != nil { return nil, err } - defer io.Copy(io.Discard, resp.Body) - defer resp.Body.Close() - if resp.StatusCode != 200 { - err, _ := io.ReadAll(resp.Body) - return nil, errors.New(string(err)) - } - return &api.UploadMultipartUploadPartResponse{ETag: resp.Header.Get("ETag")}, nil + return &api.UploadMultipartUploadPartResponse{ETag: header.Get("ETag")}, nil } // UploadObject uploads the data in r, creating an object at the given path. 
@@ -250,17 +250,11 @@ func (c *Client) UploadObject(ctx context.Context, r io.Reader, bucket, path str } else if req.ContentLength, err = sizeFromSeeker(r); err != nil { return nil, fmt.Errorf("failed to get content length from seeker: %w", err) } - resp, err := http.DefaultClient.Do(req) + header, _, err := utils.DoRequest(req, nil) if err != nil { return nil, err } - defer io.Copy(io.Discard, resp.Body) - defer resp.Body.Close() - if resp.StatusCode != 200 { - err, _ := io.ReadAll(resp.Body) - return nil, errors.New(string(err)) - } - return &api.UploadObjectResponse{ETag: resp.Header.Get("ETag")}, nil + return &api.UploadObjectResponse{ETag: header.Get("ETag")}, nil } // UploadStats returns the upload stats. @@ -271,7 +265,7 @@ func (c *Client) UploadStats() (resp api.UploadStatsResponse, err error) { // NotifyEvent notifies the worker of an event. func (c *Client) NotifyEvent(ctx context.Context, e webhooks.Event) (err error) { - err = c.c.WithContext(ctx).POST("/events", e, nil) + err = c.c.WithContext(ctx).POST("/event", e, nil) return } diff --git a/worker/client/rhp.go b/worker/client/rhp.go index d1fb2d9e8..5aeeee8bf 100644 --- a/worker/client/rhp.go +++ b/worker/client/rhp.go @@ -2,55 +2,12 @@ package client import ( "context" - "errors" - "fmt" "time" "go.sia.tech/core/types" "go.sia.tech/renterd/api" - - rhpv2 "go.sia.tech/core/rhp/v2" ) -// RHPBroadcast broadcasts the latest revision for a contract. -func (c *Client) RHPBroadcast(ctx context.Context, contractID types.FileContractID) (err error) { - err = c.c.WithContext(ctx).POST(fmt.Sprintf("/rhp/contract/%s/broadcast", contractID), nil, nil) - return -} - -// RHPContractRoots fetches the roots of the contract with given id. -func (c *Client) RHPContractRoots(ctx context.Context, contractID types.FileContractID) (roots []types.Hash256, err error) { - err = c.c.WithContext(ctx).GET(fmt.Sprintf("/rhp/contract/%s/roots", contractID), &roots) - return -} - -// RHPForm forms a contract with a host. 
-func (c *Client) RHPForm(ctx context.Context, endHeight uint64, hostKey types.PublicKey, hostIP string, renterAddress types.Address, renterFunds types.Currency, hostCollateral types.Currency) (rhpv2.ContractRevision, []types.Transaction, error) { - req := api.RHPFormRequest{ - EndHeight: endHeight, - HostCollateral: hostCollateral, - HostKey: hostKey, - HostIP: hostIP, - RenterFunds: renterFunds, - RenterAddress: renterAddress, - } - var resp api.RHPFormResponse - err := c.c.WithContext(ctx).POST("/rhp/form", req, &resp) - return resp.Contract, resp.TransactionSet, err -} - -// RHPFund funds an ephemeral account using the supplied contract. -func (c *Client) RHPFund(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string, balance types.Currency) (err error) { - req := api.RHPFundRequest{ - ContractID: contractID, - HostKey: hostKey, - SiamuxAddr: siamuxAddr, - Balance: balance, - } - err = c.c.WithContext(ctx).POST("/rhp/fund", req, nil) - return -} - // RHPPriceTable fetches a price table for a host. func (c *Client) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, siamuxAddr string, timeout time.Duration) (pt api.HostPriceTable, err error) { req := api.RHPPriceTableRequest{ @@ -62,41 +19,6 @@ func (c *Client) RHPPriceTable(ctx context.Context, hostKey types.PublicKey, sia return } -// RHPPruneContract prunes deleted sectors from the contract with given id. 
-func (c *Client) RHPPruneContract(ctx context.Context, contractID types.FileContractID, timeout time.Duration) (pruned, remaining uint64, err error) { - var res api.RHPPruneContractResponse - if err = c.c.WithContext(ctx).POST(fmt.Sprintf("/rhp/contract/%s/prune", contractID), api.RHPPruneContractRequest{ - Timeout: api.DurationMS(timeout), - }, &res); err != nil { - return - } else if res.Error != "" { - err = errors.New(res.Error) - } - - pruned = res.Pruned - remaining = res.Remaining - return -} - -// RHPRenew renews an existing contract with a host. -func (c *Client) RHPRenew(ctx context.Context, contractID types.FileContractID, endHeight uint64, hostKey types.PublicKey, siamuxAddr string, hostAddress, renterAddress types.Address, renterFunds, minNewCollateral, maxFundAmount types.Currency, expectedStorage, windowSize uint64) (resp api.RHPRenewResponse, err error) { - req := api.RHPRenewRequest{ - ContractID: contractID, - EndHeight: endHeight, - ExpectedNewStorage: expectedStorage, - HostAddress: hostAddress, - HostKey: hostKey, - MaxFundAmount: maxFundAmount, - MinNewCollateral: minNewCollateral, - RenterAddress: renterAddress, - RenterFunds: renterFunds, - SiamuxAddr: siamuxAddr, - WindowSize: windowSize, - } - err = c.c.WithContext(ctx).POST("/rhp/renew", req, &resp) - return -} - // RHPScan scans a host, returning its current settings. func (c *Client) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP string, timeout time.Duration) (resp api.RHPScanResponse, err error) { err = c.c.WithContext(ctx).POST("/rhp/scan", api.RHPScanRequest{ @@ -106,14 +28,3 @@ func (c *Client) RHPScan(ctx context.Context, hostKey types.PublicKey, hostIP st }, &resp) return } - -// RHPSync funds an ephemeral account using the supplied contract. 
-func (c *Client) RHPSync(ctx context.Context, contractID types.FileContractID, hostKey types.PublicKey, hostIP, siamuxAddr string) (err error) { - req := api.RHPSyncRequest{ - ContractID: contractID, - HostKey: hostKey, - SiamuxAddr: siamuxAddr, - } - err = c.c.WithContext(ctx).POST("/rhp/sync", req, nil) - return -} diff --git a/worker/host.go b/worker/host.go index 9c65bd0c2..2ecd95233 100644 --- a/worker/host.go +++ b/worker/host.go @@ -2,7 +2,6 @@ package worker import ( "context" - "errors" "fmt" "io" "time" @@ -13,6 +12,7 @@ import ( "go.sia.tech/renterd/api" "go.sia.tech/renterd/internal/gouging" rhp3 "go.sia.tech/renterd/internal/rhp/v3" + "go.sia.tech/renterd/internal/worker" "go.uber.org/zap" ) @@ -29,8 +29,6 @@ type ( FundAccount(ctx context.Context, balance types.Currency, rev *types.FileContractRevision) error SyncAccount(ctx context.Context, rev *types.FileContractRevision) error - - RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) } HostManager interface { @@ -42,11 +40,10 @@ type ( host struct { hk types.PublicKey renterKey types.PrivateKey - accountKey types.PrivateKey fcid types.FileContractID siamuxAddr string - acc *account + acc *worker.Account client *rhp3.Client bus Bus contractSpendingRecorder ContractSpendingRecorder @@ -71,7 +68,6 @@ func (w *Worker) Host(hk types.PublicKey, fcid types.FileContractID, siamuxAddr fcid: fcid, siamuxAddr: siamuxAddr, renterKey: w.deriveRenterKey(hk), - accountKey: w.accounts.deriveAccountKey(hk), priceTables: w.priceTables, } } @@ -80,7 +76,7 @@ func (h *host) PublicKey() types.PublicKey { return h.hk } func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash256, offset, length uint32, overpay bool) (err error) { var amount types.Currency - return h.acc.WithWithdrawal(ctx, func() (types.Currency, error) { + return h.acc.WithWithdrawal(func() (types.Currency, error) { pt, uptc, err := 
h.priceTables.fetch(ctx, h.hk, nil) if err != nil { return types.ZeroCurrency, err @@ -97,7 +93,7 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 return amount, fmt.Errorf("%w: %v", gouging.ErrPriceTableGouging, breakdown.DownloadErr) } - cost, err := h.client.ReadSector(ctx, offset, length, root, w, h.hk, h.siamuxAddr, h.acc.id, h.accountKey, hpt) + cost, err := h.client.ReadSector(ctx, offset, length, root, w, h.hk, h.siamuxAddr, h.acc.ID(), h.acc.Key(), hpt) if err != nil { return amount, err } @@ -108,14 +104,15 @@ func (h *host) DownloadSector(ctx context.Context, w io.Writer, root types.Hash2 func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, sector *[rhpv2.SectorSize]byte, rev types.FileContractRevision) error { // fetch price table var pt rhpv3.HostPriceTable - if err := h.acc.WithWithdrawal(ctx, func() (amount types.Currency, err error) { + if err := h.acc.WithWithdrawal(func() (amount types.Currency, err error) { pt, amount, err = h.priceTable(ctx, nil) return }); err != nil { return err } + // upload - cost, err := h.client.AppendSector(ctx, sectorRoot, sector, &rev, h.hk, h.siamuxAddr, h.acc.id, pt, h.renterKey) + cost, err := h.client.AppendSector(ctx, sectorRoot, sector, &rev, h.hk, h.siamuxAddr, h.acc.ID(), pt, h.renterKey) if err != nil { return fmt.Errorf("failed to upload sector: %w", err) } @@ -124,52 +121,6 @@ func (h *host) UploadSector(ctx context.Context, sectorRoot types.Hash256, secto return nil } -func (h *host) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { - gc, err := h.gougingChecker(ctx, false) - if err != nil { - return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, types.ZeroCurrency, err - } - revision, err := h.client.Revision(ctx, h.fcid, h.hk, h.siamuxAddr) - if err != nil { - return rhpv2.ContractRevision{}, nil, types.ZeroCurrency, types.ZeroCurrency, err - } - - // 
helper to discard txn on error - discardTxn := func(ctx context.Context, txn types.Transaction, err *error) { - if *err == nil { - return - } - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - if dErr := h.bus.WalletDiscard(ctx, txn); dErr != nil { - h.logger.Errorf("%v: %s, failed to discard txn: %v", *err, dErr) - } - cancel() - } - - // helper to sign txn - signTxn := func(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error { - // sign txn - return h.bus.WalletSign(ctx, txn, toSign, cf) - } - - // helper to prepare contract renewal - prepareRenew := func(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, func(context.Context, types.Transaction, *error), error) { - resp, err := h.bus.WalletPrepareRenew(ctx, revision, hostAddress, renterAddress, renterKey, renterFunds, minNewCollateral, maxFundAmount, pt, endHeight, windowSize, expectedStorage) - if err != nil { - return api.WalletPrepareRenewResponse{}, nil, err - } - return resp, discardTxn, nil - } - - // renew contract - rev, txnSet, contractPrice, fundAmount, err := h.client.Renew(ctx, rrr, gc, prepareRenew, signTxn, revision, h.renterKey) - if err != nil { - return rhpv2.ContractRevision{}, nil, contractPrice, fundAmount, err - } - return rev, txnSet, contractPrice, fundAmount, err -} - func (h *host) PriceTableUnpaid(ctx context.Context) (api.HostPriceTable, error) { return h.client.PriceTableUnpaid(ctx, h.hk, h.siamuxAddr) } @@ -182,9 +133,9 @@ func (h *host) PriceTable(ctx context.Context, rev *types.FileContractRevision) // fetch the price table if rev != nil { - hpt, err = fetchPT(rhp3.PreparePriceTableContractPayment(rev, h.acc.id, h.renterKey)) + hpt, err = 
fetchPT(rhp3.PreparePriceTableContractPayment(rev, h.acc.ID(), h.renterKey)) } else { - hpt, err = fetchPT(rhp3.PreparePriceTableAccountPayment(h.accountKey)) + hpt, err = fetchPT(rhp3.PreparePriceTableAccountPayment(h.acc.Key())) } // set the cost @@ -208,7 +159,7 @@ func (h *host) FetchRevision(ctx context.Context, fetchTimeout time.Duration) (t func (h *host) FundAccount(ctx context.Context, desired types.Currency, rev *types.FileContractRevision) error { log := h.logger.With( zap.Stringer("host", h.hk), - zap.Stringer("account", h.acc.id), + zap.Stringer("account", h.acc.ID()), ) // ensure we have at least 2H in the contract to cover the costs @@ -216,20 +167,14 @@ func (h *host) FundAccount(ctx context.Context, desired types.Currency, rev *typ return fmt.Errorf("insufficient funds to fund account: %v <= %v", rev.ValidRenterPayout(), types.NewCurrency64(2)) } - // fetch current balance - balance, err := h.acc.Balance(ctx) - if err != nil { - return err - } - - // return early if we have the desired balance - if balance.Cmp(desired) >= 0 { - return nil - } - // calculate the deposit amount - deposit := desired.Sub(balance) - return h.acc.WithDeposit(ctx, func() (types.Currency, error) { + return h.acc.WithDeposit(func(balance types.Currency) (types.Currency, error) { + // return early if we have the desired balance + if balance.Cmp(desired) >= 0 { + return types.ZeroCurrency, nil + } + deposit := desired.Sub(balance) + // fetch pricetable directly to bypass the gouging check pt, _, err := h.priceTables.fetch(ctx, h.hk, rev) if err != nil { @@ -244,9 +189,9 @@ func (h *host) FundAccount(ctx context.Context, desired types.Currency, rev *typ } // fund the account - if err := h.client.FundAccount(ctx, rev, h.hk, h.siamuxAddr, deposit, h.acc.id, pt.HostPriceTable, h.renterKey); err != nil { + if err := h.client.FundAccount(ctx, rev, h.hk, h.siamuxAddr, deposit, h.acc.ID(), pt.HostPriceTable, h.renterKey); err != nil { if rhp3.IsBalanceMaxExceeded(err) { - err = 
errors.Join(err, h.acc.as.ScheduleSync(ctx, h.acc.id, h.hk)) + h.acc.ScheduleSync() } return types.ZeroCurrency, fmt.Errorf("failed to fund account with %v; %w", deposit, err) } @@ -270,27 +215,16 @@ func (h *host) SyncAccount(ctx context.Context, rev *types.FileContractRevision) return err } - // check only the unused defaults - gc, err := GougingCheckerFromContext(ctx, false) - if err != nil { - return err - } else if err := gc.CheckUnusedDefaults(pt.HostPriceTable); err != nil { - return fmt.Errorf("%w: %v", gouging.ErrPriceTableGouging, err) + // check only the AccountBalanceCost + if types.NewCurrency64(1).Cmp(pt.AccountBalanceCost) < 0 { + return fmt.Errorf("%w: host is gouging on AccountBalanceCost", gouging.ErrPriceTableGouging) } - return h.acc.WithSync(ctx, func() (types.Currency, error) { - return h.client.SyncAccount(ctx, rev, h.hk, h.siamuxAddr, h.acc.id, pt.UID, h.renterKey) + return h.acc.WithSync(func() (types.Currency, error) { + return h.client.SyncAccount(ctx, rev, h.hk, h.siamuxAddr, h.acc.ID(), pt.HostPriceTable, h.renterKey) }) } -func (h *host) gougingChecker(ctx context.Context, criticalMigration bool) (gouging.Checker, error) { - gp, err := h.bus.GougingParams(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get gouging params: %w", err) - } - return newGougingChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, criticalMigration), nil -} - // priceTable fetches a price table from the host. If a revision is provided, it // will be used to pay for the price table. The returned price table is // guaranteed to be safe to use. 
diff --git a/worker/host_test.go b/worker/host_test.go index 8bbecaeff..f6ea236cd 100644 --- a/worker/host_test.go +++ b/worker/host_test.go @@ -123,10 +123,6 @@ func (h *testHost) FundAccount(ctx context.Context, balance types.Currency, rev return nil } -func (h *testHost) RenewContract(ctx context.Context, rrr api.RHPRenewRequest) (_ rhpv2.ContractRevision, _ []types.Transaction, _, _ types.Currency, err error) { - return rhpv2.ContractRevision{}, nil, types.Currency{}, types.Currency{}, nil -} - func (h *testHost) SyncAccount(ctx context.Context, rev *types.FileContractRevision) error { return nil } diff --git a/worker/mocks_test.go b/worker/mocks_test.go index f982437a7..feacaba6d 100644 --- a/worker/mocks_test.go +++ b/worker/mocks_test.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "math" - "math/big" "sync" "time" @@ -20,35 +19,13 @@ import ( "go.sia.tech/renterd/webhooks" ) -var _ AccountStore = (*accountsMock)(nil) - type accountsMock struct{} -func (*accountsMock) Accounts(context.Context) ([]api.Account, error) { +func (*accountsMock) Accounts(context.Context, string) ([]api.Account, error) { return nil, nil } -func (*accountsMock) AddBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error { - return nil -} - -func (*accountsMock) LockAccount(context.Context, rhpv3.Account, types.PublicKey, bool, time.Duration) (api.Account, uint64, error) { - return api.Account{}, 0, nil -} - -func (*accountsMock) UnlockAccount(context.Context, rhpv3.Account, uint64) error { - return nil -} - -func (*accountsMock) ResetDrift(context.Context, rhpv3.Account) error { - return nil -} - -func (*accountsMock) SetBalance(context.Context, rhpv3.Account, types.PublicKey, *big.Int) error { - return nil -} - -func (*accountsMock) ScheduleSync(context.Context, rhpv3.Account, types.PublicKey) error { +func (*accountsMock) UpdateAccounts(context.Context, []api.Account) error { return nil } @@ -72,8 +49,6 @@ func (c *chainMock) ConsensusState(ctx context.Context) 
(api.ConsensusState, err return c.cs, nil } -var _ Bus = (*busMock)(nil) - type busMock struct { *alerterMock *accountsMock @@ -106,6 +81,10 @@ func newBusMock(cs *contractStoreMock, hs *hostStoreMock, os *objectStoreMock) * } } +func (b *busMock) FundAccount(ctx context.Context, acc rhpv3.Account, fcid types.FileContractID, desired types.Currency) (types.Currency, error) { + return types.ZeroCurrency, nil +} + type contractMock struct { rev types.FileContractRevision metadata api.ContractMetadata @@ -131,7 +110,6 @@ func (c *contractMock) AddSector(root types.Hash256, sector *[rhpv2.SectorSize]b c.mu.Lock() c.sectors[root] = sector c.mu.Unlock() - return } func (c *contractMock) Sector(root types.Hash256) (sector *[rhpv2.SectorSize]byte, found bool) { @@ -156,13 +134,12 @@ func newContractLockerMock() *contractLockerMock { func (cs *contractLockerMock) AcquireContract(_ context.Context, fcid types.FileContractID, _ int, _ time.Duration) (uint64, error) { cs.mu.Lock() - defer cs.mu.Unlock() - lock, exists := cs.locks[fcid] if !exists { cs.locks[fcid] = new(sync.Mutex) lock = cs.locks[fcid] } + cs.mu.Unlock() lock.Lock() return 0, nil @@ -173,7 +150,6 @@ func (cs *contractLockerMock) ReleaseContract(_ context.Context, fcid types.File defer cs.mu.Unlock() cs.locks[fcid].Unlock() - delete(cs.locks, fcid) return nil } @@ -722,14 +698,6 @@ func (*walletMock) WalletFund(context.Context, *types.Transaction, types.Currenc return nil, nil, nil } -func (*walletMock) WalletPrepareForm(context.Context, types.Address, types.PublicKey, types.Currency, types.Currency, types.PublicKey, rhpv2.HostSettings, uint64) ([]types.Transaction, error) { - return nil, nil -} - -func (*walletMock) WalletPrepareRenew(context.Context, types.FileContractRevision, types.Address, types.Address, types.PrivateKey, types.Currency, types.Currency, types.Currency, rhpv3.HostPriceTable, uint64, uint64, uint64) (api.WalletPrepareRenewResponse, error) { - return api.WalletPrepareRenewResponse{}, nil -} - 
func (*walletMock) WalletSign(context.Context, *types.Transaction, []types.Hash256, types.CoveredFields) error { return nil } diff --git a/worker/s3/authentication.go b/worker/s3/authentication.go index 58ebad677..04884433d 100644 --- a/worker/s3/authentication.go +++ b/worker/s3/authentication.go @@ -141,13 +141,18 @@ func (b *authenticatedBackend) AuthenticationMiddleware(h http.Handler) http.Han return } // verify signature - if _, result := signature.V4SignVerify(rq); result != signature.ErrNone { + if accessKeyID, result := signature.V4SignVerify(rq); result == signature.ErrNone { + // authenticated request successfully + perms = rootPerms + } else if accessKeyID == "" { + // no access key provided; bucket policy might still permit access + // NOTE: this happens when the official aws sdk is used without + // credentials + } else { // authentication attempted but failed. writeResponse(w, signature.GetAPIError(result)) return } - // authenticated request successfully - perms = rootPerms } // add permissions to context diff --git a/worker/s3/backend.go b/worker/s3/backend.go index a8dd1cb22..d794c6825 100644 --- a/worker/s3/backend.go +++ b/worker/s3/backend.go @@ -163,7 +163,7 @@ func (s *s3) ListBucket(ctx context.Context, bucketName string, prefix *gofakes3 item := &gofakes3.Content{ Key: key, LastModified: gofakes3.NewContentTime(object.ModTime.Std()), - ETag: object.ETag, + ETag: api.FormatETag(object.ETag), Size: object.Size, StorageClass: gofakes3.StorageStandard, } @@ -271,14 +271,15 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range } } - // ensure metadata is not nil - if res.Metadata == nil { - res.Metadata = make(map[string]string) + // set user metadata + metadata := make(map[string]string) + for k, v := range res.Metadata { + metadata[amazonMetadataPrefix+k] = v } // decorate metadata - res.Metadata["Content-Type"] = res.ContentType - res.Metadata["Last-Modified"] = res.LastModified.Std().Format(http.TimeFormat) + 
metadata["Content-Type"] = res.ContentType + metadata["Last-Modified"] = res.LastModified.Std().Format(http.TimeFormat) // etag to bytes etag, err := hex.DecodeString(res.Etag) @@ -289,7 +290,7 @@ func (s *s3) GetObject(ctx context.Context, bucketName, objectName string, range return &gofakes3.Object{ Hash: etag, Name: gofakes3.URLEncode(objectName), - Metadata: res.Metadata, + Metadata: metadata, Size: res.Size, Contents: res.Content, Range: objectRange, @@ -389,7 +390,7 @@ func (s *s3) PutObject(ctx context.Context, bucketName, key string, meta map[str } return gofakes3.PutObjectResult{ - ETag: ur.ETag, + ETag: api.FormatETag(ur.ETag), VersionID: "", // not supported }, nil } @@ -452,7 +453,9 @@ func (s *s3) UploadPart(ctx context.Context, bucket, object string, id gofakes3. return nil, gofakes3.ErrorMessage(gofakes3.ErrInternal, err.Error()) } - return &gofakes3.UploadPartResult{ETag: res.ETag}, nil + return &gofakes3.UploadPartResult{ + ETag: api.FormatETag(res.ETag), + }, nil } func (s *s3) ListMultipartUploads(ctx context.Context, bucket string, marker *gofakes3.UploadListMarker, prefix gofakes3.Prefix, limit int64) (*gofakes3.ListMultipartUploadsResult, error) { @@ -502,7 +505,7 @@ func (s *s3) ListParts(ctx context.Context, bucket, object string, uploadID gofa parts = append(parts, gofakes3.ListMultipartUploadPartItem{ PartNumber: part.PartNumber, LastModified: gofakes3.NewContentTime(part.LastModified.Std()), - ETag: part.ETag, + ETag: api.FormatETag(part.ETag), Size: part.Size, }) } @@ -532,7 +535,7 @@ func (s *s3) CompleteMultipartUpload(ctx context.Context, bucket, object string, var parts []api.MultipartCompletedPart for _, part := range input.Parts { parts = append(parts, api.MultipartCompletedPart{ - ETag: part.ETag, + ETag: api.FormatETag(part.ETag), PartNumber: part.PartNumber, }) } diff --git a/worker/worker.go b/worker/worker.go index 7073e0c63..70b5fccf3 100644 --- a/worker/worker.go +++ b/worker/worker.go @@ -7,7 +7,6 @@ import ( "fmt" "io" 
"math" - "math/big" "net" "net/http" "os" @@ -27,6 +26,7 @@ import ( "go.sia.tech/renterd/build" "go.sia.tech/renterd/config" "go.sia.tech/renterd/internal/gouging" + "go.sia.tech/renterd/internal/rhp" rhp2 "go.sia.tech/renterd/internal/rhp/v2" rhp3 "go.sia.tech/renterd/internal/rhp/v3" "go.sia.tech/renterd/internal/utils" @@ -36,17 +36,13 @@ import ( "go.sia.tech/renterd/worker/client" "go.sia.tech/renterd/worker/s3" "go.uber.org/zap" - "golang.org/x/crypto/blake2b" ) const ( defaultRevisionFetchTimeout = 30 * time.Second - lockingPriorityActiveContractRevision = 100 - lockingPriorityRenew = 80 - lockingPriorityFunding = 40 lockingPrioritySyncing = 30 - lockingPriorityPruning = 20 + lockingPriorityActiveContractRevision = 100 lockingPriorityBlockedUpload = 15 lockingPriorityUpload = 10 @@ -76,7 +72,9 @@ type ( gouging.ConsensusState webhooks.Broadcaster - AccountStore + AccountFunder + iworker.AccountStore + ContractLocker ContractStore HostStore @@ -88,16 +86,8 @@ type ( Wallet } - // An AccountStore manages ephemaral accounts state. 
- AccountStore interface { - Accounts(ctx context.Context) ([]api.Account, error) - AddBalance(ctx context.Context, id rhpv3.Account, hk types.PublicKey, amt *big.Int) error - - LockAccount(ctx context.Context, id rhpv3.Account, hostKey types.PublicKey, exclusive bool, duration time.Duration) (api.Account, uint64, error) - UnlockAccount(ctx context.Context, id rhpv3.Account, lockID uint64) error - - SetBalance(ctx context.Context, id rhpv3.Account, hk types.PublicKey, amt *big.Int) error - ScheduleSync(ctx context.Context, id rhpv3.Account, hk types.PublicKey) error + AccountFunder interface { + FundAccount(ctx context.Context, account rhpv3.Account, fcid types.FileContractID, amount types.Currency) (types.Currency, error) } ContractStore interface { @@ -153,8 +143,6 @@ type ( Wallet interface { WalletDiscard(ctx context.Context, txn types.Transaction) error WalletFund(ctx context.Context, txn *types.Transaction, amount types.Currency, useUnconfirmedTxns bool) ([]types.Hash256, []types.Transaction, error) - WalletPrepareForm(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, err error) - WalletPrepareRenew(ctx context.Context, revision types.FileContractRevision, hostAddress, renterAddress types.Address, renterKey types.PrivateKey, renterFunds, minNewCollateral, maxFundAmount types.Currency, pt rhpv3.HostPriceTable, endHeight, windowSize, expectedStorage uint64) (api.WalletPrepareRenewResponse, error) WalletSign(ctx context.Context, txn *types.Transaction, toSign []types.Hash256, cf types.CoveredFields) error } @@ -164,18 +152,6 @@ type ( } ) -// deriveSubKey can be used to derive a sub-masterkey from the worker's -// masterkey to use for a specific purpose. Such as deriving more keys for -// ephemeral accounts. 
-func (w *Worker) deriveSubKey(purpose string) types.PrivateKey { - seed := blake2b.Sum256(append(w.masterKey[:], []byte(purpose)...)) - pk := types.NewPrivateKeyFromSeed(seed[:]) - for i := range seed { - seed[i] = 0 - } - return pk -} - // TODO: deriving the renter key from the host key using the master key only // works if we persist a hash of the renter's master key in the database and // compare it on startup, otherwise there's no way of knowing the derived key is @@ -189,12 +165,7 @@ func (w *Worker) deriveSubKey(purpose string) types.PrivateKey { // TODO: instead of deriving a renter key use a randomly generated salt so we're // not limited to one key per host func (w *Worker) deriveRenterKey(hostKey types.PublicKey) types.PrivateKey { - seed := blake2b.Sum256(append(w.deriveSubKey("renterkey"), hostKey[:]...)) - pk := types.NewPrivateKeyFromSeed(seed[:]) - for i := range seed { - seed[i] = 0 - } - return pk + return w.masterKey.DeriveContractKey(hostKey) } // A worker talks to Sia hosts to perform contract and storage operations within @@ -208,15 +179,15 @@ type Worker struct { allowPrivateIPs bool id string bus Bus - masterKey [32]byte + masterKey utils.MasterKey startTime time.Time eventSubscriber iworker.EventSubscriber downloadManager *downloadManager uploadManager *uploadManager - accounts *accounts - dialer *iworker.FallbackDialer + accounts *iworker.AccountMgr + dialer *rhp.FallbackDialer cache iworker.WorkerCache priceTables *priceTables @@ -385,326 +356,6 @@ func (w *Worker) rhpPriceTableHandler(jc jape.Context) { jc.Encode(hpt) } -func (w *Worker) rhpFormHandler(jc jape.Context) { - ctx := jc.Request.Context() - - // decode the request - var rfr api.RHPFormRequest - if jc.Decode(&rfr) != nil { - return - } - - // check renter funds is not zero - if rfr.RenterFunds.IsZero() { - http.Error(jc.ResponseWriter, "RenterFunds can not be zero", http.StatusBadRequest) - return - } - - // apply a pessimistic timeout on contract formations - ctx, cancel := 
context.WithTimeout(ctx, 15*time.Minute) - defer cancel() - - gp, err := w.bus.GougingParams(ctx) - if jc.Check("could not get gouging parameters", err) != nil { - return - } - gc := newGougingChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, false) - - hostIP, hostKey, renterFunds := rfr.HostIP, rfr.HostKey, rfr.RenterFunds - renterAddress, endHeight, hostCollateral := rfr.RenterAddress, rfr.EndHeight, rfr.HostCollateral - renterKey := w.deriveRenterKey(hostKey) - - contract, txnSet, err := w.rhp2Client.FormContract(ctx, renterAddress, renterKey, hostKey, hostIP, renterFunds, hostCollateral, endHeight, gc, func(ctx context.Context, renterAddress types.Address, renterKey types.PublicKey, renterFunds, hostCollateral types.Currency, hostKey types.PublicKey, hostSettings rhpv2.HostSettings, endHeight uint64) (txns []types.Transaction, discard func(types.Transaction), err error) { - txns, err = w.bus.WalletPrepareForm(ctx, renterAddress, renterKey, renterFunds, hostCollateral, hostKey, hostSettings, endHeight) - if err != nil { - return nil, nil, err - } - return txns, func(txn types.Transaction) { - _ = w.bus.WalletDiscard(ctx, txn) - }, nil - }) - if jc.Check("couldn't form contract", err) != nil { - return - } - - // broadcast the transaction set - err = w.bus.BroadcastTransaction(ctx, txnSet) - if err != nil { - w.logger.Errorf("failed to broadcast formation txn set: %v", err) - } - - jc.Encode(api.RHPFormResponse{ - ContractID: contract.ID(), - Contract: contract, - TransactionSet: txnSet, - }) -} - -func (w *Worker) rhpBroadcastHandler(jc jape.Context) { - ctx := jc.Request.Context() - - // decode the fcid - var fcid types.FileContractID - if jc.DecodeParam("id", &fcid) != nil { - return - } - - // Acquire lock before fetching revision. 
- unlocker, err := w.acquireContractLock(ctx, fcid, lockingPriorityActiveContractRevision) - if jc.Check("could not acquire revision lock", err) != nil { - return - } - defer unlocker.Release(ctx) - - // Fetch contract from bus. - c, err := w.bus.Contract(ctx, fcid) - if jc.Check("could not get contract", err) != nil { - return - } - rk := w.deriveRenterKey(c.HostKey) - - rev, err := w.rhp2Client.SignedRevision(ctx, c.HostIP, c.HostKey, rk, fcid, time.Minute) - if jc.Check("could not fetch revision", err) != nil { - return - } - - // Create txn with revision. - txn := types.Transaction{ - FileContractRevisions: []types.FileContractRevision{rev.Revision}, - Signatures: rev.Signatures[:], - } - // Fund the txn. We pass 0 here since we only need the wallet to fund - // the fee. - toSign, parents, err := w.bus.WalletFund(ctx, &txn, types.ZeroCurrency, true) - if jc.Check("failed to fund transaction", err) != nil { - return - } - // Sign the txn. - err = w.bus.WalletSign(ctx, &txn, toSign, types.CoveredFields{ - WholeTransaction: true, - }) - if jc.Check("failed to sign transaction", err) != nil { - _ = w.bus.WalletDiscard(ctx, txn) - return - } - // Broadcast the txn. 
- txnSet := parents - txnSet = append(txnSet, txn) - err = w.bus.BroadcastTransaction(ctx, txnSet) - if jc.Check("failed to broadcast transaction", err) != nil { - _ = w.bus.WalletDiscard(ctx, txn) - return - } -} - -func (w *Worker) rhpPruneContractHandlerPOST(jc jape.Context) { - ctx := jc.Request.Context() - - // decode fcid - var fcid types.FileContractID - if jc.DecodeParam("id", &fcid) != nil { - return - } - - // decode timeout - var pcr api.RHPPruneContractRequest - if jc.Decode(&pcr) != nil { - return - } - - // apply timeout - if pcr.Timeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(pcr.Timeout)) - defer cancel() - } - - // fetch the contract from the bus - contract, err := w.bus.Contract(ctx, fcid) - if errors.Is(err, api.ErrContractNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if jc.Check("couldn't fetch contract", err) != nil { - return - } - - // return early if there's no data to prune - size, err := w.bus.ContractSize(ctx, fcid) - if jc.Check("couldn't fetch contract size", err) != nil { - return - } else if size.Prunable == 0 { - jc.Encode(api.RHPPruneContractResponse{}) - return - } - - // fetch gouging params - gp, err := w.bus.GougingParams(ctx) - if jc.Check("could not fetch gouging parameters", err) != nil { - return - } - gc := newGougingChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, false) - - // prune the contract - var pruned, remaining uint64 - var rev *types.FileContractRevision - var cost types.Currency - err = w.withContractLock(ctx, contract.ID, lockingPriorityPruning, func() error { - stored, pending, err := w.bus.ContractRoots(ctx, contract.ID) - if err != nil { - return fmt.Errorf("failed to fetch contract roots; %w", err) - } - rev, pruned, remaining, cost, err = w.rhp2Client.PruneContract(ctx, w.deriveRenterKey(contract.HostKey), gc, contract.HostIP, contract.HostKey, fcid, contract.RevisionNumber, append(stored, pending...)) - return err - 
}) - if rev != nil { - w.contractSpendingRecorder.Record(*rev, api.ContractSpending{Deletions: cost}) - } - if err != nil && !errors.Is(err, rhp2.ErrNoSectorsToPrune) && pruned == 0 { - err = fmt.Errorf("failed to prune contract %v; %w", fcid, err) - jc.Error(err, http.StatusInternalServerError) - return - } - - res := api.RHPPruneContractResponse{ - Pruned: pruned, - Remaining: remaining, - } - if err != nil { - res.Error = err.Error() - } - jc.Encode(res) -} - -func (w *Worker) rhpContractRootsHandlerGET(jc jape.Context) { - ctx := jc.Request.Context() - - // decode fcid - var id types.FileContractID - if jc.DecodeParam("id", &id) != nil { - return - } - - // fetch the contract from the bus - c, err := w.bus.Contract(ctx, id) - if errors.Is(err, api.ErrContractNotFound) { - jc.Error(err, http.StatusNotFound) - return - } else if jc.Check("couldn't fetch contract", err) != nil { - return - } - - // fetch gouging params - gp, err := w.bus.GougingParams(ctx) - if jc.Check("couldn't fetch gouging parameters from bus", err) != nil { - return - } - gc := newGougingChecker(gp.GougingSettings, gp.ConsensusState, gp.TransactionFee, false) - - // fetch the roots from the host - roots, rev, cost, err := w.rhp2Client.ContractRoots(ctx, w.deriveRenterKey(c.HostKey), gc, c.HostIP, c.HostKey, id, c.RevisionNumber) - if jc.Check("couldn't fetch contract roots from host", err) != nil { - return - } else if rev != nil { - w.contractSpendingRecorder.Record(*rev, api.ContractSpending{SectorRoots: cost}) - } - jc.Encode(roots) -} - -func (w *Worker) rhpRenewHandler(jc jape.Context) { - ctx := jc.Request.Context() - - // decode request - var rrr api.RHPRenewRequest - if jc.Decode(&rrr) != nil { - return - } - - // check renter funds is not zero - if rrr.RenterFunds.IsZero() { - http.Error(jc.ResponseWriter, "RenterFunds can not be zero", http.StatusBadRequest) - return - } - - // attach gouging checker - gp, err := w.bus.GougingParams(ctx) - if jc.Check("could not get gouging 
parameters", err) != nil { - return - } - ctx = WithGougingChecker(ctx, w.bus, gp) - - // renew the contract - var renewed rhpv2.ContractRevision - var txnSet []types.Transaction - var contractPrice, fundAmount types.Currency - if jc.Check("couldn't renew contract", w.withContractLock(ctx, rrr.ContractID, lockingPriorityRenew, func() (err error) { - h := w.Host(rrr.HostKey, rrr.ContractID, rrr.SiamuxAddr) - renewed, txnSet, contractPrice, fundAmount, err = h.RenewContract(ctx, rrr) - return err - })) != nil { - return - } - - // broadcast the transaction set - err = w.bus.BroadcastTransaction(ctx, txnSet) - if err != nil { - w.logger.Errorf("failed to broadcast renewal txn set: %v", err) - } - - // send the response - jc.Encode(api.RHPRenewResponse{ - ContractID: renewed.ID(), - Contract: renewed, - ContractPrice: contractPrice, - FundAmount: fundAmount, - TransactionSet: txnSet, - }) -} - -func (w *Worker) rhpFundHandler(jc jape.Context) { - ctx := jc.Request.Context() - - // decode request - var rfr api.RHPFundRequest - if jc.Decode(&rfr) != nil { - return - } - - // attach gouging checker - gp, err := w.bus.GougingParams(ctx) - if jc.Check("could not get gouging parameters", err) != nil { - return - } - ctx = WithGougingChecker(ctx, w.bus, gp) - - // fund the account - jc.Check("couldn't fund account", w.withRevision(ctx, defaultRevisionFetchTimeout, rfr.ContractID, rfr.HostKey, rfr.SiamuxAddr, lockingPriorityFunding, func(rev types.FileContractRevision) error { - return w.Host(rfr.HostKey, rev.ParentID, rfr.SiamuxAddr).FundAccount(ctx, rfr.Balance, &rev) - })) -} - -func (w *Worker) rhpSyncHandler(jc jape.Context) { - ctx := jc.Request.Context() - - // decode the request - var rsr api.RHPSyncRequest - if jc.Decode(&rsr) != nil { - return - } - - // attach gouging checker - up, err := w.bus.UploadParams(ctx) - if jc.Check("couldn't fetch upload parameters from bus", err) != nil { - return - } - ctx = WithGougingChecker(ctx, w.bus, up.GougingParams) - - // sync 
the account - h := w.Host(rsr.HostKey, rsr.ContractID, rsr.SiamuxAddr) - jc.Check("couldn't sync account", w.withRevision(ctx, defaultRevisionFetchTimeout, rsr.ContractID, rsr.HostKey, rsr.SiamuxAddr, lockingPrioritySyncing, func(rev types.FileContractRevision) error { - return h.SyncAccount(ctx, &rev) - })) -} - func (w *Worker) slabMigrateHandler(jc jape.Context) { ctx := jc.Request.Context() @@ -1216,11 +867,30 @@ func (w *Worker) accountHandlerGET(jc jape.Context) { if jc.DecodeParam("hostkey", &hostKey) != nil { return } - account := rhpv3.Account(w.accounts.deriveAccountKey(hostKey).PublicKey()) + account := rhpv3.Account(w.accounts.ForHost(hostKey).ID()) jc.Encode(account) } -func (w *Worker) eventsHandlerPOST(jc jape.Context) { +func (w *Worker) accountsHandlerGET(jc jape.Context) { + jc.Encode(w.accounts.Accounts()) +} + +func (w *Worker) accountsResetDriftHandlerPOST(jc jape.Context) { + var id rhpv3.Account + if jc.DecodeParam("id", &id) != nil { + return + } + err := w.accounts.ResetDrift(id) + if errors.Is(err, iworker.ErrAccountNotFound) { + jc.Error(err, http.StatusNotFound) + return + } + if jc.Check("failed to reset drift", err) != nil { + return + } +} + +func (w *Worker) eventHandlerPOST(jc jape.Context) { var event webhooks.Event if jc.Decode(&event) != nil { return @@ -1246,6 +916,10 @@ func (w *Worker) stateHandlerGET(jc jape.Context) { // New returns an HTTP handler that serves the worker API. 
func New(cfg config.Worker, masterKey [32]byte, b Bus, l *zap.Logger) (*Worker, error) { + if cfg.ID == "" { + return nil, errors.New("worker ID cannot be empty") + } + l = l.Named("worker").Named(cfg.ID) if cfg.ContractLockTimeout == 0 { @@ -1270,7 +944,7 @@ func New(cfg config.Worker, masterKey [32]byte, b Bus, l *zap.Logger) (*Worker, a := alerts.WithOrigin(b, fmt.Sprintf("worker.%s", cfg.ID)) shutdownCtx, shutdownCancel := context.WithCancel(context.Background()) - dialer := iworker.NewFallbackDialer(b, net.Dialer{}, l) + dialer := rhp.NewFallbackDialer(b, net.Dialer{}, l) w := &Worker{ alerts: a, allowPrivateIPs: cfg.AllowPrivateIPs, @@ -1290,7 +964,9 @@ func New(cfg config.Worker, masterKey [32]byte, b Bus, l *zap.Logger) (*Worker, shutdownCtxCancel: shutdownCancel, } - w.initAccounts(b) + if err := w.initAccounts(cfg.AccountsRefillInterval); err != nil { + return nil, fmt.Errorf("failed to initialize accounts; %w", err) + } w.initPriceTables() w.initDownloadManager(cfg.DownloadMaxMemory, cfg.DownloadMaxOverdrive, cfg.DownloadOverdriveTimeout, l) @@ -1303,23 +979,18 @@ func New(cfg config.Worker, masterKey [32]byte, b Bus, l *zap.Logger) (*Worker, // Handler returns an HTTP handler that serves the worker API. 
func (w *Worker) Handler() http.Handler { return jape.Mux(map[string]jape.Handler{ - "GET /account/:hostkey": w.accountHandlerGET, - "GET /id": w.idHandlerGET, + "GET /accounts": w.accountsHandlerGET, + "GET /account/:hostkey": w.accountHandlerGET, + "POST /account/:id/resetdrift": w.accountsResetDriftHandlerPOST, + "GET /id": w.idHandlerGET, - "POST /events": w.eventsHandlerPOST, + "POST /event": w.eventHandlerPOST, "GET /memory": w.memoryGET, - "GET /rhp/contracts": w.rhpContractsHandlerGET, - "POST /rhp/contract/:id/broadcast": w.rhpBroadcastHandler, - "POST /rhp/contract/:id/prune": w.rhpPruneContractHandlerPOST, - "GET /rhp/contract/:id/roots": w.rhpContractRootsHandlerGET, - "POST /rhp/scan": w.rhpScanHandler, - "POST /rhp/form": w.rhpFormHandler, - "POST /rhp/renew": w.rhpRenewHandler, - "POST /rhp/fund": w.rhpFundHandler, - "POST /rhp/sync": w.rhpSyncHandler, - "POST /rhp/pricetable": w.rhpPriceTableHandler, + "GET /rhp/contracts": w.rhpContractsHandlerGET, + "POST /rhp/scan": w.rhpScanHandler, + "POST /rhp/pricetable": w.rhpPriceTableHandler, "GET /stats/downloads": w.downloadsStatsHandlerGET, "GET /stats/uploads": w.uploadsStatsHandlerGET, @@ -1339,7 +1010,7 @@ func (w *Worker) Handler() http.Handler { // Setup register event webhooks that enable the worker cache. 
func (w *Worker) Setup(ctx context.Context, apiURL, apiPassword string) error { go func() { - eventsURL := fmt.Sprintf("%s/events", apiURL) + eventsURL := fmt.Sprintf("%s/event", apiURL) webhookOpts := []webhooks.HeaderOption{webhooks.WithBasicAuth("", apiPassword)} if err := w.eventSubscriber.Register(w.shutdownCtx, eventsURL, webhookOpts...); err != nil { w.logger.Errorw("failed to register webhooks", zap.Error(err)) @@ -1358,6 +1029,9 @@ func (w *Worker) Shutdown(ctx context.Context) error { w.downloadManager.Stop() w.uploadManager.Stop() + // stop account manager + w.accounts.Shutdown(ctx) + // stop recorders w.contractSpendingRecorder.Stop(ctx) @@ -1389,7 +1063,7 @@ func (w *Worker) scanHost(ctx context.Context, timeout time.Duration, hostKey ty // fetch the host pricetable scanCtx, cancel = timeoutCtx() - pt, err := w.rhp3Client.PriceTableUnpaid(ctx, hostKey, settings.SiamuxAddr()) + pt, err := w.rhp3Client.PriceTableUnpaid(scanCtx, hostKey, settings.SiamuxAddr()) cancel() if err != nil { return settings, rhpv3.HostPriceTable{}, time.Since(start), err @@ -1508,6 +1182,35 @@ func (w *Worker) headObject(ctx context.Context, bucket, path string, onlyMetada }, res, nil } +func (w *Worker) FundAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, desired types.Currency) error { + // calculate the deposit amount + acc := w.accounts.ForHost(hk) + return acc.WithDeposit(func(balance types.Currency) (types.Currency, error) { + // return early if we have the desired balance + if balance.Cmp(desired) >= 0 { + return types.ZeroCurrency, nil + } + deposit := desired.Sub(balance) + + // fund the account + var err error + deposit, err = w.bus.FundAccount(ctx, acc.ID(), fcid, desired.Sub(balance)) + if err != nil { + if rhp3.IsBalanceMaxExceeded(err) { + acc.ScheduleSync() + } + return types.ZeroCurrency, fmt.Errorf("failed to fund account with %v; %w", deposit, err) + } + + // log the account balance after funding + w.logger.Debugw("fund account 
succeeded", + "balance", balance.ExactString(), + "deposit", deposit.ExactString(), + ) + return deposit, nil + }) +} + func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.DownloadObjectOptions) (*api.GetObjectResponse, error) { // head object hor, res, err := w.headObject(ctx, bucket, path, false, api.HeadObjectOptions{ @@ -1554,7 +1257,7 @@ func (w *Worker) GetObject(ctx context.Context, bucket, path string, opts api.Do if !errors.Is(err, ErrShuttingDown) && !errors.Is(err, errDownloadCancelled) && !errors.Is(err, io.ErrClosedPipe) { - w.registerAlert(newDownloadFailedAlert(bucket, path, opts.Prefix, opts.Marker, offset, length, int64(len(contracts)), err)) + w.registerAlert(newDownloadFailedAlert(bucket, path, offset, length, int64(len(contracts)), err)) } return fmt.Errorf("failed to download object: %w", err) } @@ -1579,6 +1282,25 @@ func (w *Worker) HeadObject(ctx context.Context, bucket, path string, opts api.H return res, err } +func (w *Worker) SyncAccount(ctx context.Context, fcid types.FileContractID, hk types.PublicKey, siamuxAddr string) error { + // attach gouging checker + gp, err := w.cache.GougingParams(ctx) + if err != nil { + return fmt.Errorf("couldn't get gouging parameters; %w", err) + } + ctx = WithGougingChecker(ctx, w.bus, gp) + + // sync the account + h := w.Host(hk, fcid, siamuxAddr) + err = w.withRevision(ctx, defaultRevisionFetchTimeout, fcid, hk, siamuxAddr, lockingPrioritySyncing, func(rev types.FileContractRevision) error { + return h.SyncAccount(ctx, &rev) + }) + if err != nil { + return fmt.Errorf("failed to sync account; %w", err) + } + return nil +} + func (w *Worker) UploadObject(ctx context.Context, r io.Reader, bucket, path string, opts api.UploadObjectOptions) (*api.UploadObjectResponse, error) { // prepare upload params up, err := w.prepareUploadParams(ctx, bucket, opts.ContractSet, opts.MinShards, opts.TotalShards) @@ -1670,6 +1392,14 @@ func (w *Worker) UploadMultipartUploadPart(ctx 
context.Context, r io.Reader, buc }, nil } +func (w *Worker) initAccounts(refillInterval time.Duration) (err error) { + if w.accounts != nil { + panic("accounts already initialized") // developer error + } + w.accounts, err = iworker.NewAccountManager(w.masterKey.DeriveAccountsKey(w.id), w.id, w.bus, w, w, w.bus, w.cache, w.bus, refillInterval, w.logger.Desugar()) + return err +} + func (w *Worker) prepareUploadParams(ctx context.Context, bucket string, contractSet string, minShards, totalShards int) (api.UploadParams, error) { // return early if the bucket does not exist _, err := w.bus.Bucket(ctx, bucket) diff --git a/worker/worker_test.go b/worker/worker_test.go index f0822f03f..4472f64b5 100644 --- a/worker/worker_test.go +++ b/worker/worker_test.go @@ -132,6 +132,7 @@ func (w *testWorker) RenewContract(hk types.PublicKey) *contractMock { func newTestWorkerCfg() config.Worker { return config.Worker{ + AccountsRefillInterval: time.Second, ID: "test", ContractLockTimeout: time.Second, BusFlushInterval: time.Second,