This repository has been archived by the owner on Apr 2, 2024. It is now read-only.

refactor(BUX-451): fix spellings and lint hints
chris-4chain committed Jan 16, 2024
1 parent bfa6cbd commit 037cb8d
Showing 16 changed files with 55 additions and 56 deletions.
4 changes: 2 additions & 2 deletions action_transaction.go
@@ -117,7 +117,7 @@ func (c *Client) GetTransactionsByIDs(ctx context.Context, txIDs []string) ([]*T
ctx = c.GetOrStartTxn(ctx, "get_transactions_by_ids")

// Create the conditions
conditions := generateTxIdFilterConditions(txIDs)
conditions := generateTxIDFilterConditions(txIDs)

// Get the transactions by it's IDs
transactions, err := getTransactions(
@@ -387,7 +387,7 @@ func (c *Client) RevertTransaction(ctx context.Context, id string) error {
return err
}

func generateTxIdFilterConditions(txIDs []string) *map[string]interface{} {
func generateTxIDFilterConditions(txIDs []string) *map[string]interface{} {
orConditions := make([]map[string]interface{}, len(txIDs))

for i, txID := range txIDs {
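The only substantive change in this file is the identifier rename, following the Go convention that initialisms such as ID, URL, and API keep a single case throughout a name (the var-naming hint reported by revive/golint). A small illustrative sketch; the txRef type and the helper body are invented for the example:

package example

// txRef is a made-up type; the point is the casing of the identifiers.
type txRef struct {
	TxID string // preferred over TxId
	URL  string // preferred over Url
}

// generateTxIDFilterConditions mirrors the renamed helper's casing.
func generateTxIDFilterConditions(txIDs []string) []txRef {
	refs := make([]txRef, 0, len(txIDs))
	for _, id := range txIDs {
		refs = append(refs, txRef{TxID: id})
	}
	return refs
}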
8 changes: 4 additions & 4 deletions beef_bump.go
@@ -62,7 +62,7 @@ func prepareBEEFFactors(ctx context.Context, tx *Transaction, store TransactionG
return nil, nil, err
}

var txIDs []string
txIDs := make([]string, 0, len(tx.draftTransaction.Configuration.Inputs))
for _, input := range tx.draftTransaction.Configuration.Inputs {
txIDs = append(txIDs, input.UtxoPointer.TransactionID)
}
@@ -96,7 +96,7 @@ func prepareBEEFFactors(ctx context.Context, tx *Transaction, store TransactionG
}

func checkParentTransactions(ctx context.Context, store TransactionGetter, btTx *bt.Tx) ([]*bt.Tx, []*Transaction, error) {
var parentTxIDs []string
parentTxIDs := make([]string, 0, len(btTx.Inputs))
for _, txIn := range btTx.Inputs {
parentTxIDs = append(parentTxIDs, txIn.PreviousTxIDStr())
}
@@ -106,8 +106,8 @@ func checkParentTransactions(ctx context.Context, store TransactionGetter, btTx
return nil, nil, err
}

var validTxs []*Transaction
var validBtTxs []*bt.Tx
validTxs := make([]*Transaction, 0, len(parentTxs))
validBtTxs := make([]*bt.Tx, 0, len(parentTxs))
for _, parentTx := range parentTxs {
parentBtTx, err := bt.NewTxFromString(parentTx.Hex)
if err != nil {
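Both hunks in beef_bump.go replace a nil slice declaration with make and a capacity hint, which is what prealloc-style lint hints ask for when the number of appends is known up front; behaviour is unchanged, but the backing array is allocated once. A minimal sketch of the pattern, with the input list invented:

package example

// collectParentIDs allocates the result slice once, then appends into it.
func collectParentIDs(parents []string) []string {
	// var ids []string                    // previous form: nil slice grown by append
	ids := make([]string, 0, len(parents)) // pre-allocated form asked for by the lint hint
	for _, p := range parents {
		ids = append(ids, p)
	}
	return ids
}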
8 changes: 4 additions & 4 deletions beef_tx.go
@@ -32,16 +32,16 @@ func ToBeef(ctx context.Context, tx *Transaction, store TransactionGetter) (stri
return "", err
}
sortedTxs := kahnTopologicalSortTransactions(bumpBtFactors)
beefHex, err := toBeefHex(ctx, bumps, sortedTxs)
beefHex, err := toBeefHex(bumps, sortedTxs)
if err != nil {
return "", fmt.Errorf("ToBeef() error: %w", err)
}

return beefHex, nil
}

func toBeefHex(ctx context.Context, bumps BUMPs, parentTxs []*bt.Tx) (string, error) {
beef, err := newBeefTx(ctx, 1, bumps, parentTxs)
func toBeefHex(bumps BUMPs, parentTxs []*bt.Tx) (string, error) {
beef, err := newBeefTx(1, bumps, parentTxs)
if err != nil {
return "", fmt.Errorf("ToBeefHex() error: %w", err)
}
@@ -54,7 +54,7 @@ func toBeefHex(ctx context.Context, bumps BUMPs, parentTxs []*bt.Tx) (string, er
return hex.EncodeToString(beefBytes), nil
}

func newBeefTx(ctx context.Context, version uint32, bumps BUMPs, parentTxs []*bt.Tx) (*beefTx, error) {
func newBeefTx(version uint32, bumps BUMPs, parentTxs []*bt.Tx) (*beefTx, error) {
if version > maxBeefVer {
return nil, fmt.Errorf("version above 0x%X", maxBeefVer)
}
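toBeefHex and newBeefTx drop a context.Context parameter that neither function ever read; unused-parameter hints flag these because every caller is forced to thread a value the function ignores. A hedged sketch of the same simplification on an invented helper (encodeHex is not from the repository):

package example

import (
	"encoding/hex"
	"fmt"
)

// Before: func encodeHex(ctx context.Context, version uint32, payload []byte) (string, error)
// After: the ignored ctx parameter is gone, so callers no longer have to supply it.
func encodeHex(version uint32, payload []byte) (string, error) {
	if version > 1 {
		return "", fmt.Errorf("version above 0x%X", 1)
	}
	return hex.EncodeToString(payload), nil
}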
6 changes: 3 additions & 3 deletions beef_tx_sorting.go
@@ -26,7 +26,7 @@ func prepareSortStructures(dag []*bt.Tx) (txByID map[string]*bt.Tx, incomingEdge
incomingEdgesMap = make(map[string]int, dagLen)

for _, tx := range dag {
txByID[tx.TxID()] = tx // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calulation each time
txByID[tx.TxID()] = tx // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calculation each time
incomingEdgesMap[tx.TxID()] = 0
}

@@ -39,7 +39,7 @@ func calculateIncomingEdges(inDegree map[string]int, txByID map[string]*bt.Tx) {
func calculateIncomingEdges(inDegree map[string]int, txByID map[string]*bt.Tx) {
for _, tx := range txByID {
for _, input := range tx.Inputs {
inputUtxoTxID := input.PreviousTxIDStr() // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calulation each time
inputUtxoTxID := input.PreviousTxIDStr() // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calculation each time
if _, ok := txByID[inputUtxoTxID]; ok { // transaction can contains inputs we are not interested in
inDegree[inputUtxoTxID]++
}
@@ -61,7 +61,7 @@ func getTxWithZeroIncomingEdges(incomingEdgesMap map[string]int) []string {

func removeTxFromIncomingEdges(tx *bt.Tx, incomingEdgesMap map[string]int, zeroIncomingEdgeQueue []string) []string {
for _, input := range tx.Inputs {
neighborID := input.PreviousTxIDStr() // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calulation each time
neighborID := input.PreviousTxIDStr() // TODO: perf -> In bt, the TxID is calculated every time we try to get it, which means we hash the tx bytes twice each time. It's expensive operation - try to avoid calculation each time
incomingEdgesMap[neighborID]--

if incomingEdgesMap[neighborID] == 0 {
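The corrected TODO comments all point at one performance note: in bt, TxID()/PreviousTxIDStr() rehash the transaction on every call. One way to act on that note, sketched against a stand-in interface rather than the real bt types, is to compute the ID once per transaction and reuse the cached value:

package example

// hashable stands in for *bt.Tx; only the method that matters here is modelled.
type hashable interface {
	TxID() string // assumed to be expensive (it hashes the serialized transaction)
}

// indexByID calls TxID exactly once per transaction and reuses the cached value.
func indexByID[T hashable](txs []T) map[string]T {
	byID := make(map[string]T, len(txs))
	for _, tx := range txs {
		id := tx.TxID() // computed once, stored in a local
		byID[id] = tx
	}
	return byID
}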
2 changes: 1 addition & 1 deletion bux_suite_mocks_test.go
@@ -48,7 +48,7 @@ func (tm *taskManagerMockBase) IsNewRelicEnabled() bool {
return false
}

func (tm *taskManagerMockBase) CronJobsInit(cronJobsMap taskmanager.CronJobs) error {
func (tm *taskManagerMockBase) CronJobsInit(taskmanager.CronJobs) error {
return nil
}

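The mock keeps the parameter type (it has to match the taskmanager interface method) but drops the name, which Go permits when an implementation never reads the argument. A self-contained sketch with an invented interface:

package example

// cronIniter is an invented stand-in for the interface being satisfied.
type cronIniter interface {
	CronJobsInit(map[string]func()) error
}

type noopManager struct{}

// The parameter type must stay to satisfy the interface; the name is omitted
// because this no-op mock never reads it.
func (noopManager) CronJobsInit(map[string]func()) error { return nil }

var _ cronIniter = noopManager{}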
4 changes: 2 additions & 2 deletions chainstate/client_test.go
@@ -113,10 +113,10 @@ func TestNewClient(t *testing.T) {
require.Error(t, err)
})

t.Run("unreacheble miners", func(t *testing.T) {
t.Run("unreachable miners", func(t *testing.T) {
_, err := NewClient(
context.Background(),
WithMinercraft(&minerCraftUnreachble{}),
WithMinercraft(&minerCraftUnreachable{}),
)
require.Error(t, err)
assert.ErrorIs(t, err, ErrMissingBroadcastMiners)
2 changes: 1 addition & 1 deletion chainstate/merkle_root_provider.go
@@ -85,7 +85,7 @@ func (p pulseClientProvider) verifyMerkleRoots(

err = json.Unmarshal(bodyBytes, &merkleRootsRes)
if err != nil {
return nil, fmt.Errorf("error during unmarshaling response body: %s", err.Error())
return nil, fmt.Errorf("error during unmarshalling response body: %s", err.Error())
}

return &merkleRootsRes, nil
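For context on the corrected message: verifyMerkleRoots decodes the response body with encoding/json and returns a wrapped decoding error. A minimal sketch of that step with an invented response shape; switching %s for %w, as shown, would additionally keep the original error available to errors.Is/errors.As, though the repository code uses %s:

package example

import (
	"encoding/json"
	"fmt"
)

// merkleRootsResponse is an invented shape for illustration only.
type merkleRootsResponse struct {
	ConfirmationState string `json:"confirmationState"`
}

func parseMerkleRootsBody(body []byte) (*merkleRootsResponse, error) {
	var res merkleRootsResponse
	if err := json.Unmarshal(body, &res); err != nil {
		// %w keeps the decoding error available to errors.Is / errors.As.
		return nil, fmt.Errorf("error during unmarshalling response body: %w", err)
	}
	return &res, nil
}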
10 changes: 5 additions & 5 deletions chainstate/minercraft_init.go
@@ -87,9 +87,9 @@ func (i *minercraftInitializer) newClient() (err error) {
return
}

// validateMiners will check if miner is reacheble by requesting its FeeQuote
// validateMiners will check if miner is reachable by requesting its FeeQuote
// If there was on error on FeeQuote(), the miner will be deleted from miners list
// If usage of MapiFeeQuotes is enabled and miner is reacheble, miner's fee unit will be upadeted with MAPI fee quotes
// If usage of MapiFeeQuotes is enabled and miner is reachable, miner's fee unit will be updated with MAPI fee quotes
// If FeeQuote returns some quote, but fee is not presented in it, it means that miner is valid but we can't use it's feequote
func (i *minercraftInitializer) validateMiners() error {
ctxWithCancel, cancel := context.WithTimeout(i.ctx, 5*time.Second)
@@ -113,7 +113,7 @@ func (i *minercraftInitializer) validateMiners() error {
}
wg.Wait()

i.deleteUnreacheableMiners()
i.deleteUnreachableMiners()

switch {
case len(c.options.config.minercraftConfig.broadcastMiners) == 0:
@@ -157,8 +157,8 @@ func (i *minercraftInitializer) addToMinersWithFee(miner *minercraft.Miner, feeU
i.minersWithFee[minerID(miner.MinerID)] = *feeUnit
}

// deleteUnreacheableMiners deletes miners which can't be reacheable from config
func (i *minercraftInitializer) deleteUnreacheableMiners() {
// deleteUnreachableMiners deletes miners which can't be reachable from config
func (i *minercraftInitializer) deleteUnreachableMiners() {
c := i.client
validMiners := []*minercraft.Miner{}
for _, miner := range c.options.config.minercraftConfig.broadcastMiners {
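validateMiners above fans out one FeeQuote call per miner under a shared timeout, waits on a WaitGroup, and then prunes the miners that did not answer. A stripped-down sketch of that fan-out shape, with the miner client replaced by a plain probe function so the example stays self-contained:

package example

import (
	"context"
	"sync"
	"time"
)

// probeAll fans out one probe per endpoint under a shared 5-second timeout and
// keeps only the endpoints that answered, mirroring the validate-then-prune shape above.
func probeAll(parent context.Context, endpoints []string, probe func(context.Context, string) error) []string {
	ctx, cancel := context.WithTimeout(parent, 5*time.Second)
	defer cancel()

	var (
		mu        sync.Mutex
		reachable []string
		wg        sync.WaitGroup
	)
	for _, ep := range endpoints {
		wg.Add(1)
		go func(ep string) {
			defer wg.Done()
			if err := probe(ctx, ep); err != nil {
				return // unreachable endpoints are simply dropped
			}
			mu.Lock()
			reachable = append(reachable, ep)
			mu.Unlock()
		}(ep)
	}
	wg.Wait()
	return reachable
}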
4 changes: 2 additions & 2 deletions chainstate/mock_minercraft.go
@@ -524,11 +524,11 @@ func (m *minerCraftBroadcastSuccess) SubmitTransaction(_ context.Context, miner
return nil, errors.New("missing miner response")
}

type minerCraftUnreachble struct {
type minerCraftUnreachable struct {
MinerCraftBase
}

// FeeQuote returns an error.
func (m *minerCraftUnreachble) FeeQuote(context.Context, *minercraft.Miner) (*minercraft.FeeQuoteResponse, error) {
func (m *minerCraftUnreachable) FeeQuote(context.Context, *minercraft.Miner) (*minercraft.FeeQuoteResponse, error) {
return nil, errors.New("minercraft is unreachable")
}
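The renamed mock illustrates a common Go testing pattern: embed a permissive base mock and override only the single method the test cares about. A self-contained sketch with invented interfaces (quoter and baseMock are not the repository's types):

package example

import "errors"

// quoter is an invented interface standing in for the minercraft client.
type quoter interface {
	FeeQuote() (uint64, error)
}

// baseMock provides permissive defaults for every method.
type baseMock struct{}

func (baseMock) FeeQuote() (uint64, error) { return 1, nil }

// unreachableMock embeds the base and overrides only the behaviour under test.
type unreachableMock struct{ baseMock }

func (unreachableMock) FeeQuote() (uint64, error) {
	return 0, errors.New("unreachable")
}

var _ quoter = unreachableMock{}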
4 changes: 2 additions & 2 deletions model_bump_test.go
@@ -734,10 +734,10 @@ func TestBUMPModel_CalculateMergedBUMPAndHex(t *testing.T) {
"0e" + // 13 - tree height
"02" + // nLeafs at this level
"fd8004" + // offset - 1152
"00" + // flags - data follows, not a cilent txid
"00" + // flags - data follows, not a client txid
"a35764daec4a1cdec33d1108619109b00b9e37c04e9492a9bb875cc31dde4b4d" + // hash
"fd8104" + // offset - 1153
"02" + // flags - data follows, cilent txid
"02" + // flags - data follows, client txid
"da148e7fde1906808a92e8d542cfc6591f697895fe3701a35613fecb3db63021" + // hash
// ----------------------
// implied end of leaves at this height
38 changes: 18 additions & 20 deletions model_utxos.go
@@ -58,8 +58,8 @@ func newUtxo(xPubID, txID, scriptPubKey string, index uint32, satoshis uint64, o

// getSpendableUtxos get all spendable utxos by page / pageSize
func getSpendableUtxos(ctx context.Context, xPubID, utxoType string, queryParams *datastore.QueryParams, //nolint:nolintlint,unparam // this param will be used
fromUtxos []*UtxoPointer, opts ...ModelOps) ([]*Utxo, error) {

fromUtxos []*UtxoPointer, opts ...ModelOps,
) ([]*Utxo, error) {
// Construct the conditions and results
var models []Utxo
conditions := map[string]interface{}{
@@ -145,8 +145,8 @@ func unReserveUtxos(ctx context.Context, xPubID, draftID string, opts ...ModelOp

// reserveUtxos reserve utxos for the given draft ID and amount
func reserveUtxos(ctx context.Context, xPubID, draftID string,
satoshis uint64, feePerByte float64, fromUtxos []*UtxoPointer, opts ...ModelOps) ([]*Utxo, error) {

satoshis uint64, feePerByte float64, fromUtxos []*UtxoPointer, opts ...ModelOps,
) ([]*Utxo, error) {
// Create base model
m := NewBaseModel(ModelNameEmpty, opts...)

@@ -166,7 +166,7 @@ func reserveUtxos(ctx context.Context, xPubID, draftID string,

queryParams := &datastore.QueryParams{}
if fromUtxos == nil {
// if we are not getting all utxos, paginate the retreival
// if we are not getting all utxos, paginate the retrieval
queryParams.Page = 1
queryParams.PageSize = m.pageSize
if queryParams.PageSize == 0 {
@@ -258,8 +258,8 @@ func newUtxoFromTxID(txID string, index uint32, opts ...ModelOps) *Utxo {

// getUtxos will get all the utxos with the given conditions
func getUtxos(ctx context.Context, metadata *Metadata, conditions *map[string]interface{},
queryParams *datastore.QueryParams, opts ...ModelOps) ([]*Utxo, error) {

queryParams *datastore.QueryParams, opts ...ModelOps,
) ([]*Utxo, error) {
modelItems := make([]*Utxo, 0)
if err := getModelsByConditions(ctx, ModelUtxo, &modelItems, metadata, conditions, queryParams, opts...); err != nil {
return nil, err
@@ -270,14 +270,15 @@ func getUtxos(ctx context.Context, metadata *Metadata, conditions *map[string]in

// getAccessKeysCount will get a count of all the utxos with the given conditions
func getUtxosCount(ctx context.Context, metadata *Metadata, conditions *map[string]interface{},
opts ...ModelOps) (int64, error) {
opts ...ModelOps,
) (int64, error) {
return getModelCountByConditions(ctx, ModelUtxo, Utxo{}, metadata, conditions, opts...)
}

// getTransactionsAggregate will get a count of all transactions per aggregate column with the given conditions
func getUtxosAggregate(ctx context.Context, metadata *Metadata, conditions *map[string]interface{},
aggregateColumn string, opts ...ModelOps) (map[string]interface{}, error) {

aggregateColumn string, opts ...ModelOps,
) (map[string]interface{}, error) {
modelItems := make([]*Utxo, 0)
results, err := getModelsAggregateByConditions(
ctx, ModelUtxo, &modelItems, metadata, conditions, aggregateColumn, opts...,
@@ -291,9 +292,9 @@ func getUtxosAggregate(ctx context.Context, metadata *Metadata, conditions *map[

// getUtxosByXpubID will return utxos by a given xPub ID
func getUtxosByXpubID(ctx context.Context, xPubID string, metadata *Metadata, conditions *map[string]interface{},
queryParams *datastore.QueryParams, opts ...ModelOps) ([]*Utxo, error) {

var dbConditions = map[string]interface{}{}
queryParams *datastore.QueryParams, opts ...ModelOps,
) ([]*Utxo, error) {
dbConditions := map[string]interface{}{}
if conditions != nil {
dbConditions = *conditions
}
@@ -308,8 +309,8 @@ func getUtxosByXpubID(ctx context.Context, xPubID string, metadata *Metadata, co

// getUtxosByDraftID will return the utxos by a given draft id
func getUtxosByDraftID(ctx context.Context, draftID string,
queryParams *datastore.QueryParams, opts ...ModelOps) ([]*Utxo, error) {

queryParams *datastore.QueryParams, opts ...ModelOps,
) ([]*Utxo, error) {
conditions := map[string]interface{}{
draftIDField: draftID,
}
@@ -318,8 +319,8 @@ func getUtxosByDraftID(ctx context.Context, draftID string,

// getUtxosByConditions will get utxos by given conditions
func getUtxosByConditions(ctx context.Context, conditions map[string]interface{},
queryParams *datastore.QueryParams, opts ...ModelOps) ([]*Utxo, error) {

queryParams *datastore.QueryParams, opts ...ModelOps,
) ([]*Utxo, error) {
var models []Utxo
if err := getModels(
ctx, NewBaseModel(
@@ -343,7 +344,6 @@ func getUtxosByConditions(ctx context.Context, conditions map[string]interface{}

// getUtxo will get the utxo with the given conditions
func getUtxo(ctx context.Context, txID string, index uint32, opts ...ModelOps) (*Utxo, error) {

// Start the new model
utxo := newUtxoFromTxID(txID, index, opts...)

@@ -389,7 +389,6 @@ func (m *Utxo) GetID() string {

// BeforeCreating will fire before the model is being inserted into the Datastore
func (m *Utxo) BeforeCreating(_ context.Context) error {

m.Client().Logger().Debug().
Str("utxoID", m.ID).
Msgf("starting: %s BeforeCreate hook...", m.Name())
@@ -447,7 +446,6 @@ func (m *Utxo) GenerateID() string {

// Migrate model specific migration on startup
func (m *Utxo) Migrate(client datastore.ClientInterface) error {

tableName := client.GetTableName(tableUTXOs)
if client.Engine() == datastore.MySQL {
if err := m.migrateMySQL(client, tableName); err != nil {
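Most edits in model_utxos.go are one formatting rule (gofumpt): in a multi-line signature the closing parenthesis and return values move to their own line, and the stray blank line after the opening brace is removed. Applied to an invented function:

package example

import "context"

// After the formatting fix: the closing parenthesis and return values sit on
// their own line, and no blank line separates the signature from the body.
func getRecords(ctx context.Context, ids []string,
	pageSize int, includeDeleted bool,
) ([]string, error) {
	out := make([]string, 0, len(ids))
	for i, id := range ids {
		if i == pageSize {
			break
		}
		out = append(out, id)
	}
	_ = ctx // a real implementation would pass ctx to the datastore
	return out, nil
}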
12 changes: 6 additions & 6 deletions paymail_service_provider.go
@@ -150,8 +150,8 @@ func (p *PaymailDefaultServiceProvider) CreateP2PDestinationResponse(
// RecordTransaction will record the transaction
// TODO: rename to HandleReceivedP2pTransaction
func (p *PaymailDefaultServiceProvider) RecordTransaction(ctx context.Context,
p2pTx *paymail.P2PTransaction, requestMetadata *server.RequestMetadata) (*paymail.P2PTransactionPayload, error) {

p2pTx *paymail.P2PTransaction, requestMetadata *server.RequestMetadata,
) (*paymail.P2PTransactionPayload, error) {
// Create the metadata
metadata := p.createMetadata(requestMetadata, "HandleReceivedP2pTransaction")
metadata[p2pMetadataField] = p2pTx.MetaData
@@ -338,7 +338,7 @@ func saveBEEFTxInputs(ctx context.Context, c ClientInterface, dBeef *beef.Decode
}

func getInputsWhichAreNotInDb(c ClientInterface, dBeef *beef.DecodedBEEF) ([]*beef.TxData, error) {
var txIDs []string
txIDs := make([]string, 0, len(dBeef.Transactions))
for _, tx := range dBeef.Transactions {
txIDs = append(txIDs, tx.GetTxID())
}
@@ -369,12 +369,12 @@ func getInputsWhichAreNotInDb(c ClientInterface, dBeef *beef.DecodedBEEF) ([]*be
return txs, nil
}

func getBump(bumpIndx int, bumps beef.BUMPs) (*BUMP, error) {
if bumpIndx > len(bumps) {
func getBump(bumpIndex int, bumps beef.BUMPs) (*BUMP, error) {
if bumpIndex > len(bumps) {
return nil, fmt.Errorf("error in getBump: bump index exceeds bumps length")
}

bump := bumps[bumpIndx]
bump := bumps[bumpIndex]
paths := make([][]BUMPLeaf, 0)

for _, path := range bump.Path {
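getBump guards the bump index before dereferencing it. As a general note, a defensive lookup is usually written with a >= comparison, since an index equal to the slice length would still panic; the sketch below is a generic illustration, not a proposed change to the repository:

package example

import "fmt"

// bumpAt is a defensively bounds-checked lookup; the >= guard also rejects
// an index equal to len(bumps), which a bare > comparison would let through.
func bumpAt(bumps []string, i int) (string, error) {
	if i < 0 || i >= len(bumps) {
		return "", fmt.Errorf("bump index %d out of range (len %d)", i, len(bumps))
	}
	return bumps[i], nil
}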
4 changes: 2 additions & 2 deletions record_tx_strategy_external_incoming_tx.go
@@ -92,7 +92,7 @@ func _addTxToCheck(ctx context.Context, tx *externalIncomingTx, c ClientInterfac
Msg("start ITC")

if err = incomingTx.Save(ctx); err != nil {
return nil, fmt.Errorf("addind new IncomingTx to check queue failed. Reason: %w", err)
return nil, fmt.Errorf("adding new IncomingTx to check queue failed. Reason: %w", err)
}

result := incomingTx.toTransactionDto()
@@ -161,7 +161,7 @@ func _externalIncomingBroadcast(ctx context.Context, logger *zerolog.Logger, tx
Str("txID", tx.ID).
Msgf("broadcasting failed, next try will be handled by task manager. Reason: %s", err)

// ignore error, transaction will be broadcaset in a cron task
// ignore error, transaction will be broadcasted in a cron task
return nil
}

2 changes: 1 addition & 1 deletion record_tx_strategy_internal_incoming_tx.go
@@ -92,7 +92,7 @@ func _internalIncomingBroadcast(ctx context.Context, logger *zerolog.Logger, tra
Str("txID", transaction.ID).
Msgf("broadcasting failed, next try will be handled by task manager. Reason: %s", err)

// ignore broadcast error - will be repeted by task manager
// ignore broadcast error - will be repeated by task manager
return nil
}

1 change: 1 addition & 0 deletions utils/byte_array.go
@@ -14,6 +14,7 @@ func ToByteArray(value interface{}) ([]byte, error) {
}
}

// StrOrBytesToString converts string or []byte to string or returns an error
func StrOrBytesToString(value interface{}) (string, error) {
switch typedValue := value.(type) {
case []byte:
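The diff context cuts StrOrBytesToString off after its []byte case. A self-contained sketch of what such a string-or-bytes conversion typically looks like; the string and default branches are assumptions, not the repository's code:

package example

import "fmt"

// strOrBytesToString converts a string or a []byte to a string and rejects
// every other dynamic type; the default branch is assumed, not copied from the repo.
func strOrBytesToString(value interface{}) (string, error) {
	switch typedValue := value.(type) {
	case []byte:
		return string(typedValue), nil
	case string:
		return typedValue, nil
	default:
		return "", fmt.Errorf("unsupported type %T", value)
	}
}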
2 changes: 1 addition & 1 deletion utils/fees.go
@@ -20,7 +20,7 @@ func (f *FeeUnit) String() string {
return fmt.Sprintf("FeeUnit(%d satoshis / %d bytes)", f.Satoshis, f.Bytes)
}

// IsZero returns true if the fee unit suggets no fees (free)
// IsZero returns true if the fee unit suggest no fees (free)
func (f *FeeUnit) IsZero() bool {
return f.Satoshis == 0
}
