doc: improve godoc for the blob service #3422

Merged
merged 3 commits into from
May 24, 2024
Changes from all commits
5 changes: 3 additions & 2 deletions blob/blob.go
@@ -31,8 +31,9 @@ func (com Commitment) Equal(c Commitment) bool {
return bytes.Equal(com, c)
}

// Proof is a collection of nmt.Proofs that verifies the inclusion of the data.
// Proof proves the WHOLE namespaced data for the particular row.
// The Proof is a set of nmt proofs that can be verified only through
// the Included method (due to a limitation of the nmt library: https://github.com/celestiaorg/nmt/issues/218).
// Proof proves the WHOLE namespaced data to the row roots.
// TODO (@vgonkivs): rework `Proof` in order to prove a particular blob.
// https://github.com/celestiaorg/celestia-node/issues/2303
type Proof []*nmt.Proof
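Since the updated comment states that a `Proof` can only be checked through the service's `Included` method, a short caller-side sketch may help. It assumes the `Service.GetProof` and `Service.Included` signatures used elsewhere in this repository and the `share.Namespace` type of this release; `verifyBlobInclusion` is a hypothetical helper name, not part of the PR.

```go
package example

import (
	"context"

	"github.com/celestiaorg/celestia-node/blob"
	"github.com/celestiaorg/celestia-node/share"
)

// verifyBlobInclusion is a hypothetical helper: a Proof cannot be verified on
// its own, so it is handed back to Service.Included together with the
// commitment it was requested for.
func verifyBlobInclusion(
	ctx context.Context,
	svc *blob.Service,
	height uint64,
	ns share.Namespace,
	com blob.Commitment,
) (bool, error) {
	// request the proof for the blob identified by (height, namespace, commitment)
	proof, err := svc.GetProof(ctx, height, ns, com)
	if err != nil {
		return false, err
	}
	// Included re-collects the namespaced shares, rebuilds the commitment,
	// and checks the proof against the row roots.
	return svc.Included(ctx, height, ns, proof, com)
}
```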
31 changes: 16 additions & 15 deletions blob/parser.go
@@ -7,21 +7,20 @@ import (
"github.com/celestiaorg/celestia-app/pkg/shares"
)

// parser is a helper struct that allows collecting shares and transforming them into the blob.
// it contains all necessary information that is needed to build the blob:
// * position of the blob inside the EDS;
// * blob's length;
// * shares needed to build the blob;
// * extra condition to verify the final blob.
// parser collects shares and transforms them into a blob.
// It can handle only one blob at a time.
type parser struct {
index int
length int
// index is the position of the blob inside the EDS.
index int
// length is the number of shares needed to build the blob.
length int
// shares is a set of shares to build the blob.
shares []shares.Share
verifyFn func(blob *Blob) bool
}

// NOTE: passing shares here needed to detect padding shares(as we do not need this check in
// addShares)
// set skips padding shares to find the blob's first share and
// sets the blob's metadata (index and length).
func (p *parser) set(index int, shrs []shares.Share) ([]shares.Share, error) {
if len(shrs) == 0 {
return nil, errEmptyShares
@@ -47,9 +46,9 @@ func (p *parser) set(index int, shrs []shares.Share) ([]shares.Share, error) {
return shrs, nil
}

// addShares sets shares until the blob is completed and returns extra shares back.
// we do not need here extra condition to check padding shares as we do not expect it here.
// it is possible only between two blobs.
// addShares adds shares until the blob is complete and returns the extra shares.
// It assumes that the shares needed to complete the blob are correct and
// do not include padding shares.
func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComplete bool) {
index := -1
for i, sh := range shares {
@@ -71,7 +70,7 @@ func (p *parser) addShares(shares []shares.Share) (shrs []shares.Share, isComplete bool) {
return shares[index+1:], true
}

// parse parses shares and creates the Blob.
// parse ensures that the correct number of shares was collected and creates a blob from the existing shares.
func (p *parser) parse() (*Blob, error) {
if p.length != len(p.shares) {
return nil, fmt.Errorf("invalid shares amount. want:%d, have:%d", p.length, len(p.shares))
@@ -111,7 +110,8 @@ func (p *parser) parse() (*Blob, error) {
return blob, nil
}

// skipPadding skips first share in the range if this share is the Padding share.
// skipPadding iterates through the shares until a non-padding share is found. It guarantees that
// the returned set of shares starts with a non-padding share (or is empty).
func (p *parser) skipPadding(shares []shares.Share) ([]shares.Share, error) {
if len(shares) == 0 {
return nil, errEmptyShares
@@ -147,6 +147,7 @@ func (p *parser) isEmpty() bool {
return p.index == 0 && p.length == 0 && len(p.shares) == 0
}

// reset cleans up the parser so that it can be reused with the same verify function.
func (p *parser) reset() {
p.index = 0
p.length = 0
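Taken together, the parser doc comments above describe a simple lifecycle: set, then addShares per row, then parse, then reset. The sketch below is hypothetical in-package code (the `collectBlob` helper and its row-driven loop are assumptions) built only from the method signatures visible in this diff.

```go
package blob

import (
	"fmt"

	"github.com/celestiaorg/celestia-app/pkg/shares"
)

// collectBlob is a hypothetical in-package helper that drives one parser
// through its lifecycle: set records the blob's index and length from the
// first non-padding share, addShares is fed rows until the blob is complete,
// parse builds the blob, and reset readies the parser for the next blob.
func collectBlob(index int, rows [][]shares.Share, verify func(*Blob) bool) (*Blob, error) {
	p := &parser{verifyFn: verify}
	for _, row := range rows {
		if p.isEmpty() {
			// set skips leading padding shares and records the blob metadata.
			rest, err := p.set(index, row)
			if err != nil {
				return nil, err
			}
			row = rest
		}

		leftover, complete := p.addShares(row)
		if !complete {
			// the blob continues in the next row
			continue
		}
		_ = leftover // shares that belong to the next blob in the namespace

		blob, err := p.parse()
		if err != nil {
			return nil, err
		}
		p.reset()
		return blob, nil
	}
	return nil, fmt.Errorf("shares exhausted before the blob was complete")
}
```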
22 changes: 10 additions & 12 deletions blob/service.go
@@ -111,6 +111,8 @@ func (s *Service) Submit(ctx context.Context, blobs []*Blob, gasPrice GasPrice)
}

// Get retrieves all the blobs for given namespaces at the given height by commitment.
// Get collects all namespaced data from the EDS, constructs blobs,
// and compares commitments. `ErrBlobNotFound` is returned if the blob is not found.
func (s *Service) Get(
ctx context.Context,
height uint64,
@@ -135,7 +137,8 @@
}

// GetProof retrieves all blobs in the given namespaces at the given height by commitment
// and returns their Proof.
// and returns their Proof. It collects all namespaced data from the EDS, constructs blobs
// and compares commitments.
func (s *Service) GetProof(
ctx context.Context,
height uint64,
@@ -227,13 +230,6 @@ func (s *Service) Included(
)

// In the current implementation, LNs will have to download all shares to recompute the commitment.
// To achieve 1. we need to modify Proof structure and to store all subtree roots, that were
// involved in commitment creation and then call `merkle.HashFromByteSlices`(tendermint package).
// nmt.Proof is verifying share inclusion by recomputing row roots, so, theoretically, we can do
// the same but using subtree roots. For this case, we need an extra method in nmt.Proof
// that will perform all reconstructions,
// but we have to guarantee that all our stored subtree roots will be on the same height(e.g. one
// level above shares).
// TODO(@vgonkivs): rework the implementation to perform all verification without network requests.
sharesParser := &parser{verifyFn: func(blob *Blob) bool {
return blob.compareCommitments(commitment)
@@ -250,8 +246,8 @@
}

// retrieve retrieves blobs and their proofs by requesting the whole namespace and
// comparing Commitments with each blob.
// Retrieving is stopped once the requested blob/proof is found.
// comparing Commitments.
// Retrieving stops once the `verifyFn` condition in `sharesParser` is met.
func (s *Service) retrieve(
ctx context.Context,
height uint64,
@@ -284,6 +280,7 @@

getCtx, getSharesSpan := tracer.Start(ctx, "get-shares-by-namespace")

// collect shares for the requested namespace
namespacedShares, err := s.shareGetter.GetSharesByNamespace(getCtx, header, namespace)
if err != nil {
if errors.Is(err, share.ErrNotFound) {
@@ -338,19 +335,20 @@
return nil, nil, err
}

// update index and shares if padding shares were detected.
if len(appShares) != len(shrs) {
// update index and shares if a padding share was detected.
index += len(appShares) - len(shrs)
appShares = shrs
}
}

shrs, isComplete = sharesParser.addShares(appShares)
// move to the next row if the blob is incomplete
if !isComplete {
appShares = nil
break
}

// otherwise, construct the blob
blob, err := sharesParser.parse()
if err != nil {
return nil, nil, err
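The updated Get comment notes that `ErrBlobNotFound` is returned when no blob under the namespace matches the commitment. A minimal caller-side sketch, assuming the `Service.Get` signature in this file; `fetchBlob` is a hypothetical wrapper, not part of the PR.

```go
package example

import (
	"context"
	"errors"

	"github.com/celestiaorg/celestia-node/blob"
	"github.com/celestiaorg/celestia-node/share"
)

// fetchBlob is a hypothetical wrapper around Service.Get that distinguishes a
// missing blob from other failures instead of propagating ErrBlobNotFound.
func fetchBlob(
	ctx context.Context,
	svc *blob.Service,
	height uint64,
	ns share.Namespace,
	com blob.Commitment,
) (*blob.Blob, bool, error) {
	b, err := svc.Get(ctx, height, ns, com)
	if errors.Is(err, blob.ErrBlobNotFound) {
		// no blob under this namespace matches the commitment at this height
		return nil, false, nil
	}
	if err != nil {
		return nil, false, err
	}
	return b, true, nil
}
```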
7 changes: 4 additions & 3 deletions blob/service_test.go
@@ -9,15 +9,16 @@ import (
"testing"
"time"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/celestiaorg/go-header/store"
ds "github.com/ipfs/go-datastore"
ds_sync "github.com/ipfs/go-datastore/sync"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
tmrand "github.com/tendermint/tendermint/libs/rand"

"github.com/celestiaorg/celestia-app/pkg/appconsts"
"github.com/celestiaorg/celestia-app/pkg/shares"
"github.com/celestiaorg/go-header/store"

"github.com/celestiaorg/celestia-node/blob/blobtest"
"github.com/celestiaorg/celestia-node/header"
"github.com/celestiaorg/celestia-node/header/headertest"