From 818ba0f2666f933cec05b09ba0937ee03b5399a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Thu, 25 Jul 2024 12:27:10 +0300 Subject: [PATCH 01/12] all: plumbing to get fork-monolith support in --- cmd/sszgen/gen.go | 50 +++++--- cmd/sszgen/opset.go | 18 +-- codec.go | 7 ++ decoder.go | 14 ++- encoder.go | 20 ++-- example_asymmetric_test.go | 4 +- example_checked_test.go | 2 +- example_dynamic_test.go | 10 +- example_static_test.go | 2 +- forks.go | 81 +++++++++++++ genutils.go | 34 ++++++ hasher.go | 9 +- sizer.go | 32 ++++-- ssz.go | 107 +++++++++++++++--- tests/corner_cases_test.go | 8 +- .../gen_aggregate_and_proof_ssz.go | 4 +- .../gen_attestation_data_ssz.go | 9 +- .../gen_attestation_ssz.go | 14 ++- .../gen_attester_slashing_ssz.go | 6 +- .../gen_beacon_block_body_altair_ssz.go | 22 ++-- .../gen_beacon_block_body_bellatrix_ssz.go | 24 ++-- .../gen_beacon_block_body_capella_ssz.go | 26 +++-- .../gen_beacon_block_body_deneb_ssz.go | 28 +++-- .../gen_beacon_block_body_ssz.go | 22 ++-- .../gen_beacon_block_header_ssz.go | 2 +- .../gen_beacon_block_ssz.go | 4 +- .../gen_beacon_state_capella_ssz.go | 30 +++-- .../gen_beacon_state_deneb.go | 72 ------------ .../gen_beacon_state_deneb_ssz.go | 78 +++++++++++++ .../gen_beacon_state_ssz.go | 24 ++-- .../gen_bls_to_execution_change_ssz.go | 2 +- .../gen_checkpoint_ssz.go | 2 +- .../gen_deposit_data_ssz.go | 2 +- .../gen_deposit_message_ssz.go | 2 +- .../consensus-spec-tests/gen_deposit_ssz.go | 9 +- .../gen_eth1_block_ssz.go | 2 +- .../consensus-spec-tests/gen_eth1_data_ssz.go | 2 +- .../gen_execution_payload_capella_ssz.go | 8 +- .../gen_execution_payload_deneb_ssz.go | 8 +- ...en_execution_payload_header_capella_ssz.go | 4 +- .../gen_execution_payload_header_deneb_ssz.go | 4 +- .../gen_execution_payload_header_ssz.go | 4 +- .../gen_execution_payload_ssz.go | 6 +- .../gen_execution_payload_variation_ssz.go | 6 +- .../consensus-spec-tests/gen_fork_ssz.go | 2 +- .../gen_historical_batch_ssz.go | 2 +- .../gen_historical_batch_variation_ssz.go | 2 +- .../gen_historical_summary_ssz.go | 2 +- .../gen_indexed_attestation_ssz.go | 14 ++- .../gen_pending_attestation_ssz.go | 14 ++- .../gen_proposer_slashing_ssz.go | 9 +- .../gen_signed_beacon_block_header_ssz.go | 9 +- .../gen_signed_bls_to_execution_change_ssz.go | 9 +- .../gen_signed_voluntary_exit_ssz.go | 9 +- .../gen_sync_aggregate_ssz.go | 2 +- .../gen_sync_committee_ssz.go | 2 +- .../consensus-spec-tests/gen_validator_ssz.go | 2 +- .../gen_voluntary_exit_ssz.go | 2 +- .../gen_withdrawal_ssz.go | 2 +- .../gen_withdrawal_variation_ssz.go | 2 +- .../consensus-spec-tests/types_consensus.go | 1 + 61 files changed, 620 insertions(+), 289 deletions(-) create mode 100644 forks.go create mode 100644 genutils.go delete mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go index 3c153aa..1ecfbfb 100644 --- a/cmd/sszgen/gen.go +++ b/cmd/sszgen/gen.go @@ -109,7 +109,14 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { // variable to run it on package init if runtime { fmt.Fprintf(&b, "// Cached static size computed on package init.\n") - fmt.Fprintf(&b, "var staticSizeCache%s = ", typ.named.Obj().Name()) + fmt.Fprintf(&b, "var staticSizeCache%s = ssz.PrecomputeStaticSizeCache((*%s)(nil))\n\n", typ.named.Obj().Name(), typ.named.Obj().Name()) + + fmt.Fprintf(&b, "// SizeSSZ returns the total size 
of the static ssz object.\n") + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " return staticSizeCache%s[fork]\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " }\n") + fmt.Fprintf(&b, " return ") for i := range typ.opsets { if bytes := typ.opsets[i].(*opsetStatic).bytes; bytes != nil { if len(bytes) == 1 { @@ -121,23 +128,20 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) pkg := typ.Obj().Pkg() if pkg.Path() == ctx.pkg.Path() { - fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ()", typ.Obj().Name()) + fmt.Fprintf(&b, "ssz.Size((*%s)(nil))", typ.Obj().Name()) } else { ctx.addImport(pkg.Path(), "") - fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ()", pkg.Name(), typ.Obj().Name()) + fmt.Fprintf(&b, "ssz.Size((*%s.%s)(nil))", pkg.Name(), typ.Obj().Name()) } } if i < len(typ.opsets)-1 { fmt.Fprint(&b, " + ") } } - fmt.Fprintf(&b, "\n\n// SizeSSZ returns the total size of the static ssz object.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ() uint32 {\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, " return staticSizeCache%s\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, "}\n") + fmt.Fprintf(&b, "\n}\n") } else { fmt.Fprint(&b, "// SizeSSZ returns the total size of the static ssz object.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ() uint32 {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name()) fmt.Fprint(&b, " return ") for i := range typ.opsets { bytes := typ.opsets[i].(*opsetStatic).bytes @@ -168,7 +172,15 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { // variable to run it on package init if runtime { fmt.Fprintf(&b, "// Cached static size computed on package init.\n") - fmt.Fprintf(&b, "var staticSizeCache%s = ", typ.named.Obj().Name()) + fmt.Fprintf(&b, "var staticSizeCache%s = ssz.PrecomputeStaticSizeCache((*%s)(nil))\n\n", typ.named.Obj().Name(), typ.named.Obj().Name()) + + fmt.Fprintf(&b, "// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " // Load static size if already precomputed, calculate otherwise\n") + fmt.Fprintf(&b, " if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " size = staticSizeCache%s[fork]\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " } else {\n") + fmt.Fprintf(&b, " size = ") for i := range typ.opsets { switch t := typ.opsets[i].(type) { case *opsetStatic: @@ -182,28 +194,29 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) pkg := typ.Obj().Pkg() if pkg.Path() == ctx.pkg.Path() { - fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ()", typ.Obj().Name()) + fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ(sizer)", typ.Obj().Name()) } else { ctx.addImport(pkg.Path(), "") - fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ()", pkg.Name(), typ.Obj().Name()) + fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ(sizer)", pkg.Name(), typ.Obj().Name()) } } case *opsetDynamic: fmt.Fprintf(&b, "%d", offsetBytes) } if i < len(typ.opsets)-1 { - fmt.Fprint(&b, " + ") + fmt.Fprintf(&b, " + ") + } else { + fmt.Fprintf(&b, "\n") } } - fmt.Fprintf(&b, "\n\n// 
SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(fixed bool) uint32 {\n", typ.named.Obj().Name()) - fmt.Fprintf(&b, " var size = uint32(staticSizeCache%s)\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, " }\n") + fmt.Fprintf(&b, " // Either return the static size or accumulate the dynamic too\n") fmt.Fprintf(&b, " if (fixed) {\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, " }\n") for i := range typ.opsets { if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - call := generateCall(opset.size, "", "obj."+typ.fields[i]) + call := generateCall(opset.size, "sizer", "obj."+typ.fields[i]) fmt.Fprintf(&b, " size += ssz.%s\n", call) } } @@ -212,7 +225,7 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, "}\n") } else { fmt.Fprintf(&b, "\n\n// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n") - fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(fixed bool) uint32 {\n", typ.named.Obj().Name()) + fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {\n", typ.named.Obj().Name()) fmt.Fprintf(&b, " var size = uint32(") for i := range typ.opsets { switch t := typ.opsets[i].(type) { @@ -235,7 +248,7 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " }\n") for i := range typ.opsets { if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - call := generateCall(opset.size, "", "obj."+typ.fields[i]) + call := generateCall(opset.size, "sizer", "obj."+typ.fields[i]) fmt.Fprintf(&b, " size += ssz.%s\n", call) } } @@ -324,6 +337,7 @@ func generateCall(tmpl string, recv string, field string, limits ...int) string panic(err) } d := map[string]interface{}{ + "Sizer": recv, "Codec": recv, "Field": field, } diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go index 27f8470..c995172 100644 --- a/cmd/sszgen/opset.go +++ b/cmd/sszgen/opset.go @@ -27,7 +27,7 @@ type opsetStatic struct { // codec operates on a given dynamic type. Ideally these would be some go/types // function values, but alas too much pain, especially with generics. 
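
// A hedged illustration of how these templates expand (the names below are
// examples, not generator output): generateCall substitutes the receiver name
// for {{.Sizer}}/{{.Codec}} and the field expression for {{.Field}}, so
//
//	generateCall("SizeSliceOfUint64s({{.Sizer}}, {{.Field}})", "sizer", "obj.Balances")
//
// renders "SizeSliceOfUint64s(sizer, obj.Balances)", which the generator then
// emits as the statement
//
//	size += ssz.SizeSliceOfUint64s(sizer, obj.Balances)
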
type opsetDynamic struct { - size string // SizeXYZ method for the SizeSSZ method + size string // SizeXYZ method for the ssz.Sizer defineOffset string // DefineXYZOffset method for the ssz.Codec defineContent string // DefineXYZContent method for the ssz.Codec encodeOffset string // EncodeXYZOffset method for the ssz.Encoder @@ -119,7 +119,7 @@ func (p *parseContext) resolveBitlistOpset(tags *sizeTag) (opset, error) { return nil, fmt.Errorf("slice of bits tag conflict: field supports [N] bits, tag wants %v bits", tags.limit) } return &opsetDynamic{ - "SizeSliceOfBits({{.Field}})", + "SizeSliceOfBits({{.Sizer}}, {{.Field}})", fmt.Sprintf("DefineSliceOfBitsOffset({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly fmt.Sprintf("DefineSliceOfBitsContent({{.Codec}}, &{{.Field}}, %d)", tags.limit[0]), // inject bit-cap directly "EncodeSliceOfBitsOffset({{.Codec}}, &{{.Field}})", @@ -262,7 +262,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of byte basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeDynamicBytes({{.Field}})", + "SizeDynamicBytes({{.Sizer}}, {{.Field}})", "DefineDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeDynamicBytesOffset({{.Codec}}, &{{.Field}})", @@ -297,7 +297,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of uint64 basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfUint64s({{.Field}})", + "SizeSliceOfUint64s({{.Sizer}}, {{.Field}})", "DefineSliceOfUint64sOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfUint64sContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfUint64sOffset({{.Codec}}, &{{.Field}})", @@ -319,7 +319,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of static objects type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfStaticObjects({{.Field}})", + "SizeSliceOfStaticObjects({{.Sizer}}, {{.Field}})", "DefineSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfStaticObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfStaticObjectsOffset({{.Codec}}, &{{.Field}})", @@ -337,7 +337,7 @@ func (p *parseContext) resolveSliceOpset(typ types.Type, tags *sizeTag) (opset, return nil, fmt.Errorf("dynamic slice of dynamic objects type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfDynamicObjects({{.Field}})", + "SizeSliceOfDynamicObjects({{.Sizer}}, {{.Field}})", "DefineSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfDynamicObjectsContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfDynamicObjectsOffset({{.Codec}}, &{{.Field}})", @@ -393,7 +393,7 @@ func (p *parseContext) resolveSliceOfArrayOpset(typ types.Type, innerSize int, t return nil, fmt.Errorf("dynamic slice of array of byte basic type tag conflict: needs [N] tag, has %v", tags.limit) } return &opsetDynamic{ - "SizeSliceOfStaticBytes({{.Field}})", + "SizeSliceOfStaticBytes({{.Sizer}}, {{.Field}})", "DefineSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "DefineSliceOfStaticBytesContent({{.Codec}}, &{{.Field}}, {{.MaxSize}})", "EncodeSliceOfStaticBytesOffset({{.Codec}}, &{{.Field}})", @@ 
-428,7 +428,7 @@ func (p *parseContext) resolveSliceOfSliceOpset(typ types.Type, tags *sizeTag) (
 		return nil, fmt.Errorf("dynamic slice of dynamic slice of byte basic type tag conflict: needs [N, M] ssz-max tag, has %v", tags.limit)
 	}
 	return &opsetDynamic{
-		"SizeSliceOfDynamicBytes({{.Field}})",
+		"SizeSliceOfDynamicBytes({{.Sizer}}, {{.Field}})",
 		"DefineSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})",
 		"DefineSliceOfDynamicBytesContent({{.Codec}}, &{{.Field}}, {{.MaxItems}}, {{.MaxSize}})",
 		"EncodeSliceOfDynamicBytesOffset({{.Codec}}, &{{.Field}})",
@@ -498,7 +498,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o
 		return nil, fmt.Errorf("dynamic object type cannot have any ssz tags")
 	}
 	return &opsetDynamic{
-		"SizeDynamicObject({{.Field}})",
+		"SizeDynamicObject({{.Sizer}}, {{.Field}})",
 		"DefineDynamicObjectOffset({{.Codec}}, &{{.Field}})",
 		"DefineDynamicObjectContent({{.Codec}}, &{{.Field}})",
 		"EncodeDynamicObjectOffset({{.Codec}}, &{{.Field}})",
diff --git a/codec.go b/codec.go
index 061e9a0..97ea6c8 100644
--- a/codec.go
+++ b/codec.go
@@ -15,6 +15,8 @@ import (
 // define their schemas once and have that work for both operations at once
 // (with the same speed as explicitly typing them out would, of course).
 type Codec struct {
+	fork Fork // Context for cross-fork monolith types
+
 	enc *Encoder
 	dec *Decoder
 	has *Hasher
@@ -53,6 +55,11 @@ func (c *Codec) DefineHasher(impl func(has *Hasher)) {
 	}
 }
 
+// Fork retrieves the current fork (if any) that the codec is operating in.
+func (c *Codec) Fork() Fork {
+	return c.fork
+}
+
 // DefineBool defines the next field as a 1 byte boolean.
 func DefineBool[T ~bool](c *Codec, v *T) {
 	if c.enc != nil {
diff --git a/decoder.go b/decoder.go
index 08777cc..7f1f13d 100644
--- a/decoder.go
+++ b/decoder.go
@@ -57,9 +57,10 @@ type Decoder struct {
 	inBufPtrs []uintptr // Stack of starting pointers from outer calls (buffered mode)
 	inBufEnd  uintptr   // Ending pointer in the input buffer (buffered mode)
 
-	err error // Any write error to halt future encoding calls
+	err   error  // Any read error to halt future decoding calls
+	codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick)
+	sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick)
 
-	codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick)
 	buf    [32]byte    // Integer conversion buffer
 	bufInt uint256.Int // Big.Int conversion buffer (not pointer, alloc free)
 
@@ -73,6 +74,11 @@ type Decoder struct {
 	sizess [][]uint32 // Stack of computed sizes from outer calls
 }
 
+// Fork retrieves the current fork (if any) that the decoder is operating in.
+func (dec *Decoder) Fork() Fork {
+	return dec.codec.fork
+}
+
 // DecodeBool parses a boolean.
func DecodeBool[T ~bool](dec *Decoder, v *T) { if dec.err != nil { @@ -354,7 +360,7 @@ func DecodeDynamicObjectContent[T newableDynamicObject[U], U any](dec *Decoder, if *obj == nil { *obj = T(new(U)) } - dec.startDynamics((*obj).SizeSSZ(true)) + dec.startDynamics((*obj).SizeSSZ(dec.sizer, true)) (*obj).DefineSSZ(dec.codec) dec.flushDynamics() } @@ -743,7 +749,7 @@ func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Dec // Compute the number of items based on the item size of the type var sizer T // SizeSSZ is on *U, objects is static, so nil T is fine - itemSize := sizer.SizeSSZ() + itemSize := sizer.SizeSSZ(dec.sizer) if size%itemSize != 0 { dec.err = fmt.Errorf("%w: length %d, item size %d", ErrDynamicStaticsIndivisible, size, itemSize) return diff --git a/encoder.go b/encoder.go index 2f262e1..6b2acb1 100644 --- a/encoder.go +++ b/encoder.go @@ -71,6 +71,7 @@ type Encoder struct { err error // Any write error to halt future encoding calls codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick) buf [32]byte // Integer conversion buffer bufInt uint256.Int // Big.Int conversion buffer (not pointer, alloc free) @@ -78,6 +79,11 @@ type Encoder struct { offset uint32 // Offset tracker for dynamic fields } +// Fork retrieves the current fork (if any) that the encoder is operating in. +func (enc *Encoder) Fork() Fork { + return enc.codec.fork +} + // EncodeBool serializes a boolean. func EncodeBool[T ~bool](enc *Encoder, v T) { if enc.outWriter != nil { @@ -288,7 +294,7 @@ func EncodeDynamicObjectOffset(enc *Encoder, obj DynamicObject) { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] } - enc.offset += obj.SizeSSZ(false) + enc.offset += obj.SizeSSZ(enc.sizer, false) } // EncodeDynamicObjectContent is the lazy data writer for EncodeDynamicObjectOffset. 
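
// Hypothetical usage sketch (MonolithHeader is an assumed example type, not
// part of this patch): the new Fork accessors let hand-written asymmetric
// codecs branch their schema on the fork being encoded or decoded:
//
//	func (h *MonolithHeader) DefineSSZ(codec *ssz.Codec) {
//		ssz.DefineUint64(codec, &h.Slot)
//		if codec.Fork() >= ssz.ForkShapella {
//			ssz.DefineUint64(codec, &h.WithdrawalCount) // assumed Shapella-only field
//		}
//	}
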
@@ -296,7 +302,7 @@ func EncodeDynamicObjectContent(enc *Encoder, obj DynamicObject) { if enc.err != nil { return } - enc.offsetDynamics(obj.SizeSSZ(true)) + enc.offsetDynamics(obj.SizeSSZ(enc.sizer, true)) obj.DefineSSZ(enc.codec) } @@ -590,7 +596,7 @@ func EncodeSliceOfStaticObjectsOffset[T StaticObject](enc *Encoder, objects []T) enc.outBuffer = enc.outBuffer[4:] } if items := len(objects); items > 0 { - enc.offset += uint32(items) * objects[0].SizeSSZ() + enc.offset += uint32(items) * objects[0].SizeSSZ(enc.sizer) } } @@ -617,7 +623,7 @@ func EncodeSliceOfDynamicObjectsOffset[T DynamicObject](enc *Encoder, objects [] enc.outBuffer = enc.outBuffer[4:] } for _, obj := range objects { - enc.offset += 4 + obj.SizeSSZ(false) + enc.offset += 4 + obj.SizeSSZ(enc.sizer, false) } } @@ -638,14 +644,14 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ binary.LittleEndian.PutUint32(enc.buf[:4], enc.offset) _, enc.err = enc.outWriter.Write(enc.buf[:4]) - enc.offset += obj.SizeSSZ(false) + enc.offset += obj.SizeSSZ(enc.sizer, false) } } else { for _, obj := range objects { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] - enc.offset += obj.SizeSSZ(false) + enc.offset += obj.SizeSSZ(enc.sizer, false) } } // Inline: @@ -657,7 +663,7 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ if enc.err != nil { return } - enc.offsetDynamics(obj.SizeSSZ(true)) + enc.offsetDynamics(obj.SizeSSZ(enc.sizer, true)) obj.DefineSSZ(enc.codec) } } diff --git a/example_asymmetric_test.go b/example_asymmetric_test.go index a9028c5..368db8b 100644 --- a/example_asymmetric_test.go +++ b/example_asymmetric_test.go @@ -17,7 +17,7 @@ type WithdrawalAsym struct { Amount uint64 `ssz-size:"8"` } -func (w *WithdrawalAsym) SizeSSZ() uint32 { return 44 } +func (w *WithdrawalAsym) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 } func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { codec.DefineEncoder(func(enc *ssz.Encoder) { @@ -41,7 +41,7 @@ func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { } func ExampleEncodeAsymmetricObject() { - blob := make([]byte, (*WithdrawalAsym)(nil).SizeSSZ()) + blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil))) if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym)); err != nil { panic(err) } diff --git a/example_checked_test.go b/example_checked_test.go index 453601c..41d9e16 100644 --- a/example_checked_test.go +++ b/example_checked_test.go @@ -17,7 +17,7 @@ type WithdrawalChecked struct { Amount uint64 `ssz-size:"8"` } -func (w *WithdrawalChecked) SizeSSZ() uint32 { return 44 } +func (w *WithdrawalChecked) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 } func (w *WithdrawalChecked) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &w.Index) // Field (0) - Index - 8 bytes diff --git a/example_dynamic_test.go b/example_dynamic_test.go index 32b1d8c..86fd3e7 100644 --- a/example_dynamic_test.go +++ b/example_dynamic_test.go @@ -32,16 +32,16 @@ type ExecutionPayload struct { Withdrawals []*Withdrawal `ssz-max:"16"` } -func (e *ExecutionPayload) SizeSSZ(fixed bool) uint32 { +func (e *ExecutionPayload) SizeSSZ(siz *ssz.Sizer, fixed bool) uint32 { // Start out with the static size size := uint32(512) if fixed { return size } // Append all the dynamic sizes - size += ssz.SizeDynamicBytes(e.ExtraData) // Field (10) - ExtraData - max 32 bytes (not enforced) - size += ssz.SizeSliceOfDynamicBytes(e.Transactions) // Field (13) - Transactions - max 1048576 items, 1073741824 bytes each (not enforced) - 
size += ssz.SizeSliceOfStaticObjects(e.Withdrawals) // Field (14) - Withdrawals - max 16 items, 44 bytes each (not enforced)
+	size += ssz.SizeDynamicBytes(siz, e.ExtraData)           // Field (10) - ExtraData - max 32 bytes (not enforced)
+	size += ssz.SizeSliceOfDynamicBytes(siz, e.Transactions) // Field (13) - Transactions - max 1048576 items, 1073741824 bytes each (not enforced)
+	size += ssz.SizeSliceOfStaticObjects(siz, e.Withdrawals) // Field (14) - Withdrawals - max 16 items, 44 bytes each (not enforced)
 
 	return size
 }
@@ -72,7 +72,7 @@ func (e *ExecutionPayload) DefineSSZ(codec *ssz.Codec) {
 
 func ExampleEncodeDynamicObject() {
 	obj := new(ExecutionPayload)
-	blob := make([]byte, obj.SizeSSZ(false))
+	blob := make([]byte, ssz.Size(obj))
 	if err := ssz.EncodeToBytes(blob, obj); err != nil {
 		panic(err)
 	}
diff --git a/example_static_test.go b/example_static_test.go
index f433baa..70cbc08 100644
--- a/example_static_test.go
+++ b/example_static_test.go
@@ -20,7 +20,7 @@ type Withdrawal struct {
 	Amount uint64 `ssz-size:"8"`
 }
 
-func (w *Withdrawal) SizeSSZ() uint32 { return 44 }
+func (w *Withdrawal) SizeSSZ(siz *ssz.Sizer) uint32 { return 44 }
 
 func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) {
 	ssz.DefineUint64(codec, &w.Index) // Field (0) - Index - 8 bytes
diff --git a/forks.go b/forks.go
new file mode 100644
index 0000000..b11c3ce
--- /dev/null
+++ b/forks.go
@@ -0,0 +1,81 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ssz
+
+// Fork is an enum with all the hard forks that Ethereum mainnet went through,
+// which can be used to multiplex monolith types that can encode/decode across
+// a range of forks, not just for one specific fork.
+//
+// These enums are only meaningful in relation to one another, but are completely
+// meaningless numbers otherwise. Do not persist them across code versions.
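
// Since the constants below are ordered, monolith codecs can use range checks
// against named forks (sketch only, not part of this patch):
//
//	if fork := codec.Fork(); fork >= ssz.ForkBellatrix && fork < ssz.ForkShapella {
//		// schema variations that apply between the Merge and Shanghai only
//	}
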
+type Fork int + +const ( + ForkUnknown Fork = iota // Placeholder if forks haven't been specified (must be index 0) + + ForkFrontier // https://ethereum.org/en/history/#frontier + ForkHomestead // https://ethereum.org/en/history/#homestead + ForkDAO // https://ethereum.org/en/history/#dao-fork + ForkTangerine // https://ethereum.org/en/history/#tangerine-whistle + ForkSpurious // https://ethereum.org/en/history/#spurious-dragon + ForkByzantium // https://ethereum.org/en/history/#byzantium + ForkConstantinople // https://ethereum.org/en/history/#constantinople + ForkIstanbul // https://ethereum.org/en/history/#istanbul + ForkMuir // https://ethereum.org/en/history/#muir-glacier + ForkPhase0 // https://ethereum.org/en/history/#beacon-chain-genesis + ForkBerlin // https://ethereum.org/en/history/#berlin + ForkLondon // https://ethereum.org/en/history/#london + ForkAltair // https://ethereum.org/en/history/#altair + ForkArrow // https://ethereum.org/en/history/#arrow-glacier + ForkGray // https://ethereum.org/en/history/#gray-glacier + ForkBellatrix // https://ethereum.org/en/history/#bellatrix + ForkParis // https://ethereum.org/en/history/#paris + ForkShapella // https://ethereum.org/en/history/#shapella + ForkDencun // https://ethereum.org/en/history/#dencun + ForkPectra // https://ethereum.org/en/history/#pectra + + ForkFuture // Use this for specifying future features (must be last index, no gaps) + + ForkMerge = ForkParis // Common alias for Paris + ForkShanghai = ForkShapella // EL alias for Shapella + ForkCapella = ForkShapella // CL alias for Shapella + ForkCancun = ForkDencun // EL alias for Dencun + ForkDeneb = ForkDencun // CL alias for Dencun + ForkPrague = ForkPectra // EL alias for Pectra + ForkElectra = ForkPectra // CL alias for Pectra +) + +// forkMapping maps fork names to fork values. This is used internally by the +// ssz codec generator to convert tags to values. +var forkMapping = map[string]Fork{ + "frontier": ForkFrontier, + "homestead": ForkHomestead, + "dao": ForkDAO, + "tangerine": ForkTangerine, + "spurious": ForkSpurious, + "byzantium": ForkByzantium, + "constantinople": ForkConstantinople, + "istanbul": ForkIstanbul, + "muir": ForkMuir, + "phase0": ForkPhase0, + "berlin": ForkBerlin, + "london": ForkLondon, + "altair": ForkAltair, + "arrow": ForkArrow, + "gray": ForkGray, + "bellatrix": ForkBellatrix, + "paris": ForkParis, + "merge": ForkMerge, + "shapella": ForkShapella, + "shanghai": ForkShanghai, + "capella": ForkCapella, + "dencun": ForkDencun, + "cancun": ForkCancun, + "deneb": ForkDeneb, + "pectra": ForkPectra, + "prague": ForkPrague, + "electra": ForkElectra, + "future": ForkFuture, +} diff --git a/genutils.go b/genutils.go new file mode 100644 index 0000000..006c6e4 --- /dev/null +++ b/genutils.go @@ -0,0 +1,34 @@ +// ssz: Go Simple Serialize (SSZ) codec library +// Copyright 2024 ssz Authors +// SPDX-License-Identifier: BSD-3-Clause + +package ssz + +import "fmt" + +// PrecomputeStaticSizeCache is a helper for genssz to precompute SSZ (static) +// sizes for a monolith type on different forks. +// +// For non-monolith types that are constant across forks (or are not meant to be +// used across forks), all the sizes will be the same. 
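
// Hedged example, reusing the 44-byte Withdrawal type from the package's
// static example:
//
//	sizes := ssz.PrecomputeStaticSizeCache((*Withdrawal)(nil))
//	// len(sizes) == int(ssz.ForkFuture), one slot per fork enum value; since
//	// Withdrawal is fork-invariant, every slot holds the same 44.
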
+func PrecomputeStaticSizeCache(obj Object) []uint32 { + var ( + sizes = make([]uint32, ForkFuture) + sizer = &Sizer{codec: new(Codec)} + ) + switch v := obj.(type) { + case StaticObject: + for fork := 0; fork < len(sizes); fork++ { + sizer.codec.fork = Fork(fork) + sizes[fork] = v.SizeSSZ(sizer) + } + case DynamicObject: + for fork := 0; fork < len(sizes); fork++ { + sizer.codec.fork = Fork(fork) + sizes[fork] = v.SizeSSZ(sizer, true) + } + default: + panic(fmt.Sprintf("unsupported type: %T", obj)) + } + return sizes +} diff --git a/hasher.go b/hasher.go index 51fb530..fced534 100644 --- a/hasher.go +++ b/hasher.go @@ -52,7 +52,9 @@ type Hasher struct { groups []groupStats // Hashing progress tracking for the chunk groups layer int // Layer depth being hasher now - codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + codec *Codec // Self-referencing to pass DefineSSZ calls through (API trick) + sizer *Sizer // Self-referencing to pass SizeSSZ call through (API trick) + bitbuf []byte // Bitlist conversion buffer } @@ -64,6 +66,11 @@ type groupStats struct { chunks int // Number of chunks in this group } +// Fork retrieves the current fork (if any) that the hasher is operating in. +func (h *Hasher) Fork() Fork { + return h.codec.fork +} + // HashBool hashes a boolean. func HashBool[T ~bool](h *Hasher, v T) { if !v { diff --git a/sizer.go b/sizer.go index 892a5bb..e910d2c 100644 --- a/sizer.go +++ b/sizer.go @@ -6,33 +6,43 @@ package ssz import "github.com/prysmaticlabs/go-bitfield" +// Sizer is an SSZ static and dynamic size computer. +type Sizer struct { + codec *Codec // Self-referencing to have access to fork contexts +} + +// Fork retrieves the current fork (if any) that the sizer is operating in. +func (siz *Sizer) Fork() Fork { + return siz.codec.fork +} + // SizeDynamicBytes returns the serialized size of the dynamic part of a dynamic // blob. -func SizeDynamicBytes(blobs []byte) uint32 { +func SizeDynamicBytes(siz *Sizer, blobs []byte) uint32 { return uint32(len(blobs)) } // SizeSliceOfBits returns the serialized size of the dynamic part of a slice of // bits. -func SizeSliceOfBits(bits bitfield.Bitlist) uint32 { +func SizeSliceOfBits(siz *Sizer, bits bitfield.Bitlist) uint32 { return uint32(len(bits)) } // SizeSliceOfUint64s returns the serialized size of the dynamic part of a dynamic // list of uint64s. -func SizeSliceOfUint64s[T ~uint64](ns []T) uint32 { +func SizeSliceOfUint64s[T ~uint64](siz *Sizer, ns []T) uint32 { return uint32(len(ns)) * 8 } // SizeDynamicObject returns the serialized size of the dynamic part of a dynamic // object. -func SizeDynamicObject[T DynamicObject](obj T) uint32 { - return obj.SizeSSZ(false) +func SizeDynamicObject[T DynamicObject](siz *Sizer, obj T) uint32 { + return obj.SizeSSZ(siz, false) } // SizeSliceOfStaticBytes returns the serialized size of the dynamic part of a dynamic // list of static blobs. -func SizeSliceOfStaticBytes[T commonBytesLengths](blobs []T) uint32 { +func SizeSliceOfStaticBytes[T commonBytesLengths](siz *Sizer, blobs []T) uint32 { if len(blobs) == 0 { return 0 } @@ -41,7 +51,7 @@ func SizeSliceOfStaticBytes[T commonBytesLengths](blobs []T) uint32 { // SizeSliceOfDynamicBytes returns the serialized size of the dynamic part of a dynamic // list of dynamic blobs. 
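
// Hypothetical sketch (MonolithHeader as assumed earlier): hand-written static
// monolith types can size themselves per fork through the sizer's fork context:
//
//	func (h *MonolithHeader) SizeSSZ(siz *ssz.Sizer) uint32 {
//		if siz.Fork() >= ssz.ForkShapella {
//			return 8 + 8 // Slot + WithdrawalCount
//		}
//		return 8 // Slot only
//	}
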
-func SizeSliceOfDynamicBytes(blobs [][]byte) uint32 { +func SizeSliceOfDynamicBytes(siz *Sizer, blobs [][]byte) uint32 { var size uint32 for _, blob := range blobs { size += uint32(4 + len(blob)) // 4-byte offset + dynamic data later @@ -51,19 +61,19 @@ func SizeSliceOfDynamicBytes(blobs [][]byte) uint32 { // SizeSliceOfStaticObjects returns the serialized size of the dynamic part of a dynamic // list of static objects. -func SizeSliceOfStaticObjects[T StaticObject](objects []T) uint32 { +func SizeSliceOfStaticObjects[T StaticObject](siz *Sizer, objects []T) uint32 { if len(objects) == 0 { return 0 } - return uint32(len(objects)) * objects[0].SizeSSZ() + return uint32(len(objects)) * objects[0].SizeSSZ(siz) } // SizeSliceOfDynamicObjects returns the serialized size of the dynamic part of // a dynamic list of dynamic objects. -func SizeSliceOfDynamicObjects[T DynamicObject](objects []T) uint32 { +func SizeSliceOfDynamicObjects[T DynamicObject](siz *Sizer, objects []T) uint32 { var size uint32 for _, obj := range objects { - size += 4 + obj.SizeSSZ(false) // 4-byte offset + dynamic data later + size += 4 + obj.SizeSSZ(siz, false) // 4-byte offset + dynamic data later } return size } diff --git a/ssz.go b/ssz.go index 9beed9c..b613b0f 100644 --- a/ssz.go +++ b/ssz.go @@ -29,7 +29,7 @@ type StaticObject interface { // Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash // to allow the compiler to detect placing one or the other in reversed data // slots on an SSZ containers. - SizeSSZ() uint32 + SizeSSZ(siz *Sizer) uint32 } // DynamicObject defines the methods a type needs to implement to be used as a @@ -43,7 +43,7 @@ type DynamicObject interface { // Note, StaticObject.SizeSSZ and DynamicObject.SizeSSZ deliberately clash // to allow the compiler to detect placing one or the other in reversed data // slots on an SSZ containers. - SizeSSZ(fixed bool) uint32 + SizeSSZ(siz *Sizer, fixed bool) uint32 } // encoderPool is a pool of SSZ encoders to reuse some tiny internal helpers @@ -52,6 +52,7 @@ var encoderPool = sync.Pool{ New: func() any { codec := &Codec{enc: new(Encoder)} codec.enc.codec = codec + codec.enc.sizer = &Sizer{codec: codec} return codec }, } @@ -62,6 +63,7 @@ var decoderPool = sync.Pool{ New: func() any { codec := &Codec{dec: new(Decoder)} codec.dec.codec = codec + codec.dec.sizer = &Sizer{codec: codec} return codec }, } @@ -72,29 +74,50 @@ var hasherPool = sync.Pool{ New: func() any { codec := &Codec{has: new(Hasher)} codec.has.codec = codec + codec.has.sizer = &Sizer{codec: codec} return codec }, } +// sizerPool is a pool of SSZ sizers to reuse some tiny internal helpers +// without hitting Go's GC constantly. +var sizerPool = sync.Pool{ + New: func() any { + return &Sizer{codec: new(Codec)} + }, +} + // EncodeToStream serializes the object into a data stream. Do not use this // method with a bytes.Buffer to write into a []byte slice, as that will do // double the byte copying. For that use case, use EncodeToBytes instead. func EncodeToStream(w io.Writer, obj Object) error { + return EncodeToStreamWithFork(w, obj, ForkUnknown) +} + +// EncodeToStreamWithFork is analogous to EncodeToStream, but allows the user to +// set a specific fork context to encode the object in. This is useful for code- +// bases that have monolith types that marshal into many fork formats. 
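
// Usage sketch (w is an io.Writer, obj any monolith Object):
//
//	if err := ssz.EncodeToStreamWithFork(w, obj, ssz.ForkDeneb); err != nil {
//		// handle the encoding failure
//	}
//
// The fork-less EncodeToStream above is simply this call with ForkUnknown.
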
+func EncodeToStreamWithFork(w io.Writer, obj Object, fork Fork) error { codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) - codec.enc.outWriter, codec.enc.err = w, nil + codec.fork, codec.enc.outWriter = fork, w switch v := obj.(type) { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.enc.offsetDynamics(v.SizeSSZ(true)) + codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true)) v.DefineSSZ(codec) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } + // Retrieve any errors, zero out the sink and return + err := codec.enc.err + codec.enc.outWriter = nil - return codec.enc.err + codec.enc.err = nil + + return err } // EncodeToBytes serializes the object into a byte buffer. Don't use this method @@ -102,6 +125,13 @@ func EncodeToStream(w io.Writer, obj Object) error { // would double the memory use for the temporary buffer. For that use case, use // EncodeToStream instead. func EncodeToBytes(buf []byte, obj Object) error { + return EncodeToBytesWithFork(buf, obj, ForkUnknown) +} + +// EncodeToBytesWithFork is analogous to EncodeToBytes, but allows the user to +// set a specific fork context to encode the object in. This is useful for code- +// bases that have monolith types that marshal into many fork formats. +func EncodeToBytesWithFork(buf []byte, obj Object, fork Fork) error { // Sanity check that we have enough space to serialize into if size := Size(obj); int(size) > len(buf) { return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size) @@ -109,29 +139,41 @@ func EncodeToBytes(buf []byte, obj Object) error { codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) - codec.enc.outBuffer, codec.enc.err = buf, nil + codec.fork, codec.enc.outBuffer = fork, buf switch v := obj.(type) { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.enc.offsetDynamics(v.SizeSSZ(true)) + codec.enc.offsetDynamics(v.SizeSSZ(codec.enc.sizer, true)) v.DefineSSZ(codec) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } + // Retrieve any errors, zero out the sink and return + err := codec.enc.err + codec.enc.outBuffer = nil - return codec.enc.err + codec.enc.err = nil + + return err } // DecodeFromStream parses an object with the given size out of a stream. Do not // use this method with a bytes.Buffer to read from a []byte slice, as that will // double the byte copying. For that use case, use DecodeFromBytes instead. func DecodeFromStream(r io.Reader, obj Object, size uint32) error { + return DecodeFromStreamWithFork(r, obj, size, ForkUnknown) +} + +// DecodeFromStreamWithFork is analogous to DecodeFromStream, but allows the user +// to set a specific fork context to decode the object in. This is useful for code- +// bases that have monolith types that unmarshal into many fork formats. 
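
// Usage sketch (r is an io.Reader; size is the SSZ blob length known from the
// outer framing):
//
//	if err := ssz.DecodeFromStreamWithFork(r, obj, size, ssz.ForkCapella); err != nil {
//		// handle the decoding failure
//	}
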
+func DecodeFromStreamWithFork(r io.Reader, obj Object, size uint32, fork Fork) error { // Retrieve a new decoder codec and set its data source codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) - codec.dec.inReader = r + codec.fork, codec.dec.inReader = fork, r // Start a decoding round with length enforcement in place codec.dec.descendIntoSlot(size) @@ -140,7 +182,7 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32) error { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.dec.startDynamics(v.SizeSSZ(true)) + codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true)) v.DefineSSZ(codec) codec.dec.flushDynamics() default: @@ -162,6 +204,13 @@ func DecodeFromStream(r io.Reader, obj Object, size uint32) error { // would double the memory use for the temporary buffer. For that use case, use // DecodeFromStream instead. func DecodeFromBytes(blob []byte, obj Object) error { + return DecodeFromBytesWithFork(blob, obj, ForkUnknown) +} + +// DecodeFromBytesWithFork is analogous to DecodeFromBytes, but allows the user +// to set a specific fork context to decode the object in. This is useful for code- +// bases that have monolith types that unmarshal into many fork formats. +func DecodeFromBytesWithFork(blob []byte, obj Object, fork Fork) error { // Reject decoding from an empty slice if len(blob) == 0 { return io.ErrUnexpectedEOF @@ -170,6 +219,7 @@ func DecodeFromBytes(blob []byte, obj Object) error { codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) + codec.fork = fork codec.dec.inBuffer = blob codec.dec.inBufEnd = uintptr(unsafe.Pointer(&blob[0])) + uintptr(len(blob)) @@ -180,7 +230,7 @@ func DecodeFromBytes(blob []byte, obj Object) error { case StaticObject: v.DefineSSZ(codec) case DynamicObject: - codec.dec.startDynamics(v.SizeSSZ(true)) + codec.dec.startDynamics(v.SizeSSZ(codec.dec.sizer, true)) v.DefineSSZ(codec) codec.dec.flushDynamics() default: @@ -202,10 +252,19 @@ func DecodeFromBytes(blob []byte, obj Object) error { // This is useful for processing small objects with stable runtime and O(1) GC // guarantees. func HashSequential(obj Object) [32]byte { + return HashSequentialWithFork(obj, ForkUnknown) +} + +// HashSequentialWithFork is analogous to HashSequential, but allows the user to +// set a specific fork context to hash the object in. This is useful for code- +// bases that have monolith types that hash across many fork formats. +func HashSequentialWithFork(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() + codec.fork = fork + codec.has.descendLayer() obj.DefineSSZ(codec) codec.has.ascendLayer(0) @@ -221,11 +280,20 @@ func HashSequential(obj Object) [32]byte { // is useful for processing large objects, but will place a bigger load on your CPU // and GC; and might be more variable timing wise depending on other load. func HashConcurrent(obj Object) [32]byte { + return HashConcurrentWithFork(obj, ForkUnknown) +} + +// HashConcurrentWithFork is analogous to HashConcurrent, but allows the user to +// set a specific fork context to hash the object in. This is useful for code- +// bases that have monolith types that hash across many fork formats. 
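
// Usage sketch: for the same object and fork, both hashing strategies should
// produce the same root, the concurrent one merely trading CPU for latency:
//
//	seq := ssz.HashSequentialWithFork(obj, ssz.ForkDeneb)
//	con := ssz.HashConcurrentWithFork(obj, ssz.ForkDeneb)
//	// seq == con
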
+func HashConcurrentWithFork(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() + codec.fork = fork codec.has.threads = true + codec.has.descendLayer() obj.DefineSSZ(codec) codec.has.ascendLayer(0) @@ -233,18 +301,31 @@ func HashConcurrent(obj Object) [32]byte { if len(codec.has.chunks) != 1 { panic(fmt.Sprintf("unfinished hashing: left %v", codec.has.groups)) } + codec.has.threads = false return codec.has.chunks[0] } // Size retrieves the size of a ssz object, independent if it's a static or a // dynamic one. func Size(obj Object) uint32 { + return SizeWithFork(obj, ForkUnknown) +} + +// SizeWithFork is analogous to Size, but allows the user to set a specific fork +// context to size the object in. This is useful for codebases that have monolith +// types that serialize across many fork formats. +func SizeWithFork(obj Object, fork Fork) uint32 { + sizer := sizerPool.Get().(*Sizer) + defer sizerPool.Put(sizer) + + sizer.codec.fork = fork + var size uint32 switch v := obj.(type) { case StaticObject: - size = v.SizeSSZ() + size = v.SizeSSZ(sizer) case DynamicObject: - size = v.SizeSSZ(false) + size = v.SizeSSZ(sizer, false) default: panic(fmt.Sprintf("unsupported type: %T", obj)) } diff --git a/tests/corner_cases_test.go b/tests/corner_cases_test.go index 82d5687..9564bf4 100644 --- a/tests/corner_cases_test.go +++ b/tests/corner_cases_test.go @@ -19,7 +19,7 @@ import ( func TestDecodeMissized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, obj.SizeSSZ()+1) + blob := make([]byte, ssz.Size(obj)+1) if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } @@ -27,7 +27,7 @@ func TestDecodeMissized(t *testing.T) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - blob = make([]byte, obj.SizeSSZ()-1) + blob = make([]byte, ssz.Size(obj)-1) if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } @@ -40,7 +40,7 @@ type testMissizedType struct { A, B uint64 } -func (t *testMissizedType) SizeSSZ() uint32 { return 16 } +func (t *testMissizedType) SizeSSZ(sizer *ssz.Sizer) uint32 { return 16 } func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &t.A) ssz.DefineUint64(codec, &t.B) @@ -50,7 +50,7 @@ func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { func TestEncodeOversized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, obj.SizeSSZ()-1) + blob := make([]byte, ssz.Size(obj)-1) if err := ssz.EncodeToBytes(blob, obj); !errors.Is(err, ssz.ErrBufferTooSmall) { t.Errorf("encode to bytes error mismatch: have %v, want %v", err, ssz.ErrBufferTooSmall) } diff --git a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go index 62a33c4..62cfa00 100644 --- a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
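
// Usage sketch: with SizeSSZ now requiring a *ssz.Sizer, callers should go
// through the package-level helpers instead (as the updated tests do):
//
//	blob := make([]byte, ssz.SizeWithFork(obj, ssz.ForkDeneb))
//	err := ssz.EncodeToBytesWithFork(blob, obj, ssz.ForkDeneb)
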
-func (obj *AggregateAndProof) SizeSSZ(fixed bool) uint32 { +func (obj *AggregateAndProof) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { var size = uint32(8 + 4 + 96) if fixed { return size } - size += ssz.SizeDynamicObject(obj.Aggregate) + size += ssz.SizeDynamicObject(sizer, obj.Aggregate) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go index 7d05f35..54b1e95 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go @@ -5,11 +5,14 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheAttestationData = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() +var staticSizeCacheAttestationData = ssz.PrecomputeStaticSizeCache((*AttestationData)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *AttestationData) SizeSSZ() uint32 { - return staticSizeCacheAttestationData +func (obj *AttestationData) SizeSSZ(sizer *ssz.Sizer) uint32 { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationData) { + return staticSizeCacheAttestationData[fork] + } + return 8 + 8 + 32 + ssz.Size((*Checkpoint)(nil)) + ssz.Size((*Checkpoint)(nil)) } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go index 6eb49ff..f7b08d7 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go @@ -5,16 +5,22 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 96 +var staticSizeCacheAttestation = ssz.PrecomputeStaticSizeCache((*Attestation)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *Attestation) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheAttestation) +func (obj *Attestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestation) { + size = staticSizeCacheAttestation[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfBits(obj.AggregationBits) + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go index 97638fd..2100502 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *AttesterSlashing) SizeSSZ(fixed bool) uint32 { +func (obj *AttesterSlashing) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { var size = uint32(4 + 4) if fixed { return size } - size += ssz.SizeDynamicObject(obj.Attestation1) - size += ssz.SizeDynamicObject(obj.Attestation2) + size += ssz.SizeDynamicObject(sizer, obj.Attestation1) + size += ssz.SizeDynamicObject(sizer, obj.Attestation2) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go index c6f3178..83542f6 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_altair_ssz.go @@ -5,20 +5,26 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyAltair = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() +var staticSizeCacheBeaconBlockBodyAltair = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyAltair)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlockBodyAltair) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyAltair) +func (obj *BeaconBlockBodyAltair) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyAltair) { + size = staticSizeCacheBeaconBlockBodyAltair[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go index a424f55..5bf3bfe 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_bellatrix_ssz.go @@ -5,21 +5,27 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyBellatrix = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 +var staticSizeCacheBeaconBlockBodyBellatrix = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyBellatrix)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconBlockBodyBellatrix) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyBellatrix) +func (obj *BeaconBlockBodyBellatrix) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyBellatrix) { + size = staticSizeCacheBeaconBlockBodyBellatrix[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go index 39ec490..237ec22 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_capella_ssz.go @@ -5,22 +5,28 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyCapella = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 + 4 +var staticSizeCacheBeaconBlockBodyCapella = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyCapella)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconBlockBodyCapella) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyCapella) +func (obj *BeaconBlockBodyCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyCapella) { + size = staticSizeCacheBeaconBlockBodyCapella[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) - size += ssz.SizeSliceOfStaticObjects(obj.BlsToExecutionChanges) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go index 118aaf6..b42bdf8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_deneb_ssz.go @@ -5,23 +5,29 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBodyDeneb = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ() + 4 + 4 + 4 +var staticSizeCacheBeaconBlockBodyDeneb = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyDeneb)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconBlockBodyDeneb) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBodyDeneb) +func (obj *BeaconBlockBodyDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyDeneb) { + size = staticSizeCacheBeaconBlockBodyDeneb[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + (*SyncAggregate)(nil).SizeSSZ(sizer) + 4 + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) - size += ssz.SizeDynamicObject(obj.ExecutionPayload) - size += ssz.SizeSliceOfStaticObjects(obj.BlsToExecutionChanges) - size += ssz.SizeSliceOfStaticBytes(obj.BlobKzgCommitments) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) + size += ssz.SizeSliceOfStaticBytes(sizer, obj.BlobKzgCommitments) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go index 484c1ca..260d7cb 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_ssz.go @@ -5,20 +5,26 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. -var staticSizeCacheBeaconBlockBody = 96 + (*Eth1Data)(nil).SizeSSZ() + 32 + 4 + 4 + 4 + 4 + 4 +var staticSizeCacheBeaconBlockBody = ssz.PrecomputeStaticSizeCache((*BeaconBlockBody)(nil)) // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *BeaconBlockBody) SizeSSZ(fixed bool) uint32 { - var size = uint32(staticSizeCacheBeaconBlockBody) +func (obj *BeaconBlockBody) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBody) { + size = staticSizeCacheBeaconBlockBody[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + } + // Either return the static size or accumulate the dynamic too if fixed { return size } - size += ssz.SizeSliceOfStaticObjects(obj.ProposerSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.AttesterSlashings) - size += ssz.SizeSliceOfDynamicObjects(obj.Attestations) - size += ssz.SizeSliceOfStaticObjects(obj.Deposits) - size += ssz.SizeSliceOfStaticObjects(obj.VoluntaryExits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go index dc9b0d7..6a0ee7f 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_header_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *BeaconBlockHeader) SizeSSZ() uint32 { +func (obj *BeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 8 + 32 + 32 + 32 } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go index e7bc25f..8e6dda0 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go @@ -6,12 +6,12 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlock) SizeSSZ(fixed bool) uint32 { +func (obj *BeaconBlock) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { var size = uint32(8 + 8 + 32 + 32 + 4) if fixed { return size } - size += ssz.SizeDynamicObject(obj.Body) + size += ssz.SizeDynamicObject(sizer, obj.Body) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go index 68d19dc..b413fc5 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go @@ -5,24 +5,30 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // Cached static size computed on package init. 
diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go
index 68d19dc..b413fc5 100644
--- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_capella_ssz.go
@@ -5,24 +5,30 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheBeaconStateCapella = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + 4 + (*SyncCommittee)(nil).SizeSSZ() + (*SyncCommittee)(nil).SizeSSZ() + 4 + 8 + 8 + 4
+var staticSizeCacheBeaconStateCapella = ssz.PrecomputeStaticSizeCache((*BeaconStateCapella)(nil))
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *BeaconStateCapella) SizeSSZ(fixed bool) uint32 {
-	var size = uint32(staticSizeCacheBeaconStateCapella)
+func (obj *BeaconStateCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
+	// Load static size if already precomputed, calculate otherwise
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateCapella) {
+		size = staticSizeCacheBeaconStateCapella[fork]
+	} else {
+		size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + 8 + 8 + 4
+	}
+	// Either return the static size or accumulate the dynamic too
 	if fixed {
 		return size
 	}
-	size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots)
-	size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes)
-	size += ssz.SizeSliceOfStaticObjects(obj.Validators)
-	size += ssz.SizeSliceOfUint64s(obj.Balances)
-	size += ssz.SizeDynamicBytes(obj.PreviousEpochParticipation)
-	size += ssz.SizeDynamicBytes(obj.CurrentEpochParticipation)
-	size += ssz.SizeSliceOfUint64s(obj.InactivityScores)
-	size += ssz.SizeDynamicObject(obj.LatestExecutionPayloadHeader)
-	size += ssz.SizeSliceOfStaticObjects(obj.HistoricalSummaries)
+	size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.Balances)
+	size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation)
+	size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores)
+	size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go
deleted file mode 100644
index b57597e..0000000
--- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Code generated by github.com/karalabe/ssz. DO NOT EDIT.
-
-package consensus_spec_tests
-
-import "github.com/karalabe/ssz"
-
-// Cached static size computed on package init.
-var staticSizeCacheBeaconStateDeneb = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + 4 + (*SyncCommittee)(nil).SizeSSZ() + (*SyncCommittee)(nil).SizeSSZ() + 4 + 8 + 8 + 4
-
-// SizeSSZ returns either the static size of the object if fixed == true, or
-// the total size otherwise.
-func (obj *BeaconStateDeneb) SizeSSZ(fixed bool) uint32 {
-	var size = uint32(staticSizeCacheBeaconStateDeneb)
-	if fixed {
-		return size
-	}
-	size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots)
-	size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes)
-	size += ssz.SizeSliceOfStaticObjects(obj.Validators)
-	size += ssz.SizeSliceOfUint64s(obj.Balances)
-	size += ssz.SizeDynamicBytes(obj.PreviousEpochParticipation)
-	size += ssz.SizeDynamicBytes(obj.CurrentEpochParticipation)
-	size += ssz.SizeSliceOfUint64s(obj.InactivityScores)
-	size += ssz.SizeDynamicObject(obj.LatestExecutionPayloadHeader)
-	size += ssz.SizeSliceOfStaticObjects(obj.HistoricalSummaries)
-
-	return size
-}
-
-// DefineSSZ defines how an object is encoded/decoded.
-func (obj *BeaconStateDeneb) DefineSSZ(codec *ssz.Codec) {
-	// Define the static data (fields and dynamic offsets)
-	ssz.DefineUint64(codec, &obj.GenesisTime)                                          // Field ( 0) - GenesisTime                  -       8 bytes
-	ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot)                           // Field ( 1) - GenesisValidatorsRoot        -      32 bytes
-	ssz.DefineUint64(codec, &obj.Slot)                                                 // Field ( 2) - Slot                         -       8 bytes
-	ssz.DefineStaticObject(codec, &obj.Fork)                                           // Field ( 3) - Fork                         -       ? bytes (Fork)
-	ssz.DefineStaticObject(codec, &obj.LatestBlockHeader)                              // Field ( 4) - LatestBlockHeader            -       ? bytes (BeaconBlockHeader)
-	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:])                       // Field ( 5) - BlockRoots                   -  262144 bytes
-	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:])                       // Field ( 6) - StateRoots                   -  262144 bytes
-	ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots,16777216)           // Offset ( 7) - HistoricalRoots              -       4 bytes
-	ssz.DefineStaticObject(codec, &obj.Eth1Data)                                       // Field ( 8) - Eth1Data                     -       ? bytes (Eth1Data)
-	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes,2048)               // Offset ( 9) - Eth1DataVotes                -       4 bytes
-	ssz.DefineUint64(codec, &obj.Eth1DepositIndex)                                     // Field (10) - Eth1DepositIndex             -       8 bytes
-	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators,1099511627776)         // Offset (11) - Validators                   -       4 bytes
-	ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances,1099511627776)                 // Offset (12) - Balances                     -       4 bytes
-	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:])                      // Field (13) - RandaoMixes                  - 2097152 bytes
-	ssz.DefineArrayOfUint64s(codec, &obj.Slashings)                                    // Field (14) - Slashings                    -   65536 bytes
-	ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation,1099511627776) // Offset (15) - PreviousEpochParticipation   -       4 bytes
-	ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation,1099511627776)  // Offset (16) - CurrentEpochParticipation    -       4 bytes
-	ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4)                            // Field (17) - JustificationBits            -       1 bytes
-	ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint)                    // Field (18) - PreviousJustifiedCheckpoint  -       ? bytes (Checkpoint)
-	ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint)                     // Field (19) - CurrentJustifiedCheckpoint   -       ? bytes (Checkpoint)
-	ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint)                            // Field (20) - FinalizedCheckpoint          -       ? bytes (Checkpoint)
-	ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores,1099511627776)         // Offset (21) - InactivityScores             -       4 bytes
-	ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee)                           // Field (22) - CurrentSyncCommittee         -       ? bytes (SyncCommittee)
-	ssz.DefineStaticObject(codec, &obj.NextSyncCommittee)                              // Field (23) - NextSyncCommittee            -       ? bytes (SyncCommittee)
-	ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader)            // Offset (24) - LatestExecutionPayloadHeader -       4 bytes
-	ssz.DefineUint64(codec, &obj.NextWithdrawalIndex)                                  // Field (25) - NextWithdrawalIndex          -       8 bytes
-	ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex)                         // Field (26) - NextWithdrawalValidatorIndex -       8 bytes
-	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries,16777216)     // Offset (27) - HistoricalSummaries          -       4 bytes
-
-	// Define the dynamic data (fields)
-	ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216)             // Field ( 7) - HistoricalRoots              - ? bytes
-	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048)                 // Field ( 9) - Eth1DataVotes                - ? bytes
-	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776)           // Field (11) - Validators                   - ? bytes
-	ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776)                   // Field (12) - Balances                     - ? bytes
-	ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776)   // Field (15) - PreviousEpochParticipation   - ? bytes
-	ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776)    // Field (16) - CurrentEpochParticipation    - ? bytes
-	ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776)           // Field (21) - InactivityScores             - ? bytes
-	ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader)               // Field (24) - LatestExecutionPayloadHeader - ? bytes
-	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216)       // Field (27) - HistoricalSummaries          - ? bytes
-}
\ No newline at end of file
diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go
new file mode 100644
index 0000000..8896b7d
--- /dev/null
+++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_deneb_ssz.go
@@ -0,0 +1,78 @@
+// Code generated by github.com/karalabe/ssz. DO NOT EDIT.
+
+package consensus_spec_tests
+
+import "github.com/karalabe/ssz"
+
+// Cached static size computed on package init.
+var staticSizeCacheBeaconStateDeneb = ssz.PrecomputeStaticSizeCache((*BeaconStateDeneb)(nil))
+
+// SizeSSZ returns either the static size of the object if fixed == true, or
+// the total size otherwise.
+func (obj *BeaconStateDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
+	// Load static size if already precomputed, calculate otherwise
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateDeneb) {
+		size = staticSizeCacheBeaconStateDeneb[fork]
+	} else {
+		size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + 8 + 8 + 4
+	}
+	// Either return the static size or accumulate the dynamic too
+	if fixed {
+		return size
+	}
+	size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.Balances)
+	size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation)
+	size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores)
+	size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries)
+
+	return size
+}
+
+// DefineSSZ defines how an object is encoded/decoded.
+func (obj *BeaconStateDeneb) DefineSSZ(codec *ssz.Codec) {
+	// Define the static data (fields and dynamic offsets)
+	ssz.DefineUint64(codec, &obj.GenesisTime)                                           // Field ( 0) - GenesisTime                  -       8 bytes
+	ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot)                            // Field ( 1) - GenesisValidatorsRoot        -      32 bytes
+	ssz.DefineUint64(codec, &obj.Slot)                                                  // Field ( 2) - Slot                         -       8 bytes
+	ssz.DefineStaticObject(codec, &obj.Fork)                                            // Field ( 3) - Fork                         -       ? bytes (Fork)
+	ssz.DefineStaticObject(codec, &obj.LatestBlockHeader)                               // Field ( 4) - LatestBlockHeader            -       ? bytes (BeaconBlockHeader)
+	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:])                        // Field ( 5) - BlockRoots                   -  262144 bytes
+	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:])                        // Field ( 6) - StateRoots                   -  262144 bytes
+	ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216)           // Offset ( 7) - HistoricalRoots              -       4 bytes
+	ssz.DefineStaticObject(codec, &obj.Eth1Data)                                        // Field ( 8) - Eth1Data                     -       ? bytes (Eth1Data)
+	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048)               // Offset ( 9) - Eth1DataVotes                -       4 bytes
+	ssz.DefineUint64(codec, &obj.Eth1DepositIndex)                                      // Field (10) - Eth1DepositIndex             -       8 bytes
+	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776)         // Offset (11) - Validators                   -       4 bytes
+	ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776)                 // Offset (12) - Balances                     -       4 bytes
+	ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:])                       // Field (13) - RandaoMixes                  - 2097152 bytes
+	ssz.DefineArrayOfUint64s(codec, &obj.Slashings)                                     // Field (14) - Slashings                    -   65536 bytes
+	ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation   -       4 bytes
+	ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776)  // Offset (16) - CurrentEpochParticipation    -       4 bytes
+	ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4)                             // Field (17) - JustificationBits            -       1 bytes
+	ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint)                     // Field (18) - PreviousJustifiedCheckpoint  -       ? bytes (Checkpoint)
+	ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint)                      // Field (19) - CurrentJustifiedCheckpoint   -       ? bytes (Checkpoint)
+	ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint)                             // Field (20) - FinalizedCheckpoint          -       ? bytes (Checkpoint)
+	ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776)         // Offset (21) - InactivityScores             -       4 bytes
+	ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee)                            // Field (22) - CurrentSyncCommittee         -       ? bytes (SyncCommittee)
+	ssz.DefineStaticObject(codec, &obj.NextSyncCommittee)                               // Field (23) - NextSyncCommittee            -       ? bytes (SyncCommittee)
+	ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader)             // Offset (24) - LatestExecutionPayloadHeader -       4 bytes
+	ssz.DefineUint64(codec, &obj.NextWithdrawalIndex)                                   // Field (25) - NextWithdrawalIndex          -       8 bytes
+	ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex)                          // Field (26) - NextWithdrawalValidatorIndex -       8 bytes
+	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216)     // Offset (27) - HistoricalSummaries          -       4 bytes
+
+	// Define the dynamic data (fields)
+	ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216)           // Field ( 7) - HistoricalRoots              - ? bytes
+	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048)               // Field ( 9) - Eth1DataVotes                - ? bytes
+	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776)         // Field (11) - Validators                   - ? bytes
+	ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776)                 // Field (12) - Balances                     - ? bytes
+	ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation   - ? bytes
+	ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776)  // Field (16) - CurrentEpochParticipation    - ? bytes
+	ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776)         // Field (21) - InactivityScores             - ? bytes
+	ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader)             // Field (24) - LatestExecutionPayloadHeader - ? bytes
+	ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216)     // Field (27) - HistoricalSummaries          - ? bytes
+}
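The regenerated DefineSSZ keeps the library's two-pass layout: one pass over the static section (values and 4-byte offsets), then one pass over the dynamic payloads. For a hypothetical two-field container the generated shape collapses to (type name and limits are illustrative only):

	func (obj *Example) DefineSSZ(codec *ssz.Codec) {
		// Define the static data (fields and dynamic offsets)
		ssz.DefineUint64(codec, &obj.Nonce)                  // Field  (0) - Nonce - 8 bytes
		ssz.DefineDynamicBytesOffset(codec, &obj.Blob, 1024) // Offset (1) - Blob  - 4 bytes

		// Define the dynamic data (fields)
		ssz.DefineDynamicBytesContent(codec, &obj.Blob, 1024) // Field (1) - Blob - ? bytes
	}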
diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go
index ff04879..054e131 100644
--- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_ssz.go
@@ -5,21 +5,27 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheBeaconState = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ() + (*BeaconBlockHeader)(nil).SizeSSZ() + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ() + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ() + (*Checkpoint)(nil).SizeSSZ()
+var staticSizeCacheBeaconState = ssz.PrecomputeStaticSizeCache((*BeaconState)(nil))
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *BeaconState) SizeSSZ(fixed bool) uint32 {
-	var size = uint32(staticSizeCacheBeaconState)
+func (obj *BeaconState) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
+	// Load static size if already precomputed, calculate otherwise
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconState) {
+		size = staticSizeCacheBeaconState[fork]
+	} else {
+		size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer)
+	}
+	// Either return the static size or accumulate the dynamic too
 	if fixed {
 		return size
 	}
-	size += ssz.SizeSliceOfStaticBytes(obj.HistoricalRoots)
-	size += ssz.SizeSliceOfStaticObjects(obj.Eth1DataVotes)
-	size += ssz.SizeSliceOfStaticObjects(obj.Validators)
-	size += ssz.SizeSliceOfUint64s(obj.Balances)
-	size += ssz.SizeSliceOfDynamicObjects(obj.PreviousEpochAttestations)
-	size += ssz.SizeSliceOfDynamicObjects(obj.CurrentEpochAttestations)
+	size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.Balances)
+	size += ssz.SizeSliceOfDynamicObjects(sizer, obj.PreviousEpochAttestations)
+	size += ssz.SizeSliceOfDynamicObjects(sizer, obj.CurrentEpochAttestations)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go
index ba72149..184fc13 100644
--- a/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_bls_to_execution_change_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *BLSToExecutionChange) SizeSSZ() uint32 {
+func (obj *BLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 48 + 20
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go b/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go
index 676dbe8..2da5dc7 100644
--- a/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_checkpoint_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Checkpoint) SizeSSZ() uint32 {
+func (obj *Checkpoint) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 32
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go
index d2e22c9..58360c6 100644
--- a/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_deposit_data_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *DepositData) SizeSSZ() uint32 {
+func (obj *DepositData) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 48 + 32 + 8 + 96
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go
index ee36e66..879400a 100644
--- a/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_deposit_message_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *DepositMessage) SizeSSZ() uint32 {
+func (obj *DepositMessage) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 48 + 32 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go
index ad1d662..5990cd7 100644
--- a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go
@@ -5,11 +5,14 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheDeposit = 33*32 + (*DepositData)(nil).SizeSSZ()
+var staticSizeCacheDeposit = ssz.PrecomputeStaticSizeCache((*Deposit)(nil))
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Deposit) SizeSSZ() uint32 {
-	return staticSizeCacheDeposit
+func (obj *Deposit) SizeSSZ(sizer *ssz.Sizer) uint32 {
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheDeposit) {
+		return staticSizeCacheDeposit[fork]
+	}
+	return 33*32 + ssz.Size((*DepositData)(nil))
 }
 
 // DefineSSZ defines how an object is encoded/decoded.
diff --git a/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go b/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go
index bee1ebf..e09d4f1 100644
--- a/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_eth1_block_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Eth1Block) SizeSSZ() uint32 {
+func (obj *Eth1Block) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 32 + 8
 }
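PrecomputeStaticSizeCache itself is not part of these hunks (per the diffstat it lives in the new genutils.go); judging from the call sites it plausibly sizes the nil object once per known fork, roughly along these lines. This is a hypothetical reconstruction, simplified to the static-object case, with sizerForFork standing in for whatever internal constructor the library actually uses:

	func PrecomputeStaticSizeCache(obj StaticObject) []uint32 {
		sizes := make([]uint32, ForkFuture+1)
		for fork := Fork(0); fork <= ForkFuture; fork++ {
			sizes[fork] = obj.SizeSSZ(sizerForFork(fork)) // sizerForFork is assumed
		}
		return sizes
	}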
diff --git a/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go
index 806049d..1e3be89 100644
--- a/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_eth1_data_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Eth1Data) SizeSSZ() uint32 {
+func (obj *Eth1Data) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 32 + 8 + 32
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go
index 59a0a58..604ce27 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go
@@ -6,14 +6,14 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadCapella) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
-	size += ssz.SizeSliceOfDynamicBytes(obj.Transactions)
-	size += ssz.SizeSliceOfStaticObjects(obj.Withdrawals)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
+	size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go
index 37ff652..eaf0751 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go
@@ -6,14 +6,14 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadDeneb) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 + 8 + 8)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
-	size += ssz.SizeSliceOfDynamicBytes(obj.Transactions)
-	size += ssz.SizeSliceOfStaticObjects(obj.Withdrawals)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
+	size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions)
+	size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go
index 377bc6c..9ed1802 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go
@@ -6,12 +6,12 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go
index 9c7c824..b0f0639 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go
@@ -6,12 +6,12 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 + 8 + 8)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go
index 777c73b..153dc54 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go
@@ -6,12 +6,12 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadHeader) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadHeader) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go
index ae3dfa9..eac28ae 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go
@@ -6,13 +6,13 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayload) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayload) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
-	size += ssz.SizeSliceOfDynamicBytes(obj.Transactions)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
+	size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go
index ef6c78f..75bf260 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go
@@ -6,13 +6,13 @@ import "github.com/karalabe/ssz"
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *ExecutionPayloadVariation) SizeSSZ(fixed bool) uint32 {
+func (obj *ExecutionPayloadVariation) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {
 	var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4)
 	if fixed {
 		return size
 	}
-	size += ssz.SizeDynamicBytes(obj.ExtraData)
-	size += ssz.SizeSliceOfDynamicBytes(obj.Transactions)
+	size += ssz.SizeDynamicBytes(sizer, obj.ExtraData)
+	size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go
index 28623d5..cf87868 100644
--- a/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_fork_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Fork) SizeSSZ() uint32 {
+func (obj *Fork) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 4 + 4 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go
index 9ce8ed2..a2541f3 100644
--- a/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_historical_batch_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *HistoricalBatch) SizeSSZ() uint32 {
+func (obj *HistoricalBatch) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8192*32 + 8192*32
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go
index 8abb8ce..93a2c47 100644
--- a/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_historical_batch_variation_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *HistoricalBatchVariation) SizeSSZ() uint32 {
+func (obj *HistoricalBatchVariation) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8192*32 + 8192*32
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go b/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go
index 22372b0..741dc0b 100644
--- a/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_historical_summary_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *HistoricalSummary) SizeSSZ() uint32 {
+func (obj *HistoricalSummary) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 32 + 32
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go
index c595974..ff47f22 100644
--- a/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_indexed_attestation_ssz.go
@@ -5,16 +5,22 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheIndexedAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 96
+var staticSizeCacheIndexedAttestation = ssz.PrecomputeStaticSizeCache((*IndexedAttestation)(nil))
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *IndexedAttestation) SizeSSZ(fixed bool) uint32 {
-	var size = uint32(staticSizeCacheIndexedAttestation)
+func (obj *IndexedAttestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
+	// Load static size if already precomputed, calculate otherwise
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheIndexedAttestation) {
+		size = staticSizeCacheIndexedAttestation[fork]
+	} else {
+		size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96
+	}
+	// Either return the static size or accumulate the dynamic too
 	if fixed {
 		return size
 	}
-	size += ssz.SizeSliceOfUint64s(obj.AttestationIndices)
+	size += ssz.SizeSliceOfUint64s(sizer, obj.AttestationIndices)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go
index 11db1b8..c805ff3 100644
--- a/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_pending_attestation_ssz.go
@@ -5,16 +5,22 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCachePendingAttestation = 4 + (*AttestationData)(nil).SizeSSZ() + 8 + 8
+var staticSizeCachePendingAttestation = ssz.PrecomputeStaticSizeCache((*PendingAttestation)(nil))
 
 // SizeSSZ returns either the static size of the object if fixed == true, or
 // the total size otherwise.
-func (obj *PendingAttestation) SizeSSZ(fixed bool) uint32 {
-	var size = uint32(staticSizeCachePendingAttestation)
+func (obj *PendingAttestation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
+	// Load static size if already precomputed, calculate otherwise
+	if fork := int(sizer.Fork()); fork < len(staticSizeCachePendingAttestation) {
+		size = staticSizeCachePendingAttestation[fork]
+	} else {
+		size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 8 + 8
+	}
+	// Either return the static size or accumulate the dynamic too
 	if fixed {
 		return size
 	}
-	size += ssz.SizeSliceOfBits(obj.AggregationBits)
+	size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits)
 
 	return size
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go
index 4a95b9a..9c314b7 100644
--- a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go
@@ -5,11 +5,14 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheProposerSlashing = (*SignedBeaconBlockHeader)(nil).SizeSSZ() + (*SignedBeaconBlockHeader)(nil).SizeSSZ()
+var staticSizeCacheProposerSlashing = ssz.PrecomputeStaticSizeCache((*ProposerSlashing)(nil))
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *ProposerSlashing) SizeSSZ() uint32 {
-	return staticSizeCacheProposerSlashing
+func (obj *ProposerSlashing) SizeSSZ(sizer *ssz.Sizer) uint32 {
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheProposerSlashing) {
+		return staticSizeCacheProposerSlashing[fork]
+	}
+	return ssz.Size((*SignedBeaconBlockHeader)(nil)) + ssz.Size((*SignedBeaconBlockHeader)(nil))
 }
 
 // DefineSSZ defines how an object is encoded/decoded.
diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go
index e4c1d0c..02022e8 100644
--- a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go
@@ -5,11 +5,14 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheSignedBeaconBlockHeader = (*BeaconBlockHeader)(nil).SizeSSZ() + 96
+var staticSizeCacheSignedBeaconBlockHeader = ssz.PrecomputeStaticSizeCache((*SignedBeaconBlockHeader)(nil))
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *SignedBeaconBlockHeader) SizeSSZ() uint32 {
-	return staticSizeCacheSignedBeaconBlockHeader
+func (obj *SignedBeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) uint32 {
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBeaconBlockHeader) {
+		return staticSizeCacheSignedBeaconBlockHeader[fork]
+	}
+	return ssz.Size((*BeaconBlockHeader)(nil)) + 96
 }
 
 // DefineSSZ defines how an object is encoded/decoded.
diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go
index f79df41..33447c8 100644
--- a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go
@@ -5,11 +5,14 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheSignedBLSToExecutionChange = (*BLSToExecutionChange)(nil).SizeSSZ() + 96
+var staticSizeCacheSignedBLSToExecutionChange = ssz.PrecomputeStaticSizeCache((*SignedBLSToExecutionChange)(nil))
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *SignedBLSToExecutionChange) SizeSSZ() uint32 {
-	return staticSizeCacheSignedBLSToExecutionChange
+func (obj *SignedBLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) uint32 {
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBLSToExecutionChange) {
+		return staticSizeCacheSignedBLSToExecutionChange[fork]
+	}
+	return ssz.Size((*BLSToExecutionChange)(nil)) + 96
 }
 
 // DefineSSZ defines how an object is encoded/decoded.
diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go
index c8827bd..b01757a 100644
--- a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go
@@ -5,11 +5,14 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // Cached static size computed on package init.
-var staticSizeCacheSignedVoluntaryExit = (*VoluntaryExit)(nil).SizeSSZ() + 96
+var staticSizeCacheSignedVoluntaryExit = ssz.PrecomputeStaticSizeCache((*SignedVoluntaryExit)(nil))
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *SignedVoluntaryExit) SizeSSZ() uint32 {
-	return staticSizeCacheSignedVoluntaryExit
+func (obj *SignedVoluntaryExit) SizeSSZ(sizer *ssz.Sizer) uint32 {
+	if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedVoluntaryExit) {
+		return staticSizeCacheSignedVoluntaryExit[fork]
+	}
+	return ssz.Size((*VoluntaryExit)(nil)) + 96
 }
 
 // DefineSSZ defines how an object is encoded/decoded.
diff --git a/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go b/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go
index a5eb1ed..f1d2183 100644
--- a/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_sync_aggregate_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *SyncAggregate) SizeSSZ() uint32 {
+func (obj *SyncAggregate) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 64 + 96
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go b/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go
index 7be6c36..abf2e0e 100644
--- a/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_sync_committee_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *SyncCommittee) SizeSSZ() uint32 {
+func (obj *SyncCommittee) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 512*48 + 48
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go b/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go
index 6e40251..ec7ff1b 100644
--- a/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_validator_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Validator) SizeSSZ() uint32 {
+func (obj *Validator) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 48 + 32 + 8 + 1 + 8 + 8 + 8 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go
index 14853a0..9f8d355 100644
--- a/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_voluntary_exit_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *VoluntaryExit) SizeSSZ() uint32 {
+func (obj *VoluntaryExit) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go b/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go
index 021d327..8373ec9 100644
--- a/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_withdrawal_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *Withdrawal) SizeSSZ() uint32 {
+func (obj *Withdrawal) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 8 + 20 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go
index 5e1fa65..ce74ad3 100644
--- a/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_withdrawal_variation_ssz.go
@@ -5,7 +5,7 @@ package consensus_spec_tests
 import "github.com/karalabe/ssz"
 
 // SizeSSZ returns the total size of the static ssz object.
-func (obj *WithdrawalVariation) SizeSSZ() uint32 {
+func (obj *WithdrawalVariation) SizeSSZ(sizer *ssz.Sizer) uint32 {
 	return 8 + 8 + 20 + 8
 }
diff --git a/tests/testtypes/consensus-spec-tests/types_consensus.go b/tests/testtypes/consensus-spec-tests/types_consensus.go
index 13d06ea..1edf64a 100644
--- a/tests/testtypes/consensus-spec-tests/types_consensus.go
+++ b/tests/testtypes/consensus-spec-tests/types_consensus.go
@@ -43,6 +43,7 @@ import (
 //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderDeneb -out gen_execution_payload_header_deneb_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type BeaconState -out gen_beacon_state_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type BeaconStateCapella -out gen_beacon_state_capella_ssz.go
+//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateDeneb -out gen_beacon_state_deneb_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBody -out gen_beacon_block_body_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyAltair -out gen_beacon_block_body_altair_ssz.go
 //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyBellatrix -out gen_beacon_block_body_bellatrix_ssz.go
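That concludes the plumbing commit: every SizeSSZ gains a *ssz.Sizer, the generated static-size caches become fork-indexed, and (per this patch's ssz.go changes) each entry point gains a fork-aware *WithFork variant. Migration at this stage is mechanical, presumably along these lines (illustrative sketch, obj being any generated ssz type):

	// The plain helpers keep working and default to ForkUnknown; fork-aware
	// callers switch to the new variants added by this commit.
	if err := ssz.EncodeToBytesWithFork(buf, obj, ssz.ForkDeneb); err != nil {
		panic(err)
	}

The follow-up commit below then builds the actual monolith generator on top of this plumbing.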
From 473bca32bf7ae6a00f5c5133c03e2995040a0121 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Thu, 25 Jul 2024 16:37:09 +0300
Subject: [PATCH 02/12] all: hack together the monolith ssz generator

---
 cmd/sszgen/forks.go                           |  38 ++++
 cmd/sszgen/gen.go                             | 203 +++++++++++-------
 cmd/sszgen/opset.go                           |   2 +-
 cmd/sszgen/tags.go                            |  31 ++-
 cmd/sszgen/types.go                           |  12 +-
 forks.go                                      |   4 +-
 hasher.go                                     |   2 +-
 ssz.go                                        |  65 +-----
 tests/consensus_specs_test.go                 |  99 +++++----
 tests/corner_cases_test.go                    |  22 +-
 .../gen_aggregate_and_proof_ssz.go            |   4 +-
 .../gen_attestation_data_ssz.go               |   5 +-
 .../gen_attestation_data_variation_1_ssz.go   |  32 +++
 .../gen_attestation_data_variation_2_ssz.go   |  33 +++
 .../gen_attestation_data_variation_3_ssz.go   |  32 +++
 .../gen_attestation_variation_1_ssz.go        |  43 ++++
 .../gen_attestation_variation_2_ssz.go        |  44 ++++
 .../gen_attestation_variation_3_ssz.go        |  43 ++++
 .../gen_attester_slashing_ssz.go              |   4 +-
 .../gen_beacon_block_ssz.go                   |   4 +-
 .../consensus-spec-tests/gen_deposit_ssz.go   |   5 +-
 .../gen_execution_payload_capella_ssz.go      |   4 +-
 .../gen_execution_payload_deneb_ssz.go        |   4 +-
 ...en_execution_payload_header_capella_ssz.go |   4 +-
 .../gen_execution_payload_header_deneb_ssz.go |   4 +-
 ...n_execution_payload_header_monolith_ssz.go |  57 +++++
 .../gen_execution_payload_header_ssz.go       |   4 +-
 .../gen_execution_payload_monolith_ssz.go     |  65 ++++++
 .../gen_execution_payload_ssz.go              |   4 +-
 .../gen_execution_payload_variation_ssz.go    |   4 +-
 .../gen_proposer_slashing_ssz.go              |   5 +-
 .../gen_signed_beacon_block_header_ssz.go     |   5 +-
 .../gen_signed_bls_to_execution_change_ssz.go |   5 +-
 .../gen_signed_voluntary_exit_ssz.go          |   5 +-
 .../consensus-spec-tests/types_consensus.go   |   2 +-
 .../consensus-spec-tests/types_monoliths.go   |  50 +++++
 .../consensus-spec-tests/types_variation.go   |  55 +++++
 genutils.go => utils.go                       |   7 +-
 38 files changed, 765 insertions(+), 246 deletions(-)
 create mode 100644 cmd/sszgen/forks.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go
 create mode 100644 tests/testtypes/consensus-spec-tests/types_monoliths.go
 rename genutils.go => utils.go (77%)

diff --git a/cmd/sszgen/forks.go b/cmd/sszgen/forks.go
new file mode 100644
index 0000000..3738e4b
--- /dev/null
+++ b/cmd/sszgen/forks.go
@@ -0,0 +1,38 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package main
+
+// forkMapping maps fork names to fork values. This is used internally by the
+// ssz codec generator to convert tags to values.
+var forkMapping = map[string]string{
+	"frontier":       "Frontier",
+	"homestead":      "Homestead",
+	"dao":            "DAO",
+	"tangerine":      "Tangerine",
+	"spurious":       "Spurious",
+	"byzantium":      "Byzantium",
+	"constantinople": "Constantinople",
+	"istanbul":       "Istanbul",
+	"muir":           "Muir",
+	"phase0":         "Phase0",
+	"berlin":         "Berlin",
+	"london":         "London",
+	"altair":         "Altair",
+	"arrow":          "Arrow",
+	"gray":           "Gray",
+	"bellatrix":      "Bellatrix",
+	"paris":          "Paris",
+	"merge":          "Merge",
+	"shapella":       "Shapella",
+	"shanghai":       "Shanghai",
+	"capella":        "Capella",
+	"dencun":         "Dencun",
+	"cancun":         "Cancun",
+	"deneb":          "Deneb",
+	"pectra":         "Pectra",
+	"prague":         "Prague",
+	"electra":        "Electra",
+	"future":         "Future",
+}
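This generator-side mapping mirrors ssz.ForkMapping in the library (exported further down in this patch) and gives meaning to the new `ssz-fork` struct tag parsed in tags.go below: a plain fork name includes the field from that fork onwards, while a `!` prefix excludes it from that fork onwards. A hypothetical monolith type would be tagged like this (type, field names and limits are illustrative only):

	type BlockBodyMono struct {
		Graffiti              [32]byte
		BlsToExecutionChanges []*BLSToExecutionChange `ssz-max:"16"   ssz-fork:"capella"` // Capella onwards
		BlobKzgCommitments    [][48]byte              `ssz-max:"4096" ssz-fork:"deneb"`   // Deneb onwards
	}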
diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go
index 1ecfbfb..503bb7b 100644
--- a/cmd/sszgen/gen.go
+++ b/cmd/sszgen/gen.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"go/types"
 	"html/template"
+	"io"
 	"math"
 	"sort"
 )
@@ -91,18 +92,73 @@ func generate(ctx *genContext, typ *sszContainer) ([]byte, error) {
 	return bytes.Join(codes, []byte("\n")), nil
 }
 
+// generateStaticSizeAccumulator is a helper to iterate over all the fields and
+// accumulate the static sizes into a `size` variable based on fork constraints.
+func generateStaticSizeAccumulator(w io.Writer, ctx *genContext, typ *sszContainer) {
+	for i := range typ.opsets {
+		switch {
+		case typ.forks[i] == "" && i == 0:
+			fmt.Fprintf(w, "	size = ")
+		case typ.forks[i] == "" && typ.forks[i-1] == "":
+			fmt.Fprintf(w, " + ")
+		case typ.forks[i] == "" && typ.forks[i-1] != "":
+			fmt.Fprintf(w, "\n	size += ")
+		case typ.forks[i] != "" && i > 0:
+			fmt.Fprintf(w, "\n")
+		}
+		if typ.forks[i] != "" {
+			if typ.forks[i][0] == '!' {
+				fmt.Fprintf(w, "	if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:])
+			} else {
+				fmt.Fprintf(w, "	if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i])
+			}
+			fmt.Fprintf(w, "		size += ")
+		}
+		switch t := typ.opsets[i].(type) {
+		case *opsetStatic:
+			if t.bytes != nil {
+				if len(t.bytes) == 1 {
+					fmt.Fprintf(w, "%d", t.bytes[0])
+				} else {
+					fmt.Fprintf(w, "%d*%d", t.bytes[0], t.bytes[1])
+				}
+			} else {
+				typ := typ.types[i].(*types.Pointer).Elem().(*types.Named)
+				pkg := typ.Obj().Pkg()
+				if pkg.Path() == ctx.pkg.Path() {
+					fmt.Fprintf(w, "(*%s)(nil).SizeSSZ(sizer)", typ.Obj().Name())
+				} else {
+					ctx.addImport(pkg.Path(), "")
+					fmt.Fprintf(w, "(*%s.%s)(nil).SizeSSZ(sizer)", pkg.Name(), typ.Obj().Name())
+				}
+			}
+		case *opsetDynamic:
+			fmt.Fprintf(w, "%d", offsetBytes)
+		}
+		if typ.forks[i] != "" {
+			fmt.Fprintf(w, "\n	}")
+		}
+	}
+	fmt.Fprintf(w, " \n")
+}
+
 func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 	var b bytes.Buffer
 
 	// Generate the code itself
 	if typ.static {
 		// Iterate through the fields to see if the size can be computed compile
-		// time or if runtime resolutions are needed
-		var runtime bool
+		// time or if runtime resolutions are needed.
+		var (
+			runtime  bool
+			monolith bool
+		)
 		for i := range typ.opsets {
 			if typ.opsets[i].(*opsetStatic).bytes == nil {
 				runtime = true
-				break
+			}
+			if typ.forks[i] != "" {
+				monolith = true
 			}
 		}
 		// If some types require runtime size determination, generate a helper
@@ -112,49 +168,35 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 			fmt.Fprintf(&b, "var staticSizeCache%s = ssz.PrecomputeStaticSizeCache((*%s)(nil))\n\n", typ.named.Obj().Name(), typ.named.Obj().Name())
 
 			fmt.Fprintf(&b, "// SizeSSZ returns the total size of the static ssz object.\n")
-			fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name())
+			fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) (size uint32) {\n", typ.named.Obj().Name())
 			fmt.Fprintf(&b, "	if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name())
 			fmt.Fprintf(&b, "		return staticSizeCache%s[fork]\n", typ.named.Obj().Name())
 			fmt.Fprintf(&b, "	}\n")
-			fmt.Fprintf(&b, "	return ")
-			for i := range typ.opsets {
-				if bytes := typ.opsets[i].(*opsetStatic).bytes; bytes != nil {
+
+			generateStaticSizeAccumulator(&b, ctx, typ)
+			fmt.Fprintf(&b, "	return size\n}\n")
+		} else {
+			fmt.Fprint(&b, "// SizeSSZ returns the total size of the static ssz object.\n")
+			if monolith {
+				fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) (size uint32) {\n", typ.named.Obj().Name())
+				generateStaticSizeAccumulator(&b, ctx, typ)
+				fmt.Fprintf(&b, "	return size\n}\n")
+			} else {
+				fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name())
+				fmt.Fprintf(&b, "	return ")
+				for i := range typ.opsets {
+					bytes := typ.opsets[i].(*opsetStatic).bytes
 					if len(bytes) == 1 {
 						fmt.Fprintf(&b, "%d", bytes[0])
 					} else {
 						fmt.Fprintf(&b, "%d*%d", bytes[0], bytes[1])
 					}
-				} else {
-					typ := typ.types[i].(*types.Pointer).Elem().(*types.Named)
-					pkg := typ.Obj().Pkg()
-					if pkg.Path() == ctx.pkg.Path() {
-						fmt.Fprintf(&b, "ssz.Size((*%s)(nil))", typ.Obj().Name())
-					} else {
-						ctx.addImport(pkg.Path(), "")
-						fmt.Fprintf(&b, "ssz.Size((*%s.%s)(nil))", pkg.Name(), typ.Obj().Name())
+					if i < len(typ.opsets)-1 {
+						fmt.Fprint(&b, " + ")
 					}
 				}
-				if i < len(typ.opsets)-1 {
-					fmt.Fprint(&b, " + ")
-				}
+				fmt.Fprintf(&b, "\n}\n")
 			}
-			fmt.Fprintf(&b, "\n}\n")
-		} else {
-			fmt.Fprint(&b, "// SizeSSZ returns the total size of the static ssz object.\n")
-			fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer) uint32 {\n", typ.named.Obj().Name())
-			fmt.Fprint(&b, "	return ")
-			for i := range typ.opsets {
-				bytes := typ.opsets[i].(*opsetStatic).bytes
-				if len(bytes) == 1 {
-					fmt.Fprintf(&b, "%d", bytes[0])
-				} else {
-					fmt.Fprintf(&b, "%d*%d", bytes[0], bytes[1])
-				}
-				if i < len(typ.opsets)-1 {
-					fmt.Fprint(&b, " + ")
-				}
-			}
-			fmt.Fprintf(&b, "\n}\n")
 		}
 	} else {
 		// Iterate through the fields to see if the static size can be computed
@@ -164,7 +206,6 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 			if typ, ok := typ.opsets[i].(*opsetStatic); ok {
 				if typ.bytes == nil {
 					runtime = true
-					break
 				}
 			}
 		}
@@ -180,35 +221,7 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 			fmt.Fprintf(&b, "	if fork := int(sizer.Fork()); fork < len(staticSizeCache%s) {\n", typ.named.Obj().Name())
 			fmt.Fprintf(&b, "		size = staticSizeCache%s[fork]\n", typ.named.Obj().Name())
 			fmt.Fprintf(&b, "	} else {\n")
-			fmt.Fprintf(&b, "		size = ")
-			for i := range typ.opsets {
-				switch t := typ.opsets[i].(type) {
-				case *opsetStatic:
-					if t.bytes != nil {
-						if len(t.bytes) == 1 {
-							fmt.Fprintf(&b, "%d", t.bytes[0])
-						} else {
-							fmt.Fprintf(&b, "%d*%d", t.bytes[0], t.bytes[1])
-						}
-					} else {
-						typ := typ.types[i].(*types.Pointer).Elem().(*types.Named)
-						pkg := typ.Obj().Pkg()
-						if pkg.Path() == ctx.pkg.Path() {
-							fmt.Fprintf(&b, "(*%s)(nil).SizeSSZ(sizer)", typ.Obj().Name())
-						} else {
-							ctx.addImport(pkg.Path(), "")
-							fmt.Fprintf(&b, "(*%s.%s)(nil).SizeSSZ(sizer)", pkg.Name(), typ.Obj().Name())
-						}
-					}
-				case *opsetDynamic:
-					fmt.Fprintf(&b, "%d", offsetBytes)
-				}
-				if i < len(typ.opsets)-1 {
-					fmt.Fprintf(&b, " + ")
-				} else {
-					fmt.Fprintf(&b, "\n")
-				}
-			}
+			generateStaticSizeAccumulator(&b, ctx, typ)
 			fmt.Fprintf(&b, "	}\n")
 			fmt.Fprintf(&b, "	// Either return the static size or accumulate the dynamic too\n")
 			fmt.Fprintf(&b, "	if (fixed) {\n")
 			fmt.Fprintf(&b, "		return size\n")
 			fmt.Fprintf(&b, "	}\n")
 			for i := range typ.opsets {
 				if opset, ok := typ.opsets[i].(*opsetDynamic); ok {
+					if typ.forks[i] != "" {
+						if typ.forks[i][0] == '!' {
+							fmt.Fprintf(&b, "	if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:])
+						} else {
+							fmt.Fprintf(&b, "	if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i])
+						}
+					}
 					call := generateCall(opset.size, "sizer", "obj."+typ.fields[i])
 					fmt.Fprintf(&b, "	size += ssz.%s\n", call)
+					if typ.forks[i] != "" {
+						fmt.Fprintf(&b, "	}\n")
+					}
 				}
 			}
 			fmt.Fprintf(&b, "\n")
@@ -225,31 +248,25 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 			fmt.Fprintf(&b, "}\n")
 		} else {
 			fmt.Fprintf(&b, "\n\n// SizeSSZ returns either the static size of the object if fixed == true, or\n// the total size otherwise.\n")
-			fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 {\n", typ.named.Obj().Name())
-			fmt.Fprintf(&b, "	var size = uint32(")
-			for i := range typ.opsets {
-				switch t := typ.opsets[i].(type) {
-				case *opsetStatic:
-					if len(t.bytes) == 1 {
-						fmt.Fprintf(&b, "%d", t.bytes[0])
-					} else {
-						fmt.Fprintf(&b, "%d*%d", t.bytes[0], t.bytes[1])
-					}
-				case *opsetDynamic:
-					fmt.Fprintf(&b, "%d", offsetBytes)
-				}
-				if i < len(typ.opsets)-1 {
-					fmt.Fprint(&b, " + ")
-				}
-			}
-			fmt.Fprintf(&b, ")\n")
+			fmt.Fprintf(&b, "func (obj *%s) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {\n", typ.named.Obj().Name())
+			generateStaticSizeAccumulator(&b, ctx, typ)
 			fmt.Fprintf(&b, "	if (fixed) {\n")
 			fmt.Fprintf(&b, "		return size\n")
 			fmt.Fprintf(&b, "	}\n")
 			for i := range typ.opsets {
 				if opset, ok := typ.opsets[i].(*opsetDynamic); ok {
+					if typ.forks[i] != "" {
+						if typ.forks[i][0] == '!' {
+							fmt.Fprintf(&b, "	if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:])
+						} else {
+							fmt.Fprintf(&b, "	if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i])
+						}
+					}
 					call := generateCall(opset.size, "sizer", "obj."+typ.fields[i])
 					fmt.Fprintf(&b, "	size += ssz.%s\n", call)
+					if typ.forks[i] != "" {
+						fmt.Fprintf(&b, "	}\n")
+					}
 				}
 			}
 			fmt.Fprintf(&b, "\n")
@@ -297,6 +314,13 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 		fmt.Fprint(&b, "	// Define the static data (fields and dynamic offsets)\n")
 	}
 	for i := 0; i < len(typ.fields); i++ {
+		if typ.forks[i] != "" {
+			if typ.forks[i][0] == '!' {
+				fmt.Fprintf(&b, "	if codec.Fork() < ssz.Fork%s {\n", typ.forks[i][1:])
+			} else {
+				fmt.Fprintf(&b, "	if codec.Fork() >= ssz.Fork%s {\n", typ.forks[i])
+			}
+		}
 		field := typ.fields[i]
 		switch opset := typ.opsets[i].(type) {
 		case *opsetStatic:
@@ -314,14 +338,27 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) {
 			call := generateCall(opset.defineOffset, "codec", "obj."+field, opset.limits...)
 			fmt.Fprintf(&b, "	ssz.%s // Offset ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, offsetBytes)
 		}
+		if typ.forks[i] != "" {
+			fmt.Fprintf(&b, "	}\n")
+		}
 	}
 	if !typ.static {
 		fmt.Fprint(&b, "\n	// Define the dynamic data (fields)\n")
 		for i := 0; i < len(typ.fields); i++ {
 			field := typ.fields[i]
 			if opset, ok := (typ.opsets[i]).(*opsetDynamic); ok {
+				if typ.forks[i] != "" {
+					if typ.forks[i][0] == '!' {
+						fmt.Fprintf(&b, "	if codec.Fork() < ssz.Fork%s {\n", typ.forks[i][1:])
+					} else {
+						fmt.Fprintf(&b, "	if codec.Fork() >= ssz.Fork%s {\n", typ.forks[i])
+					}
+				}
 				call := generateCall(opset.defineContent, "codec", "obj."+field, opset.limits...)
 				fmt.Fprintf(&b, "	ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? bytes\n", call, i, field)
+				if typ.forks[i] != "" {
+					fmt.Fprintf(&b, "	}\n")
+				}
 			}
 		}
 	}
diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go
index c995172..e639e56 100644
--- a/cmd/sszgen/opset.go
+++ b/cmd/sszgen/opset.go
@@ -20,7 +20,7 @@ type opsetStatic struct {
 	define string // DefineXYZ method for the ssz.Codec
 	encode string // EncodeXYZ method for the ssz.Encoder
 	decode string // DecodeXYZ method for the ssz.Decoder
-	bytes  []int  // Number of bytes in the ssz encoding (0 == unknown)
+	bytes  []int  // Number of bytes in the ssz encoding (nil == unknown)
 }
 
 // opsetDynamic is a group of methods that define how different pieces of an ssz
diff --git a/cmd/sszgen/tags.go b/cmd/sszgen/tags.go
index de81d57..8a8f365 100644
--- a/cmd/sszgen/tags.go
+++ b/cmd/sszgen/tags.go
@@ -14,22 +14,25 @@ const (
 	sszTagIdent     = "ssz"
 	sszSizeTagIdent = "ssz-size"
 	sszMaxTagIdent  = "ssz-max"
+	sszForkTagIdent = "ssz-fork"
 )
 
-// sizeTag describes the size restriction for types.
+// sizeTag describes the restriction for types.
 type sizeTag struct {
 	bits  bool  // whether the sizes are bits instead of bytes
 	size  []int // 0 means the size for that dimension is undefined
 	limit []int // 0 means the limit for that dimension is undefined
 }
 
-func parseTags(input string) (bool, *sizeTag, error) {
+func parseTags(input string) (bool, *sizeTag, string, error) {
 	if len(input) == 0 {
-		return false, nil, nil
+		return false, nil, "", nil
 	}
 	var (
 		ignore bool
 		tags   sizeTag
+		fork   string
+
 		setTag = func(v int, ident string) {
 			if ident == sszMaxTagIdent {
 				tags.limit = append(tags.limit, v)
@@ -41,7 +44,7 @@ func parseTags(input string) (bool, *sizeTag, error) {
 	for _, tag := range strings.Fields(input) {
 		parts := strings.Split(tag, ":")
 		if len(parts) != 2 {
-			return false, nil, fmt.Errorf("invalid tag %s", tag)
+			return false, nil, "", fmt.Errorf("invalid tag %s", tag)
 		}
 		ident, remain := parts[0], strings.Trim(parts[1], "\"")
 		switch ident {
@@ -60,14 +63,28 @@ func parseTags(input string) (bool, *sizeTag, error) {
 			}
 			num, err := strconv.ParseInt(p, 10, 64)
 			if err != nil {
-				return false, nil, err
+				return false, nil, "", err
 			}
 			setTag(int(num), ident)
 		}
+		case sszForkTagIdent:
+			var negate bool
+			if remain[0] == '!' {
+				negate = true
+				remain = remain[1:]
+			}
+			if enum, ok := forkMapping[remain]; !ok {
+				return ignore, nil, "", fmt.Errorf("invalid fork tag %s", tag)
+			} else {
+				fork = enum
+				if negate {
+					fork = "!" + fork
+				}
+			}
 		}
 	}
 	if tags.size == nil && tags.limit == nil {
-		return ignore, nil, nil
+		return ignore, nil, fork, nil
 	}
-	return ignore, &tags, nil
+	return ignore, &tags, fork, nil
 }
diff --git a/cmd/sszgen/types.go b/cmd/sszgen/types.go
index e1c2183..1df5ddf 100644
--- a/cmd/sszgen/types.go
+++ b/cmd/sszgen/types.go
@@ -13,9 +13,10 @@ type sszContainer struct {
 	*types.Struct
 	named  *types.Named
 	static bool
-	fields []string
-	types  []types.Type
-	opsets []opset
+	fields []string     // Name of the struct field
+	types  []types.Type // Type of the struct field
+	opsets []opset      // Opset for the struct field
+	forks  []string     // Fork constraint for the struct field
 }
 
 // makeContainer iterates over the fields of the struct and attempt to match each
@@ -26,6 +27,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss
 		fields []string
 		types  []types.Type
 		opsets []opset
+		forks  []string
 	)
 	// Iterate over all the fields of the struct
 	for i := 0; i < typ.NumFields(); i++ {
@@ -34,7 +36,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss
 		if !f.Exported() {
 			continue
 		}
-		ignore, tags, err := parseTags(typ.Tag(i))
+		ignore, tags, fork, err := parseTags(typ.Tag(i))
 		if err != nil {
 			return nil, fmt.Errorf("failed to parse field %s.%s tags: %v", named.Obj().Name(), f.Name(), err)
 		}
@@ -52,6 +54,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss
 		fields = append(fields, f.Name())
 		types = append(types, f.Type())
 		opsets = append(opsets, opset)
+		forks = append(forks, fork)
 	}
 	return &sszContainer{
 		Struct: typ,
@@ -60,6 +63,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss
 		fields: fields,
 		types:  types,
 		opsets: opsets,
+		forks:  forks,
 	}, nil
 }
diff --git a/forks.go b/forks.go
index b11c3ce..3f40807 100644
--- a/forks.go
+++ b/forks.go
@@ -47,9 +47,9 @@ const (
 	ForkElectra = ForkPectra // CL alias for Pectra
 )
 
-// forkMapping maps fork names to fork values. This is used internally by the
+// ForkMapping maps fork names to fork values. This is used internally by the
 // ssz codec generator to convert tags to values.
-var forkMapping = map[string]Fork{
+var ForkMapping = map[string]Fork{
 	"frontier":  ForkFrontier,
 	"homestead": ForkHomestead,
 	"dao":       ForkDAO,
diff --git a/hasher.go b/hasher.go
index fced534..b8a0c6d 100644
--- a/hasher.go
+++ b/hasher.go
@@ -321,7 +321,7 @@ func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u
 	defer h.ascendMixinLayer(uint64(len(objects)), maxItems)
 
 	// If threading is disabled, or hashing nothing, do it sequentially
-	if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0])) < concurrencyThreshold {
+	if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.Fork())) < concurrencyThreshold {
 		for _, obj := range objects {
 			h.descendLayer()
 			obj.DefineSSZ(h.codec)
-func EncodeToStream(w io.Writer, obj Object) error { - return EncodeToStreamWithFork(w, obj, ForkUnknown) -} - -// EncodeToStreamWithFork is analogous to EncodeToStream, but allows the user to -// set a specific fork context to encode the object in. This is useful for code- -// bases that have monolith types that marshal into many fork formats. -func EncodeToStreamWithFork(w io.Writer, obj Object, fork Fork) error { +func EncodeToStream(w io.Writer, obj Object, fork Fork) error { codec := encoderPool.Get().(*Codec) defer encoderPool.Put(codec) @@ -124,16 +117,9 @@ func EncodeToStreamWithFork(w io.Writer, obj Object, fork Fork) error { // if you want to then write the buffer into a stream via some writer, as that // would double the memory use for the temporary buffer. For that use case, use // EncodeToStream instead. -func EncodeToBytes(buf []byte, obj Object) error { - return EncodeToBytesWithFork(buf, obj, ForkUnknown) -} - -// EncodeToBytesWithFork is analogous to EncodeToBytes, but allows the user to -// set a specific fork context to encode the object in. This is useful for code- -// bases that have monolith types that marshal into many fork formats. -func EncodeToBytesWithFork(buf []byte, obj Object, fork Fork) error { +func EncodeToBytes(buf []byte, obj Object, fork Fork) error { // Sanity check that we have enough space to serialize into - if size := Size(obj); int(size) > len(buf) { + if size := Size(obj, fork); int(size) > len(buf) { return fmt.Errorf("%w: buffer %d bytes, object %d bytes", ErrBufferTooSmall, len(buf), size) } codec := encoderPool.Get().(*Codec) @@ -161,14 +147,7 @@ func EncodeToBytesWithFork(buf []byte, obj Object, fork Fork) error { // DecodeFromStream parses an object with the given size out of a stream. Do not // use this method with a bytes.Buffer to read from a []byte slice, as that will // double the byte copying. For that use case, use DecodeFromBytes instead. -func DecodeFromStream(r io.Reader, obj Object, size uint32) error { - return DecodeFromStreamWithFork(r, obj, size, ForkUnknown) -} - -// DecodeFromStreamWithFork is analogous to DecodeFromStream, but allows the user -// to set a specific fork context to decode the object in. This is useful for code- -// bases that have monolith types that unmarshal into many fork formats. -func DecodeFromStreamWithFork(r io.Reader, obj Object, size uint32, fork Fork) error { +func DecodeFromStream(r io.Reader, obj Object, size uint32, fork Fork) error { // Retrieve a new decoder codec and set its data source codec := decoderPool.Get().(*Codec) defer decoderPool.Put(codec) @@ -203,14 +182,7 @@ func DecodeFromStreamWithFork(r io.Reader, obj Object, size uint32, fork Fork) e // if you want to first read the buffer from a stream via some reader, as that // would double the memory use for the temporary buffer. For that use case, use // DecodeFromStream instead. -func DecodeFromBytes(blob []byte, obj Object) error { - return DecodeFromBytesWithFork(blob, obj, ForkUnknown) -} - -// DecodeFromBytesWithFork is analogous to DecodeFromBytes, but allows the user -// to set a specific fork context to decode the object in. This is useful for code- -// bases that have monolith types that unmarshal into many fork formats. 
-func DecodeFromBytesWithFork(blob []byte, obj Object, fork Fork) error { +func DecodeFromBytes(blob []byte, obj Object, fork Fork) error { // Reject decoding from an empty slice if len(blob) == 0 { return io.ErrUnexpectedEOF @@ -251,14 +223,7 @@ func DecodeFromBytesWithFork(blob []byte, obj Object, fork Fork) error { // HashSequential computes the ssz merkle root of the object on a single thread. // This is useful for processing small objects with stable runtime and O(1) GC // guarantees. -func HashSequential(obj Object) [32]byte { - return HashSequentialWithFork(obj, ForkUnknown) -} - -// HashSequentialWithFork is analogous to HashSequential, but allows the user to -// set a specific fork context to hash the object in. This is useful for code- -// bases that have monolith types that hash across many fork formats. -func HashSequentialWithFork(obj Object, fork Fork) [32]byte { +func HashSequential(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() @@ -279,14 +244,7 @@ func HashSequentialWithFork(obj Object, fork Fork) [32]byte { // concurrent threads (iff some data segments are large enough to be worth it). This // is useful for processing large objects, but will place a bigger load on your CPU // and GC; and might be more variable timing wise depending on other load. -func HashConcurrent(obj Object) [32]byte { - return HashConcurrentWithFork(obj, ForkUnknown) -} - -// HashConcurrentWithFork is analogous to HashConcurrent, but allows the user to -// set a specific fork context to hash the object in. This is useful for code- -// bases that have monolith types that hash across many fork formats. -func HashConcurrentWithFork(obj Object, fork Fork) [32]byte { +func HashConcurrent(obj Object, fork Fork) [32]byte { codec := hasherPool.Get().(*Codec) defer hasherPool.Put(codec) defer codec.has.Reset() @@ -307,14 +265,7 @@ func HashConcurrentWithFork(obj Object, fork Fork) [32]byte { // Size retrieves the size of a ssz object, independent if it's a static or a // dynamic one. -func Size(obj Object) uint32 { - return SizeWithFork(obj, ForkUnknown) -} - -// SizeWithFork is analogous to Size, but allows the user to set a specific fork -// context to size the object in. This is useful for codebases that have monolith -// types that serialize across many fork formats. -func SizeWithFork(obj Object, fork Fork) uint32 { +func Size(obj Object, fork Fork) uint32 { sizer := sizerPool.Get().(*Sizer) defer sizerPool.Put(sizer) diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 95fe40c..b176eb2 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -177,6 +177,7 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st // TestConsensusSpecs iterates over all the (supported) consensus SSZ types and // runs the encoding/decoding/hashing round. 
func TestConsensusSpecs(t *testing.T) { + // Run through all the consensus specs as simple types testConsensusSpecType[*types.AggregateAndProof](t, "AggregateAndProof", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") testConsensusSpecType[*types.Attestation](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") testConsensusSpecType[*types.AttestationData](t, "AttestationData") @@ -219,10 +220,20 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.VoluntaryExit](t, "VoluntaryExit") testConsensusSpecType[*types.Withdrawal](t, "Withdrawal") + // Add monolith variations to the consensus types + testConsensusSpecType[*types.ExecutionPayloadMonolith](t, "ExecutionPayload", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](t, "ExecutionPayloadHeader", "bellatrix", "capella", "deneb") + // Add some API variations to test different codec implementations testConsensusSpecType[*types.ExecutionPayloadVariation](t, "ExecutionPayload", "bellatrix") testConsensusSpecType[*types.HistoricalBatchVariation](t, "HistoricalBatch") testConsensusSpecType[*types.WithdrawalVariation](t, "Withdrawal") + testConsensusSpecType[*types.AttestationVariation1](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationVariation2](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationVariation3](t, "Attestation", "altair", "bellatrix", "capella", "deneb", "eip7594", "phase0", "whisk") + testConsensusSpecType[*types.AttestationDataVariation1](t, "AttestationData") + testConsensusSpecType[*types.AttestationDataVariation2](t, "AttestationData") + testConsensusSpecType[*types.AttestationDataVariation3](t, "AttestationData") // Iterate over all the untouched tests and report them // forks, err := os.ReadDir(consensusSpecTestsRoot) @@ -312,11 +323,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. 
obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -325,11 +336,11 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkMapping[fork])) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkMapping[fork]); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -339,14 +350,14 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkMapping[fork]); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj) + hash := ssz.HashSequential(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj) + hash = ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -406,7 +417,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.Fatalf("failed to parse snappy ssz binary: %v", err) } inObj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), inObj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } // Start the benchmarks for all the different operations @@ -416,7 +427,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToStream(io.Discard, inObj); err != nil { + if err := ssz.EncodeToStream(io.Discard, inObj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ stream: %v", err) } } @@ -429,7 +440,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.EncodeToBytes(blob, inObj); err != nil { + if err := ssz.EncodeToBytes(blob, inObj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to encode SSZ bytes: %v", err) } } @@ -443,7 +454,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromStream(r, obj, uint32(len(inSSZ))); err != nil { + if err := 
ssz.DecodeFromStream(r, obj, uint32(len(inSSZ)), ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } r.Reset(inSSZ) @@ -457,14 +468,14 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } } }) b.Run(fmt.Sprintf("%s/merkleize-sequential", kind), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -472,12 +483,12 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashSequential(obj) + ssz.HashSequential(obj, ssz.ForkMapping[fork]) } }) b.Run(fmt.Sprintf("%s/merkleize-concurrent", kind), func(b *testing.B) { obj := T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) } b.SetBytes(int64(len(inSSZ))) @@ -485,7 +496,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.ResetTimer() for i := 0; i < b.N; i++ { - ssz.HashConcurrent(obj) + ssz.HashConcurrent(obj, ssz.ForkMapping[fork]) } }) } @@ -653,7 +664,7 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) f.Fatalf("failed to parse snappy ssz binary: %v", err) } obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork.Name()]); err == nil { // Stash away all valid ssz streams so we can play with decoding // into previously used objects valids = append(valids, inSSZ) @@ -670,11 +681,11 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stream decoder succeeded, make sure it re-encodes correctly and // that the buffer decoder also succeeds parsing blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -682,27 +693,27 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash 
mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } valid = true } // Try the buffer encoder/decoder obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err == nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err == nil { // Buffer decoder succeeded, make sure it re-encodes correctly and // that the stream decoder also succeeds parsing - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -710,16 +721,16 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", bin, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream: %v", err) } // Sanity check that hashing and size retrieval works - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } @@ -730,14 +741,14 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) // Try the stream encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err != nil { t.Fatalf("failed to decode stream into used object: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode stream from used object: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -745,24 +756,24 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded stream from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } - hash1 := ssz.HashSequential(obj) - hash2 := ssz.HashConcurrent(obj) + hash1 := ssz.HashSequential(obj, ssz.ForkFuture) + hash2 := ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, 
ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } // Try the buffer encoder/decoder into a prepped object obj = T(new(U)) - if err := ssz.DecodeFromBytes(vSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(vSSZ, obj, ssz.ForkFuture); err != nil { panic(err) // we've already decoded this, cannot fail } - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to decode buffer into used object: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkFuture)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkFuture); err != nil { t.Fatalf("failed to re-encode buffer from used object: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -770,12 +781,12 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) t.Fatalf("re-encoded buffer from used object mismatch: have %x, want %x, common prefix %d, have left %x, want left %x", blob, inSSZ, len(prefix), bin[len(prefix):], inSSZ[len(prefix):]) } - hash1 = ssz.HashSequential(obj) - hash2 = ssz.HashConcurrent(obj) + hash1 = ssz.HashSequential(obj, ssz.ForkFuture) + hash2 = ssz.HashConcurrent(obj, ssz.ForkFuture) if hash1 != hash2 { t.Fatalf("sequential/concurrent hash mismatch: sequencial %x, concurrent %x", hash1, hash2) } - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkFuture); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } } diff --git a/tests/corner_cases_test.go b/tests/corner_cases_test.go index 9564bf4..23d1b13 100644 --- a/tests/corner_cases_test.go +++ b/tests/corner_cases_test.go @@ -19,19 +19,19 @@ import ( func TestDecodeMissized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, ssz.Size(obj)+1) - if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)+1) + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, ssz.ErrObjectSlotSizeMismatch) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, ssz.ErrObjectSlotSizeMismatch) } - blob = make([]byte, ssz.Size(obj)-1) - if err := ssz.DecodeFromBytes(blob, obj); !errors.Is(err, io.ErrUnexpectedEOF) { + blob = make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from bytes error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } - if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob))); !errors.Is(err, io.ErrUnexpectedEOF) { + if err := ssz.DecodeFromStream(bytes.NewReader(blob), obj, uint32(len(blob)), ssz.ForkUnknown); !errors.Is(err, io.ErrUnexpectedEOF) { t.Errorf("decode from stream error mismatch: have %v, want %v", err, io.ErrUnexpectedEOF) } } @@ -50,11 +50,11 @@ func (t *testMissizedType) DefineSSZ(codec *ssz.Codec) { func 
TestEncodeOversized(t *testing.T) { obj := new(testMissizedType) - blob := make([]byte, ssz.Size(obj)-1) - if err := ssz.EncodeToBytes(blob, obj); !errors.Is(err, ssz.ErrBufferTooSmall) { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)-1) + if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); !errors.Is(err, ssz.ErrBufferTooSmall) { t.Errorf("encode to bytes error mismatch: have %v, want %v", err, ssz.ErrBufferTooSmall) } - if err := ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj); err == nil { + if err := ssz.EncodeToStream(&testEncodeOversizedStream{blob}, obj, ssz.ForkUnknown); err == nil { t.Errorf("encode to stream error mismatch: have nil, want stream full") // wonky, but should be fine } } @@ -85,7 +85,7 @@ func TestZeroCounterOffset(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload)) + err = ssz.DecodeFromBytes(inSSZ, new(types.ExecutionPayload), ssz.ForkUnknown) if !errors.Is(err, ssz.ErrZeroCounterOffset) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrZeroCounterOffset) } @@ -97,7 +97,7 @@ func TestInvalidBoolean(t *testing.T) { if err != nil { panic(err) } - err = ssz.DecodeFromBytes(inSSZ, new(types.Validator)) + err = ssz.DecodeFromBytes(inSSZ, new(types.Validator), ssz.ForkUnknown) if !errors.Is(err, ssz.ErrInvalidBoolean) { t.Errorf("decode error mismatch: have %v, want %v", err, ssz.ErrInvalidBoolean) } diff --git a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go index 62cfa00..21f4c57 100644 --- a/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_aggregate_and_proof_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *AggregateAndProof) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(8 + 4 + 96) +func (obj *AggregateAndProof) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 8 + 4 + 96 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go index 54b1e95..1629e34 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheAttestationData = ssz.PrecomputeStaticSizeCache((*AttestationData)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *AttestationData) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *AttestationData) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationData) { return staticSizeCacheAttestationData[fork] } - return 8 + 8 + 32 + ssz.Size((*Checkpoint)(nil)) + ssz.Size((*Checkpoint)(nil)) + size = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. 
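The reworked entry points above read most naturally from the caller's side: the WithFork variants are gone and every call now threads an explicit ssz.Fork, with fork-oblivious code passing ssz.ForkUnknown. A minimal hand-written sketch of the new calling convention (the Demo type below is illustrative only, not part of this change; it assumes the Sizer-based SizeSSZ signature introduced above):

package main

import (
	"fmt"

	"github.com/karalabe/ssz"
)

// Demo is a toy static object with two fixed-size fields and no fork gating.
type Demo struct {
	Epoch uint64
	Root  [32]byte
}

// SizeSSZ now receives a *ssz.Sizer so that fork-gated types can report
// fork-dependent sizes; this plain static type simply ignores it.
func (d *Demo) SizeSSZ(sizer *ssz.Sizer) uint32 { return 8 + 32 }

// DefineSSZ registers the fields with the codec.
func (d *Demo) DefineSSZ(codec *ssz.Codec) {
	ssz.DefineUint64(codec, &d.Epoch)     // Field (0) - Epoch - 8 bytes
	ssz.DefineStaticBytes(codec, &d.Root) // Field (1) - Root - 32 bytes
}

func main() {
	obj := &Demo{Epoch: 42}

	// Every entry point takes the fork explicitly now; ForkUnknown keeps
	// the previous fork-oblivious behavior.
	blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown))
	if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil {
		panic(err)
	}
	if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes, root %#x\n", len(blob), ssz.HashSequential(obj, ssz.ForkUnknown))
}
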
diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go new file mode 100644 index 0000000..c52c0b8 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go @@ -0,0 +1,32 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationDataVariation1 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation1)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation1) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation1) { + return staticSizeCacheAttestationDataVariation1[fork] + } + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation1) DefineSSZ(codec *ssz.Codec) { + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (0) - Future - 8 bytes + } + ssz.DefineUint64(codec, &obj.Slot) // Field (1) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (2) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (3) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go new file mode 100644 index 0000000..9f0b981 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go @@ -0,0 +1,33 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationDataVariation2 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation2)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation2) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation2) { + return staticSizeCacheAttestationDataVariation2[fork] + } + size = 8 + 8 + 32 + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation2) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (3) - Future - 8 bytes + } + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? 
bytes (Checkpoint) +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go new file mode 100644 index 0000000..fbfb6b5 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go @@ -0,0 +1,32 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationDataVariation3 = ssz.PrecomputeStaticSizeCache((*AttestationDataVariation3)(nil)) + +// SizeSSZ returns the total size of the static ssz object. +func (obj *AttestationDataVariation3) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationDataVariation3) { + return staticSizeCacheAttestationDataVariation3[fork] + } + size = 8 + 8 + 32 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationDataVariation3) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (3) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (4) - Target - ? bytes (Checkpoint) + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (5) - Future - 8 bytes + } +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go new file mode 100644 index 0000000..130882d --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go @@ -0,0 +1,43 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation1 = ssz.PrecomputeStaticSizeCache((*AttestationVariation1)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *AttestationVariation1) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation1) { + size = staticSizeCacheAttestationVariation1[fork] + } else { + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation1) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (0) - Future - 8 bytes + } + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (1) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (2) - Data - ? 
bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (1) - AggregationBits - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go new file mode 100644 index 0000000..96672ed --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go @@ -0,0 +1,44 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation2 = ssz.PrecomputeStaticSizeCache((*AttestationVariation2)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *AttestationVariation2) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation2) { + size = staticSizeCacheAttestationVariation2[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + size += 96 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation2) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (2) - Future - 8 bytes + } + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go new file mode 100644 index 0000000..d7dcd07 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go @@ -0,0 +1,43 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheAttestationVariation3 = ssz.PrecomputeStaticSizeCache((*AttestationVariation3)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
+func (obj *AttestationVariation3) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheAttestationVariation3) { + size = staticSizeCacheAttestationVariation3[fork] + } else { + size = 4 + (*AttestationData)(nil).SizeSSZ(sizer) + 96 + if sizer.Fork() >= ssz.ForkFuture { + size += 8 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfBits(sizer, obj.AggregationBits) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *AttestationVariation3) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (2) - Signature - 96 bytes + if codec.Fork() >= ssz.ForkFuture { + ssz.DefineUint64(codec, &obj.Future) // Field (3) - Future - 8 bytes + } + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go index 2100502..56c3188 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attester_slashing_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *AttesterSlashing) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(4 + 4) +func (obj *AttesterSlashing) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 4 + 4 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go index 8e6dda0..b09e670 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BeaconBlock) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(8 + 8 + 32 + 32 + 4) +func (obj *BeaconBlock) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 8 + 8 + 32 + 32 + 4 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go index 5990cd7..158c192 100644 --- a/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_deposit_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheDeposit = ssz.PrecomputeStaticSizeCache((*Deposit)(nil)) // SizeSSZ returns the total size of the static ssz object. 
-func (obj *Deposit) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *Deposit) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheDeposit) { return staticSizeCacheDeposit[fork] } - return 33*32 + ssz.Size((*DepositData)(nil)) + size = 33*32 + (*DepositData)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go index 604ce27..eef108b 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_capella_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4) +func (obj *ExecutionPayloadCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go index eaf0751..f9cc135 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_deneb_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 + 8 + 8) +func (obj *ExecutionPayloadDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + 4 + 8 + 8 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go index 9ed1802..ce48a20 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_capella_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32) +func (obj *ExecutionPayloadHeaderCapella) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go index b0f0639..a787aed 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_deneb_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 + 8 + 8) +func (obj *ExecutionPayloadHeaderDeneb) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + 32 + 8 + 8 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go new file mode 100644 index 0000000..f391892 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -0,0 +1,57 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *ExecutionPayloadHeaderMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 + if sizer.Fork() >= ssz.ForkCapella { + size += 32 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 8 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 8 + } + if fixed { + return size + } + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32) // Offset (10) - ExtraData - 4 bytes + ssz.DefineStaticBytes(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineStaticBytes(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes + } + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go index 153dc54..2e571e8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadHeader) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32) +func (obj *ExecutionPayloadHeader) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go new file mode 100644 index 0000000..2263ee5 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -0,0 +1,65 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
+func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 + if sizer.Fork() >= ssz.ForkCapella { + size += 4 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 8 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 8 + } + if fixed { + return size + } + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + if sizer.Fork() >= ssz.ForkCapella { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) + } + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Withdrawals, 16) // Offset (14) - Withdrawals - 4 bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes + } + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? bytes + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Withdrawals, 16) // Field (14) - Withdrawals - ? bytes + } +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go index eac28ae..b1e1f6a 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. 
-func (obj *ExecutionPayload) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4) +func (obj *ExecutionPayload) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go index 75bf260..147e237 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_variation_ssz.go @@ -6,8 +6,8 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *ExecutionPayloadVariation) SizeSSZ(sizer *ssz.Sizer, fixed bool) uint32 { - var size = uint32(32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4) +func (obj *ExecutionPayloadVariation) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 if fixed { return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go index 9c314b7..4cbd415 100644 --- a/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_proposer_slashing_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheProposerSlashing = ssz.PrecomputeStaticSizeCache((*ProposerSlashing)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *ProposerSlashing) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *ProposerSlashing) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheProposerSlashing) { return staticSizeCacheProposerSlashing[fork] } - return ssz.Size((*SignedBeaconBlockHeader)(nil)) + ssz.Size((*SignedBeaconBlockHeader)(nil)) + size = (*SignedBeaconBlockHeader)(nil).SizeSSZ(sizer) + (*SignedBeaconBlockHeader)(nil).SizeSSZ(sizer) + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go index 02022e8..34bc309 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_beacon_block_header_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheSignedBeaconBlockHeader = ssz.PrecomputeStaticSizeCache((*SignedBeaconBlockHeader)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedBeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *SignedBeaconBlockHeader) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBeaconBlockHeader) { return staticSizeCacheSignedBeaconBlockHeader[fork] } - return ssz.Size((*BeaconBlockHeader)(nil)) + 96 + size = (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. 
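The monolith codecs generated above make the fork dependence visible at runtime: the same Go value includes or omits its ssz-fork gated fields depending on the fork handed to the entry points. A minimal sizing sketch (assuming the generated test types are importable under the repository path below, the same way the consensus tests import them):

package main

import (
	"fmt"

	"github.com/karalabe/ssz"
	types "github.com/karalabe/ssz/tests/testtypes/consensus-spec-tests"
)

func main() {
	header := new(types.ExecutionPayloadHeaderMonolith)

	// Before Capella the WithdrawalRoot field is not part of the encoding,
	// and before Deneb neither are BlobGasUsed/ExcessBlobGas, so the size
	// reported for the very same object grows across the forks.
	for _, fork := range []ssz.Fork{ssz.ForkFrontier, ssz.ForkCapella, ssz.ForkDeneb} {
		fmt.Printf("fork %d: %d bytes\n", fork, ssz.Size(header, fork))
	}
}

Since the generated gates are plain comparisons on the fork enum, sizing or encoding a monolith under ssz.ForkUnknown leaves every >= gate false and degrades the type to its oldest, pre-Capella layout.
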
diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go index 33447c8..0e114e0 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_bls_to_execution_change_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheSignedBLSToExecutionChange = ssz.PrecomputeStaticSizeCache((*SignedBLSToExecutionChange)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedBLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *SignedBLSToExecutionChange) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedBLSToExecutionChange) { return staticSizeCacheSignedBLSToExecutionChange[fork] } - return ssz.Size((*BLSToExecutionChange)(nil)) + 96 + size = (*BLSToExecutionChange)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go index b01757a..b29f8ec 100644 --- a/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_signed_voluntary_exit_ssz.go @@ -8,11 +8,12 @@ import "github.com/karalabe/ssz" var staticSizeCacheSignedVoluntaryExit = ssz.PrecomputeStaticSizeCache((*SignedVoluntaryExit)(nil)) // SizeSSZ returns the total size of the static ssz object. -func (obj *SignedVoluntaryExit) SizeSSZ(sizer *ssz.Sizer) uint32 { +func (obj *SignedVoluntaryExit) SizeSSZ(sizer *ssz.Sizer) (size uint32) { if fork := int(sizer.Fork()); fork < len(staticSizeCacheSignedVoluntaryExit) { return staticSizeCacheSignedVoluntaryExit[fork] } - return ssz.Size((*VoluntaryExit)(nil)) + 96 + size = (*VoluntaryExit)(nil).SizeSSZ(sizer) + 96 + return size } // DefineSSZ defines how an object is encoded/decoded. diff --git a/tests/testtypes/consensus-spec-tests/types_consensus.go b/tests/testtypes/consensus-spec-tests/types_consensus.go index 1edf64a..983aa7c 100644 --- a/tests/testtypes/consensus-spec-tests/types_consensus.go +++ b/tests/testtypes/consensus-spec-tests/types_consensus.go @@ -63,7 +63,7 @@ type Address [20]byte // LogsBloom is a standalone mock of go-ethereum's types.LogsBloom type LogsBloom [256]byte -// Roots is a helper type to foce a generator quirk. +// Roots is a helper type to force a generator quirk. 
type Roots [8192]Hash type AggregateAndProof struct { diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go new file mode 100644 index 0000000..f08c7c8 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -0,0 +1,50 @@ +// ssz: Go Simple Serialize (SSZ) codec library +// Copyright 2024 ssz Authors +// SPDX-License-Identifier: BSD-3-Clause + +package consensus_spec_tests + +import "github.com/holiman/uint256" + +//go:generate go run ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go +//go:generate go run ../../../cmd/sszgen -type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go + +type ExecutionPayloadMonolith struct { + ParentHash Hash + FeeRecipient Address + StateRoot Hash + ReceiptsRoot Hash + LogsBloom LogsBloom + PrevRandao Hash + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte `ssz-max:"32"` + BaseFeePerGas *uint256.Int + BlockHash Hash + Transactions [][]byte `ssz-max:"1048576,1073741824"` + Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"capella"` + BlobGasUsed uint64 `ssz-fork:"deneb"` + ExcessBlobGas uint64 `ssz-fork:"deneb"` +} + +type ExecutionPayloadHeaderMonolith struct { + ParentHash [32]byte + FeeRecipient [20]byte + StateRoot [32]byte + ReceiptsRoot [32]byte + LogsBloom [256]byte + PrevRandao [32]byte + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte `ssz-max:"32"` + BaseFeePerGas [32]byte + BlockHash [32]byte + TransactionsRoot [32]byte + WithdrawalRoot [32]byte `ssz-fork:"capella"` + BlobGasUsed uint64 `ssz-fork:"deneb"` + ExcessBlobGas uint64 `ssz-fork:"deneb"` +} diff --git a/tests/testtypes/consensus-spec-tests/types_variation.go b/tests/testtypes/consensus-spec-tests/types_variation.go index ef6b6e3..7823af5 100644 --- a/tests/testtypes/consensus-spec-tests/types_variation.go +++ b/tests/testtypes/consensus-spec-tests/types_variation.go @@ -6,11 +6,19 @@ package consensus_spec_tests import ( "math/big" + + "github.com/prysmaticlabs/go-bitfield" ) //go:generate go run -cover ../../../cmd/sszgen -type WithdrawalVariation -out gen_withdrawal_variation_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type HistoricalBatchVariation -out gen_historical_batch_variation_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadVariation -out gen_execution_payload_variation_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation1 -out gen_attestation_variation_1_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation2 -out gen_attestation_variation_2_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationVariation3 -out gen_attestation_variation_3_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation1 -out gen_attestation_data_variation_1_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation2 -out gen_attestation_data_variation_2_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type AttestationDataVariation3 -out gen_attestation_data_variation_3_ssz.go type WithdrawalVariation struct { Index uint64 @@ -40,3 +48,50 @@ type ExecutionPayloadVariation struct { BlockHash Hash Transactions [][]byte `ssz-max:"1048576,1073741824"` } + +// The types below test that fork constraints generate correct code for runtime +// types (i.e. 
static objects embedded) for various positions. + +type AttestationVariation1 struct { + Future uint64 `ssz-fork:"future"` // Currently unused field + AggregationBits bitfield.Bitlist `ssz-max:"2048"` + Data *AttestationData + Signature [96]byte +} +type AttestationVariation2 struct { + AggregationBits bitfield.Bitlist `ssz-max:"2048"` + Data *AttestationData + Future uint64 `ssz-fork:"future"` // Currently unused field + Signature [96]byte +} +type AttestationVariation3 struct { + AggregationBits bitfield.Bitlist `ssz-max:"2048"` + Data *AttestationData + Signature [96]byte + Future uint64 `ssz-fork:"future"` // Currently unused field +} + +type AttestationDataVariation1 struct { + Future uint64 `ssz-fork:"future"` // Currently unused field + Slot Slot + Index uint64 + BeaconBlockHash Hash + Source *Checkpoint + Target *Checkpoint +} +type AttestationDataVariation2 struct { + Slot Slot + Index uint64 + BeaconBlockHash Hash + Future uint64 `ssz-fork:"future"` // Currently unused field + Source *Checkpoint + Target *Checkpoint +} +type AttestationDataVariation3 struct { + Slot Slot + Index uint64 + BeaconBlockHash Hash + Source *Checkpoint + Target *Checkpoint + Future uint64 `ssz-fork:"future"` // Currently unused field +} diff --git a/genutils.go b/utils.go similarity index 77% rename from genutils.go rename to utils.go index 006c6e4..ffef378 100644 --- a/genutils.go +++ b/utils.go @@ -6,11 +6,12 @@ package ssz import "fmt" -// PrecomputeStaticSizeCache is a helper for genssz to precompute SSZ (static) -// sizes for a monolith type on different forks. +// PrecomputeStaticSizeCache is a helper to precompute SSZ (static) sizes for a +// monolith type on different forks. // // For non-monolith types that are constant across forks (or are not meant to be -// used across forks), all the sizes will be the same. +// used across forks), all the sizes will be the same so might as well hard-code +// it instead. 
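+//
+// A sketch of the intended use, mirroring the generated files in this series
+// (MyType is a placeholder for any type implementing ssz.Object):
+//
+//	var staticSizeCacheMyType = ssz.PrecomputeStaticSizeCache((*MyType)(nil))
+//
+// The returned slice is indexed by fork, so generated size code can do a
+// bounds-checked lookup and fall back to runtime computation for forks past
+// the end of the cache.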
func PrecomputeStaticSizeCache(obj Object) []uint32 { var ( sizes = make([]uint32, ForkFuture) From b70002653514960c4f00306d620ef6c1cfdc5869 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 26 Jul 2024 00:26:48 +0300 Subject: [PATCH 03/12] cmd/sszgen: monolith code generator + polishes + tests --- cmd/sszgen/gen.go | 140 ++++++++++++------ example_asymmetric_test.go | 6 +- example_checked_test.go | 2 +- example_dynamic_test.go | 4 +- example_static_test.go | 4 +- tests/consensus_specs_test.go | 51 +++++-- .../gen_beacon_block_body_monolith_ssz.go | 91 ++++++++++++ .../gen_beacon_state_altair_ssz.go | 70 +++++++++ .../gen_beacon_state_bellatrix_ssz.go | 73 +++++++++ .../gen_beacon_state_monolith_ssz.go | 125 ++++++++++++++++ ...n_execution_payload_header_monolith_ssz.go | 9 +- .../gen_execution_payload_monolith_ssz.go | 10 +- .../consensus-spec-tests/types_consensus.go | 57 +++++++ .../consensus-spec-tests/types_monoliths.go | 54 ++++++- 14 files changed, 612 insertions(+), 84 deletions(-) create mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go index 503bb7b..801d077 100644 --- a/cmd/sszgen/gen.go +++ b/cmd/sszgen/gen.go @@ -103,16 +103,20 @@ func generateStaticSizeAccumulator(w io.Writer, ctx *genContext, typ *sszContain fmt.Fprintf(w, " + ") case typ.forks[i] == "" && typ.forks[i-1] != "": fmt.Fprintf(w, "\n size += ") - case typ.forks[i] != "" && i > 0: + case typ.forks[i] != "" && i > 0 && typ.forks[i-1] != typ.forks[i]: fmt.Fprintf(w, "\n") } if typ.forks[i] != "" { - if typ.forks[i][0] == '!' { - fmt.Fprintf(w, " if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) + if i == 0 || typ.forks[i] != typ.forks[i-1] { + if typ.forks[i][0] == '!' { + fmt.Fprintf(w, " if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) + } else { + fmt.Fprintf(w, " if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i]) + } + fmt.Fprintf(w, " size += ") } else { - fmt.Fprintf(w, " if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i]) + fmt.Fprintf(w, " + ") } - fmt.Fprintf(w, " size += ") } switch t := typ.opsets[i].(type) { case *opsetStatic: @@ -135,7 +139,7 @@ func generateStaticSizeAccumulator(w io.Writer, ctx *genContext, typ *sszContain case *opsetDynamic: fmt.Fprintf(w, "%d", offsetBytes) } - if typ.forks[i] != "" { + if typ.forks[i] != "" && (i == len(typ.forks)-1 || typ.forks[i] != typ.forks[i+1]) { fmt.Fprintf(w, "\n }") } } @@ -227,23 +231,35 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " if (fixed) {\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, " }\n") - for i := range typ.opsets { - if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - if typ.forks[i] != "" { - if typ.forks[i][0] == '!' 
{ - fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) - } else { - fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i]) - } - } - call := generateCall(opset.size, "sizer", "obj."+typ.fields[i]) - fmt.Fprintf(&b, " size += ssz.%s\n", call) - if typ.forks[i] != "" { - fmt.Fprintf(&b, " }\n") + var ( + dynFields []string + dynOpsets []opset + dynForks []string + ) + for i := 0; i < len(typ.fields); i++ { + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynFields = append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) + } + } + for i := range dynFields { + if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { + if dynForks[i][0] == '!' { + fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) + } else { + fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } } + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "sizer", "obj."+dynFields[i]) + fmt.Fprintf(&b, " size += ssz.%s\n", call) + if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { + fmt.Fprintf(&b, " }\n") + } + } + if dynForks[len(dynForks)-1] == "" { + fmt.Fprintf(&b, "\n") } - fmt.Fprintf(&b, "\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, "}\n") } else { @@ -253,23 +269,36 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " if (fixed) {\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, " }\n") - for i := range typ.opsets { - if opset, ok := typ.opsets[i].(*opsetDynamic); ok { - if typ.forks[i] != "" { - if typ.forks[i][0] == '!' { - fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) - } else { - fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", typ.forks[i]) - } - } - call := generateCall(opset.size, "sizer", "obj."+typ.fields[i]) - fmt.Fprintf(&b, " size += ssz.%s\n", call) - if typ.forks[i] != "" { - fmt.Fprintf(&b, " }\n") + + var ( + dynFields []string + dynOpsets []opset + dynForks []string + ) + for i := 0; i < len(typ.fields); i++ { + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynFields = append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) + } + } + for i := range dynFields { + if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { + if dynForks[i][0] == '!' { + fmt.Fprintf(&b, " if sizer.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) + } else { + fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } } + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "sizer", "obj."+dynFields[i]) + fmt.Fprintf(&b, " size += ssz.%s\n", call) + if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { + fmt.Fprintf(&b, " }\n") + } + } + if dynForks[len(dynForks)-1] == "" { + fmt.Fprintf(&b, "\n") } - fmt.Fprintf(&b, "\n") fmt.Fprintf(&b, " return size\n") fmt.Fprintf(&b, "}\n") } @@ -314,7 +343,7 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprint(&b, " // Define the static data (fields and dynamic offsets)\n") } for i := 0; i < len(typ.fields); i++ { - if typ.forks[i] != "" { + if typ.forks[i] != "" && (i == 0 || typ.forks[i] != typ.forks[i-1]) { if typ.forks[i][0] == '!' 
{ fmt.Fprintf(&b, " if codec.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) } else { @@ -338,28 +367,41 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { call := generateCall(opset.defineOffset, "codec", "obj."+field, opset.limits...) fmt.Fprintf(&b, " ssz.%s // Offset ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, offsetBytes) } - if typ.forks[i] != "" { + if typ.forks[i] != "" && (i == len(typ.forks)-1 || typ.forks[i] != typ.forks[i+1]) { fmt.Fprintf(&b, " }\n") } } if !typ.static { fmt.Fprint(&b, "\n // Define the dynamic data (fields)\n") + var ( + dynIndices []int + dynFields []string + dynOpsets []opset + dynForks []string + ) for i := 0; i < len(typ.fields); i++ { - field := typ.fields[i] - if opset, ok := (typ.opsets[i]).(*opsetDynamic); ok { - if typ.forks[i] != "" { - if typ.forks[i][0] == '!' { - fmt.Fprintf(&b, " if codec.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) - } else { - fmt.Fprintf(&b, " if codec.Fork() >= ssz.Fork%s {\n", typ.forks[i]) - } - } - call := generateCall(opset.defineContent, "codec", "obj."+field, opset.limits...) - fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? bytes\n", call, i, field) - if typ.forks[i] != "" { - fmt.Fprintf(&b, " }\n") + if _, ok := (typ.opsets[i]).(*opsetDynamic); ok { + dynIndices = append(dynIndices, i) + dynFields = append(dynFields, typ.fields[i]) + dynOpsets = append(dynOpsets, typ.opsets[i]) + dynForks = append(dynForks, typ.forks[i]) + } + } + for i := 0; i < len(dynFields); i++ { + opset := (dynOpsets[i]).(*opsetDynamic) + + if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { + if dynForks[i][0] == '!' { + fmt.Fprintf(&b, " if codec.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) + } else { + fmt.Fprintf(&b, " if codec.Fork() >= ssz.Fork%s {\n", dynForks[i]) } } + call := generateCall(opset.defineContent, "codec", "obj."+dynFields[i], opset.limits...) + fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? 
bytes\n", call, dynIndices[i], dynFields[i]) + if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { + fmt.Fprintf(&b, " }\n") + } } } fmt.Fprint(&b, "}\n") diff --git a/example_asymmetric_test.go b/example_asymmetric_test.go index 368db8b..d50ccf8 100644 --- a/example_asymmetric_test.go +++ b/example_asymmetric_test.go @@ -41,11 +41,11 @@ func (w *WithdrawalAsym) DefineSSZ(codec *ssz.Codec) { } func ExampleEncodeAsymmetricObject() { - blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil))) - if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym)); err != nil { + blob := make([]byte, ssz.Size((*WithdrawalAsym)(nil), ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(blob, new(WithdrawalAsym), ssz.ForkUnknown); err != nil { panic(err) } - hash := ssz.HashSequential(new(WithdrawalAsym)) + hash := ssz.HashSequential(new(WithdrawalAsym), ssz.ForkUnknown) fmt.Printf("ssz: %#x\nhash: %#x\n", blob, hash) // Output: diff --git a/example_checked_test.go b/example_checked_test.go index 41d9e16..bca5f0f 100644 --- a/example_checked_test.go +++ b/example_checked_test.go @@ -30,7 +30,7 @@ func ExampleDecodeCheckedObject() { blob := make([]byte, 44) obj := new(WithdrawalChecked) - if err := ssz.DecodeFromBytes(blob, obj); err != nil { + if err := ssz.DecodeFromBytes(blob, obj, ssz.ForkUnknown); err != nil { panic(err) } fmt.Printf("obj: %#x\n", obj) diff --git a/example_dynamic_test.go b/example_dynamic_test.go index 86fd3e7..1f244bc 100644 --- a/example_dynamic_test.go +++ b/example_dynamic_test.go @@ -72,8 +72,8 @@ func (e *ExecutionPayload) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeDynamicObject() { obj := new(ExecutionPayload) - blob := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(blob, obj); err != nil { + blob := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(blob, obj, ssz.ForkUnknown); err != nil { panic(err) } fmt.Printf("ssz: %#x\n", blob) diff --git a/example_static_test.go b/example_static_test.go index 70cbc08..4486222 100644 --- a/example_static_test.go +++ b/example_static_test.go @@ -31,10 +31,10 @@ func (w *Withdrawal) DefineSSZ(codec *ssz.Codec) { func ExampleEncodeStaticObject() { out := new(bytes.Buffer) - if err := ssz.EncodeToStream(out, new(Withdrawal)); err != nil { + if err := ssz.EncodeToStream(out, new(Withdrawal), ssz.ForkUnknown); err != nil { panic(err) } - hash := ssz.HashSequential(new(Withdrawal)) + hash := ssz.HashSequential(new(Withdrawal), ssz.ForkUnknown) fmt.Printf("ssz: %#x\nhash: %#x\n", out, hash) // Output: diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index b176eb2..929b7a1 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -11,6 +11,7 @@ import ( "math/rand" "os" "path/filepath" + "reflect" "strings" "sync" "testing" @@ -190,6 +191,7 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.BeaconBlockBodyDeneb](t, "BeaconBlockBody", "deneb", "eip7594") testConsensusSpecType[*types.BeaconBlockHeader](t, "BeaconBlockHeader") testConsensusSpecType[*types.BeaconState](t, "BeaconState", "phase0") + testConsensusSpecType[*types.BeaconStateAltair](t, "BeaconState", "altair") testConsensusSpecType[*types.BeaconStateCapella](t, "BeaconState", "capella") testConsensusSpecType[*types.BeaconStateDeneb](t, "BeaconState", "deneb") testConsensusSpecType[*types.BLSToExecutionChange](t, "BLSToExecutionChange") @@ -221,6 +223,8 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.Withdrawal](t, "Withdrawal") // Add 
monolith variations to the consensus types + testConsensusSpecType[*types.BeaconBlockBodyMonolith](t, "BeaconBlockBody", "phase0", "altair", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.BeaconStateMonolith](t, "BeaconState", "phase0", "altair", "bellatrix", "capella", "deneb") testConsensusSpecType[*types.ExecutionPayloadMonolith](t, "ExecutionPayload", "bellatrix", "capella", "deneb") testConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](t, "ExecutionPayloadHeader", "bellatrix", "capella", "deneb") @@ -369,16 +373,16 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, // BenchmarkConsensusSpecs iterates over all the (supported) consensus SSZ types and // runs the encoding/decoding/hashing benchmark round. func BenchmarkConsensusSpecs(b *testing.B) { - benchmarkConsensusSpecType[*types.ExecutionPayloadVariation](b, "bellatrix", "ExecutionPayload") - benchmarkConsensusSpecType[*types.AggregateAndProof](b, "deneb", "AggregateAndProof") benchmarkConsensusSpecType[*types.Attestation](b, "deneb", "Attestation") benchmarkConsensusSpecType[*types.AttestationData](b, "deneb", "AttestationData") benchmarkConsensusSpecType[*types.AttesterSlashing](b, "deneb", "AttesterSlashing") benchmarkConsensusSpecType[*types.BeaconBlock](b, "phase0", "BeaconBlock") benchmarkConsensusSpecType[*types.BeaconBlockBodyDeneb](b, "deneb", "BeaconBlockBody") + benchmarkConsensusSpecType[*types.BeaconBlockBodyMonolith](b, "deneb", "BeaconBlockBody") benchmarkConsensusSpecType[*types.BeaconBlockHeader](b, "deneb", "BeaconBlockHeader") - benchmarkConsensusSpecType[*types.BeaconState](b, "phase0", "BeaconState") + benchmarkConsensusSpecType[*types.BeaconStateDeneb](b, "deneb", "BeaconState") + benchmarkConsensusSpecType[*types.BeaconStateMonolith](b, "deneb", "BeaconState") benchmarkConsensusSpecType[*types.BLSToExecutionChange](b, "deneb", "BLSToExecutionChange") benchmarkConsensusSpecType[*types.Checkpoint](b, "deneb", "Checkpoint") benchmarkConsensusSpecType[*types.Deposit](b, "deneb", "Deposit") @@ -387,7 +391,9 @@ func BenchmarkConsensusSpecs(b *testing.B) { benchmarkConsensusSpecType[*types.Eth1Block](b, "deneb", "Eth1Block") benchmarkConsensusSpecType[*types.Eth1Data](b, "deneb", "Eth1Data") benchmarkConsensusSpecType[*types.ExecutionPayloadDeneb](b, "deneb", "ExecutionPayload") + benchmarkConsensusSpecType[*types.ExecutionPayloadMonolith](b, "deneb", "ExecutionPayload") benchmarkConsensusSpecType[*types.ExecutionPayloadHeaderDeneb](b, "deneb", "ExecutionPayloadHeader") + benchmarkConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](b, "deneb", "ExecutionPayloadHeader") benchmarkConsensusSpecType[*types.Fork](b, "deneb", "Fork") benchmarkConsensusSpecType[*types.HistoricalBatch](b, "deneb", "HistoricalBatch") benchmarkConsensusSpecType[*types.HistoricalSummary](b, "deneb", "HistoricalSummary") @@ -421,7 +427,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k b.Fatalf("failed to decode SSZ stream: %v", err) } // Start the benchmarks for all the different operations - b.Run(fmt.Sprintf("%s/encode-stream", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/encode-stream", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { b.SetBytes(int64(len(inSSZ))) b.ReportAllocs() b.ResetTimer() @@ -432,7 +438,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k } } }) - b.Run(fmt.Sprintf("%s/encode-buffer", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/encode-buffer", 
reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { blob := make([]byte, len(inSSZ)) b.SetBytes(int64(len(inSSZ))) @@ -445,7 +451,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k } } }) - b.Run(fmt.Sprintf("%s/decode-stream", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/decode-stream", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) r := bytes.NewReader(inSSZ) @@ -460,7 +466,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k r.Reset(inSSZ) } }) - b.Run(fmt.Sprintf("%s/decode-buffer", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/decode-buffer", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) b.SetBytes(int64(len(inSSZ))) @@ -473,7 +479,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k } } }) - b.Run(fmt.Sprintf("%s/merkleize-sequential", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/merkleize-sequential", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) @@ -486,7 +492,7 @@ func benchmarkConsensusSpecType[T newableObject[U], U any](b *testing.B, fork, k ssz.HashSequential(obj, ssz.ForkMapping[fork]) } }) - b.Run(fmt.Sprintf("%s/merkleize-concurrent", kind), func(b *testing.B) { + b.Run(fmt.Sprintf("%s/merkleize-concurrent", reflect.TypeOf(inObj).Elem().Name()), func(b *testing.B) { obj := T(new(U)) if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkMapping[fork]); err != nil { b.Fatalf("failed to decode SSZ stream: %v", err) @@ -542,6 +548,18 @@ func FuzzConsensusSpecsBeaconBlockHeader(f *testing.F) { func FuzzConsensusSpecsBeaconState(f *testing.F) { fuzzConsensusSpecType[*types.BeaconState](f, "BeaconState") } +func FuzzConsensusSpecsBeaconStateAltair(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateAltair](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateBellatrix(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateBellatrix](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateCapella(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateCapella](f, "BeaconState") +} +func FuzzConsensusSpecsBeaconStateDeneb(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateDeneb](f, "BeaconState") +} func FuzzConsensusSpecsBLSToExecutionChange(f *testing.F) { fuzzConsensusSpecType[*types.BLSToExecutionChange](f, "BLSToExecutionChange") } @@ -624,6 +642,19 @@ func FuzzConsensusSpecsWithdrawal(f *testing.F) { fuzzConsensusSpecType[*types.Withdrawal](f, "Withdrawal") } +func FuzzConsensusSpecsBeaconBlockBodyMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconBlockBodyMonolith](f, "BeaconBlockBody") +} +func FuzzConsensusSpecsBeaconStateMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.BeaconStateMonolith](f, "BeaconState") +} +func FuzzConsensusSpecsExecutionPayloadMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.ExecutionPayloadMonolith](f, "ExecutionPayload") +} +func FuzzConsensusSpecsExecutionPayloadHeaderMonolith(f *testing.F) { + fuzzConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](f, "ExecutionPayloadHeader") +} + func FuzzConsensusSpecsExecutionPayloadVariation(f *testing.F) { fuzzConsensusSpecType[*types.ExecutionPayloadVariation](f, "ExecutionPayload") } @@ -664,7 +695,7 @@ func fuzzConsensusSpecType[T newableObject[U], U any](f *testing.F, kind string) f.Fatalf("failed to parse snappy ssz 
binary: %v", err) } obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkMapping[fork.Name()]); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkFuture); err == nil { // Stash away all valid ssz streams so we can play with decoding // into previously used objects valids = append(valids, inSSZ) diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go new file mode 100644 index 0000000..10ad204 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go @@ -0,0 +1,91 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconBlockBodyMonolith = ssz.PrecomputeStaticSizeCache((*BeaconBlockBodyMonolith)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BeaconBlockBodyMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconBlockBodyMonolith) { + size = staticSizeCacheBeaconBlockBodyMonolith[fork] + } else { + size = 96 + (*Eth1Data)(nil).SizeSSZ(sizer) + 32 + 4 + 4 + 4 + 4 + 4 + if sizer.Fork() >= ssz.ForkAltair { + size += (*SyncAggregate)(nil).SizeSSZ(sizer) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += 4 + } + if sizer.Fork() >= ssz.ForkCapella { + size += 4 + } + if sizer.Fork() >= ssz.ForkDeneb { + size += 4 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticObjects(sizer, obj.ProposerSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.AttesterSlashings) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.Attestations) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Deposits) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.VoluntaryExits) + if sizer.Fork() >= ssz.ForkBellatrix { + size += ssz.SizeDynamicObject(sizer, obj.ExecutionPayload) + } + if sizer.Fork() >= ssz.ForkCapella { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.BlsToExecutionChanges) + } + if sizer.Fork() >= ssz.ForkDeneb { + size += ssz.SizeSliceOfStaticBytes(sizer, obj.BlobKzgCommitments) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconBlockBodyMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.RandaoReveal) // Field ( 0) - RandaoReveal - 96 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 1) - Eth1Data - ? 
bytes (Eth1Data) + ssz.DefineStaticBytes(codec, &obj.Graffiti) // Field ( 2) - Graffiti - 32 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.ProposerSlashings, 16) // Offset ( 3) - ProposerSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.AttesterSlashings, 2) // Offset ( 4) - AttesterSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.Attestations, 128) // Offset ( 5) - Attestations - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Deposits, 16) // Offset ( 6) - Deposits - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.VoluntaryExits, 16) // Offset ( 7) - VoluntaryExits - 4 bytes + if codec.Fork() >= ssz.ForkAltair { + ssz.DefineStaticObject(codec, &obj.SyncAggregate) // Field ( 8) - SyncAggregate - ? bytes (SyncAggregate) + } + if codec.Fork() >= ssz.ForkBellatrix { + ssz.DefineDynamicObjectOffset(codec, &obj.ExecutionPayload) // Offset ( 9) - ExecutionPayload - 4 bytes + } + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.BlsToExecutionChanges, 16) // Offset (10) - BlsToExecutionChanges - 4 bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.BlobKzgCommitments, 4096) // Offset (11) - BlobKzgCommitments - 4 bytes + } + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.ProposerSlashings, 16) // Field ( 3) - ProposerSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.AttesterSlashings, 2) // Field ( 4) - AttesterSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.Attestations, 128) // Field ( 5) - Attestations - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Deposits, 16) // Field ( 6) - Deposits - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.VoluntaryExits, 16) // Field ( 7) - VoluntaryExits - ? bytes + if codec.Fork() >= ssz.ForkBellatrix { + ssz.DefineDynamicObjectContent(codec, &obj.ExecutionPayload) // Field ( 9) - ExecutionPayload - ? bytes + } + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.BlsToExecutionChanges, 16) // Field (10) - BlsToExecutionChanges - ? bytes + } + if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineSliceOfStaticBytesContent(codec, &obj.BlobKzgCommitments, 4096) // Field (11) - BlobKzgCommitments - ? bytes + } +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go new file mode 100644 index 0000000..032848d --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_altair_ssz.go @@ -0,0 +1,70 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateAltair = ssz.PrecomputeStaticSizeCache((*BeaconStateAltair)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
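+//
+// For forks already covered by the precomputed cache the static part is a
+// plain table lookup; only forks past the end of the cache fall back to
+// summing the field sizes at runtime.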
+func (obj *BeaconStateAltair) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateAltair) { + size = staticSizeCacheBeaconStateAltair[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateAltair) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineCheckedStaticBytes(codec, &obj.GenesisValidatorsRoot, 32) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? 
bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (21) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go new file mode 100644 index 0000000..88cd65e --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_bellatrix_ssz.go @@ -0,0 +1,73 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateBellatrix = ssz.PrecomputeStaticSizeCache((*BeaconStateBellatrix)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
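+//
+// The size formula is identical to the Altair one above except for the
+// trailing LatestExecutionPayloadHeader: one extra 4-byte offset in the
+// static part and one dynamic object in the tail.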
+func (obj *BeaconStateBellatrix) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateBellatrix) { + size = staticSizeCacheBeaconStateBellatrix[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + 4 + 4 + 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + 4 + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateBellatrix) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (15) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (16) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (17) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (18) - PreviousJustifiedCheckpoint - ? 
bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (19) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (20) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (21) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (22) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (23) - NextSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (24) - LatestExecutionPayloadHeader - 4 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (15) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (16) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (21) - InactivityScores - ? bytes + ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (24) - LatestExecutionPayloadHeader - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go new file mode 100644 index 0000000..2d46dff --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -0,0 +1,125 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// Cached static size computed on package init. +var staticSizeCacheBeaconStateMonolith = ssz.PrecomputeStaticSizeCache((*BeaconStateMonolith)(nil)) + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. 
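+//
+// The monolith carries the union of all fork fields, so the static size is
+// fork-dependent: each gate below only counts the fields present on the
+// active fork (e.g. the phase0 attestation offsets are replaced by the
+// participation and inactivity offsets from Altair onwards).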
+func (obj *BeaconStateMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + // Load static size if already precomputed, calculate otherwise + if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateMonolith) { + size = staticSizeCacheBeaconStateMonolith[fork] + } else { + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + if sizer.Fork() < ssz.ForkAltair { + size += 4 + 4 + } + if sizer.Fork() >= ssz.ForkAltair { + size += 4 + 4 + } + size += 1 + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + (*Checkpoint)(nil).SizeSSZ(sizer) + if sizer.Fork() >= ssz.ForkAltair { + size += 4 + (*SyncCommittee)(nil).SizeSSZ(sizer) + (*SyncCommittee)(nil).SizeSSZ(sizer) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += 4 + } + if sizer.Fork() >= ssz.ForkCapella { + size += 8 + 8 + 4 + } + } + // Either return the static size or accumulate the dynamic too + if fixed { + return size + } + size += ssz.SizeSliceOfStaticBytes(sizer, obj.HistoricalRoots) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Eth1DataVotes) + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Validators) + size += ssz.SizeSliceOfUint64s(sizer, obj.Balances) + if sizer.Fork() < ssz.ForkAltair { + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.PreviousEpochAttestations) + size += ssz.SizeSliceOfDynamicObjects(sizer, obj.CurrentEpochAttestations) + } + if sizer.Fork() >= ssz.ForkAltair { + size += ssz.SizeDynamicBytes(sizer, obj.PreviousEpochParticipation) + size += ssz.SizeDynamicBytes(sizer, obj.CurrentEpochParticipation) + size += ssz.SizeSliceOfUint64s(sizer, obj.InactivityScores) + } + if sizer.Fork() >= ssz.ForkBellatrix { + size += ssz.SizeDynamicObject(sizer, obj.LatestExecutionPayloadHeader) + } + if sizer.Fork() >= ssz.ForkCapella { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.HistoricalSummaries) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? 
bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + if codec.Fork() < ssz.ForkAltair { + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.PreviousEpochAttestations, 4096) // Offset (15) - PreviousEpochAttestations - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.CurrentEpochAttestations, 4096) // Offset (16) - CurrentEpochAttestations - 4 bytes + } + if codec.Fork() >= ssz.ForkAltair { + ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (17) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (18) - CurrentEpochParticipation - 4 bytes + } + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (19) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (20) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (21) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (22) - FinalizedCheckpoint - ? bytes (Checkpoint) + if codec.Fork() >= ssz.ForkAltair { + ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (23) - InactivityScores - 4 bytes + ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (24) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (25) - NextSyncCommittee - ? bytes (SyncCommittee) + } + if codec.Fork() >= ssz.ForkBellatrix { + ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes + } + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineUint64(codec, &obj.NextWithdrawalIndex) // Field (27) - NextWithdrawalIndex - 8 bytes + ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (29) - HistoricalSummaries - 4 bytes + } + + // Define the dynamic data (fields) + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + if codec.Fork() < ssz.ForkAltair { + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.PreviousEpochAttestations, 4096) // Field (15) - PreviousEpochAttestations - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.CurrentEpochAttestations, 4096) // Field (16) - CurrentEpochAttestations - ? 
bytes + } + if codec.Fork() >= ssz.ForkAltair { + ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (17) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (18) - CurrentEpochParticipation - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (23) - InactivityScores - ? bytes + } + if codec.Fork() >= ssz.ForkBellatrix { + ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (26) - LatestExecutionPayloadHeader - ? bytes + } + if codec.Fork() >= ssz.ForkCapella { + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216) // Field (29) - HistoricalSummaries - ? bytes + } +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go index f391892..632898d 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -12,10 +12,7 @@ func (obj *ExecutionPayloadHeaderMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) size += 32 } if sizer.Fork() >= ssz.ForkDeneb { - size += 8 - } - if sizer.Fork() >= ssz.ForkDeneb { - size += 8 + size += 8 + 8 } if fixed { return size @@ -46,9 +43,7 @@ func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineStaticBytes(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes } if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - } - if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index 2263ee5..f62f00f 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -12,10 +12,7 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size size += 4 } if sizer.Fork() >= ssz.ForkDeneb { - size += 8 - } - if sizer.Fork() >= ssz.ForkDeneb { - size += 8 + size += 8 + 8 } if fixed { return size @@ -25,7 +22,6 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size if sizer.Fork() >= ssz.ForkCapella { size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) } - return size } @@ -50,9 +46,7 @@ func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Withdrawals, 16) // Offset (14) - Withdrawals - 4 bytes } if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - } - if codec.Fork() >= ssz.ForkDeneb { + ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } diff --git a/tests/testtypes/consensus-spec-tests/types_consensus.go b/tests/testtypes/consensus-spec-tests/types_consensus.go index 983aa7c..908b69d 100644 --- a/tests/testtypes/consensus-spec-tests/types_consensus.go +++ 
b/tests/testtypes/consensus-spec-tests/types_consensus.go @@ -42,6 +42,8 @@ import ( //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadDeneb -out gen_execution_payload_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderDeneb -out gen_execution_payload_header_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconState -out gen_beacon_state_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateAltair -out gen_beacon_state_altair_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateBellatrix -out gen_beacon_state_bellatrix_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconStateCapella -out gen_beacon_state_capella_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconStateDeneb -out gen_beacon_state_deneb_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBody -out gen_beacon_block_body_ssz.go @@ -196,6 +198,61 @@ type BeaconState struct { FinalizedCheckpoint *Checkpoint } +type BeaconStateAltair struct { + GenesisTime uint64 + GenesisValidatorsRoot []byte `ssz-size:"32"` + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings [8192]uint64 + PreviousEpochParticipation []byte `ssz-max:"1099511627776"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776"` + CurrentSyncCommittee *SyncCommittee + NextSyncCommittee *SyncCommittee +} + +type BeaconStateBellatrix struct { + GenesisTime uint64 + GenesisValidatorsRoot [32]byte + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings [8192]uint64 + PreviousEpochParticipation []byte `ssz-max:"1099511627776"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776"` + CurrentSyncCommittee *SyncCommittee + NextSyncCommittee *SyncCommittee + LatestExecutionPayloadHeader *ExecutionPayloadHeader +} + type BeaconStateCapella struct { GenesisTime uint64 GenesisValidatorsRoot [32]byte diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go index f08c7c8..c9ab5f0 100644 --- a/tests/testtypes/consensus-spec-tests/types_monoliths.go +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -8,6 +8,56 @@ import "github.com/holiman/uint256" //go:generate go run ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go //go:generate go run ../../../cmd/sszgen 
-type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go +//go:generate go run ../../../cmd/sszgen -type BeaconBlockBodyMonolith -out gen_beacon_block_body_monolith_ssz.go +//go:generate go run ../../../cmd/sszgen -type BeaconStateMonolith -out gen_beacon_state_monolith_ssz.go + +type BeaconBlockBodyMonolith struct { + RandaoReveal [96]byte + Eth1Data *Eth1Data + Graffiti [32]byte + ProposerSlashings []*ProposerSlashing `ssz-max:"16"` + AttesterSlashings []*AttesterSlashing `ssz-max:"2"` + Attestations []*Attestation `ssz-max:"128"` + Deposits []*Deposit `ssz-max:"16"` + VoluntaryExits []*SignedVoluntaryExit `ssz-max:"16"` + SyncAggregate *SyncAggregate ` ssz-fork:"altair"` + ExecutionPayload *ExecutionPayloadMonolith ` ssz-fork:"bellatrix"` + BlsToExecutionChanges []*SignedBLSToExecutionChange `ssz-max:"16" ssz-fork:"capella"` + BlobKzgCommitments [][48]byte `ssz-max:"4096" ssz-fork:"deneb"` +} + +type BeaconStateMonolith struct { + GenesisTime uint64 + GenesisValidatorsRoot [32]byte + Slot uint64 + Fork *Fork + LatestBlockHeader *BeaconBlockHeader + BlockRoots [8192][32]byte + StateRoots [8192][32]byte + HistoricalRoots [][32]byte `ssz-max:"16777216"` + Eth1Data *Eth1Data + Eth1DataVotes []*Eth1Data `ssz-max:"2048"` + Eth1DepositIndex uint64 + Validators []*Validator `ssz-max:"1099511627776"` + Balances []uint64 `ssz-max:"1099511627776"` + RandaoMixes [65536][32]byte + Slashings [8192]uint64 + PreviousEpochAttestations []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"` + CurrentEpochAttestations []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"` + PreviousEpochParticipation []byte `ssz-max:"1099511627776" ssz-fork:"altair"` + CurrentEpochParticipation []byte `ssz-max:"1099511627776" ssz-fork:"altair"` + JustificationBits [1]byte `ssz-size:"4" ssz:"bits"` + PreviousJustifiedCheckpoint *Checkpoint + CurrentJustifiedCheckpoint *Checkpoint + FinalizedCheckpoint *Checkpoint + InactivityScores []uint64 `ssz-max:"1099511627776" ssz-fork:"altair"` + CurrentSyncCommittee *SyncCommittee ` ssz-fork:"altair"` + NextSyncCommittee *SyncCommittee ` ssz-fork:"altair"` + LatestExecutionPayloadHeader *ExecutionPayloadHeaderMonolith ` ssz-fork:"bellatrix"` + NextWithdrawalIndex uint64 ` ssz-fork:"capella"` + NextWithdrawalValidatorIndex uint64 ` ssz-fork:"capella"` + HistoricalSummaries []*HistoricalSummary `ssz-max:"16777216" ssz-fork:"capella"` +} type ExecutionPayloadMonolith struct { ParentHash Hash @@ -25,8 +75,8 @@ type ExecutionPayloadMonolith struct { BlockHash Hash Transactions [][]byte `ssz-max:"1048576,1073741824"` Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"capella"` - BlobGasUsed uint64 `ssz-fork:"deneb"` - ExcessBlobGas uint64 `ssz-fork:"deneb"` + BlobGasUsed uint64 ` ssz-fork:"deneb"` + ExcessBlobGas uint64 ` ssz-fork:"deneb"` } type ExecutionPayloadHeaderMonolith struct { From 7b4776e6bd18a5a33e896ea1af2d190e23d612ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 26 Jul 2024 00:31:05 +0300 Subject: [PATCH 04/12] cmd/sszgen: fix a formatting error --- cmd/sszgen/gen.go | 5 ++++- .../consensus-spec-tests/gen_attestation_variation_3_ssz.go | 1 - .../gen_beacon_block_body_monolith_ssz.go | 1 - .../consensus-spec-tests/gen_beacon_state_monolith_ssz.go | 1 - .../gen_execution_payload_header_monolith_ssz.go | 1 - .../gen_execution_payload_monolith_ssz.go | 1 - 6 files changed, 4 insertions(+), 6 deletions(-) diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go index 801d077..2e90303 100644 --- a/cmd/sszgen/gen.go +++ 
b/cmd/sszgen/gen.go @@ -372,7 +372,10 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { } } if !typ.static { - fmt.Fprint(&b, "\n // Define the dynamic data (fields)\n") + if typ.forks[len(typ.forks)-1] == "" { + fmt.Fprint(&b, "\n") + } + fmt.Fprint(&b, " // Define the dynamic data (fields)\n") var ( dynIndices []int dynFields []string diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go index d7dcd07..0681689 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go @@ -37,7 +37,6 @@ func (obj *AttestationVariation3) DefineSSZ(codec *ssz.Codec) { if codec.Fork() >= ssz.ForkFuture { ssz.DefineUint64(codec, &obj.Future) // Field (3) - Future - 8 bytes } - // Define the dynamic data (fields) ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go index 10ad204..c3f76cf 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go @@ -72,7 +72,6 @@ func (obj *BeaconBlockBodyMonolith) DefineSSZ(codec *ssz.Codec) { if codec.Fork() >= ssz.ForkDeneb { ssz.DefineSliceOfStaticBytesOffset(codec, &obj.BlobKzgCommitments, 4096) // Offset (11) - BlobKzgCommitments - 4 bytes } - // Define the dynamic data (fields) ssz.DefineSliceOfStaticObjectsContent(codec, &obj.ProposerSlashings, 16) // Field ( 3) - ProposerSlashings - ? bytes ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.AttesterSlashings, 2) // Field ( 4) - AttesterSlashings - ? bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go index 2d46dff..4026f75 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -101,7 +101,6 @@ func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (29) - HistoricalSummaries - 4 bytes } - // Define the dynamic data (fields) ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? 
bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go index 632898d..a8626e8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -46,7 +46,6 @@ func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } - // Define the dynamic data (fields) ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index f62f00f..537e25f 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -49,7 +49,6 @@ func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } - // Define the dynamic data (fields) ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? bytes From d0416e014f70a3f38028b735eeb047ffdf69376d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 7 Aug 2024 10:49:21 +0300 Subject: [PATCH 05/12] tests: regenerate ssz codecs to fix merge issues --- .../testtypes/consensus-spec-tests/gen_bits_struct_ssz.go | 8 ++++---- .../consensus-spec-tests/gen_fixed_test_struct_ssz.go | 2 +- .../gen_single_field_test_struct_ssz.go | 2 +- .../consensus-spec-tests/gen_small_test_struct_ssz.go | 2 +- tests/testtypes/consensus-spec-tests/types_monoliths.go | 8 ++++---- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go index 1115077..884417d 100644 --- a/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_bits_struct_ssz.go @@ -6,13 +6,13 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. -func (obj *BitsStruct) SizeSSZ(fixed bool) uint32 { - var size = uint32(4 + 1 + 1 + 4 + 1) +func (obj *BitsStruct) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 4 + 1 + 1 + 4 + 1 if fixed { return size } - size += ssz.SizeSliceOfBits(obj.A) - size += ssz.SizeSliceOfBits(obj.D) + size += ssz.SizeSliceOfBits(sizer, obj.A) + size += ssz.SizeSliceOfBits(sizer, obj.D) return size } diff --git a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go index cfebc25..c9c5f25 100644 --- a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. 
-func (obj *FixedTestStruct) SizeSSZ() uint32 { +func (obj *FixedTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 1 + 8 + 4 } diff --git a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go index 4790439..a101db8 100644 --- a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SingleFieldTestStruct) SizeSSZ() uint32 { +func (obj *SingleFieldTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 1 } diff --git a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go index 020e433..8191ad5 100644 --- a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_ssz.go @@ -5,7 +5,7 @@ package consensus_spec_tests import "github.com/karalabe/ssz" // SizeSSZ returns the total size of the static ssz object. -func (obj *SmallTestStruct) SizeSSZ() uint32 { +func (obj *SmallTestStruct) SizeSSZ(sizer *ssz.Sizer) uint32 { return 2 + 2 } diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go index c9ab5f0..153526f 100644 --- a/tests/testtypes/consensus-spec-tests/types_monoliths.go +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -6,10 +6,10 @@ package consensus_spec_tests import "github.com/holiman/uint256" -//go:generate go run ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go -//go:generate go run ../../../cmd/sszgen -type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go -//go:generate go run ../../../cmd/sszgen -type BeaconBlockBodyMonolith -out gen_beacon_block_body_monolith_ssz.go -//go:generate go run ../../../cmd/sszgen -type BeaconStateMonolith -out gen_beacon_state_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyMonolith -out gen_beacon_block_body_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BeaconStateMonolith -out gen_beacon_state_monolith_ssz.go type BeaconBlockBodyMonolith struct { RandaoReveal [96]byte From 82304b2ac068dea1a7dc88322b6e52f8cbeee4e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 7 Aug 2024 11:33:26 +0300 Subject: [PATCH 06/12] tests: fix another merge issue --- tests/consensus_specs_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 929b7a1..476f06f 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -96,11 +96,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st // from yaml and check that too, but hex-in-yaml makes everything // beyond annoying. C'est la vie. 
obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err != nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err != nil { t.Fatalf("failed to decode SSZ stream: %v", err) } blob := new(bytes.Buffer) - if err := ssz.EncodeToStream(blob, obj); err != nil { + if err := ssz.EncodeToStream(blob, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to re-encode SSZ stream: %v", err) } if !bytes.Equal(blob.Bytes(), inSSZ) { @@ -109,11 +109,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st blob, inSSZ, len(prefix), blob.Bytes()[len(prefix):], inSSZ[len(prefix):]) } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err != nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to decode SSZ buffer: %v", err) } - bin := make([]byte, ssz.Size(obj)) - if err := ssz.EncodeToBytes(bin, obj); err != nil { + bin := make([]byte, ssz.Size(obj, ssz.ForkUnknown)) + if err := ssz.EncodeToBytes(bin, obj, ssz.ForkUnknown); err != nil { t.Fatalf("failed to re-encode SSZ buffer: %v", err) } if !bytes.Equal(bin, inSSZ) { @@ -123,14 +123,14 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Encoder/decoder seems to work, check if the size reported by the // encoded object actually matches the encoded stream - if size := ssz.Size(obj); size != uint32(len(inSSZ)) { + if size := ssz.Size(obj, ssz.ForkUnknown); size != uint32(len(inSSZ)) { t.Fatalf("reported/generated size mismatch: reported %v, generated %v", size, len(inSSZ)) } - hash := ssz.HashSequential(obj) + hash := ssz.HashSequential(obj, ssz.ForkUnknown) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("sequential merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } - hash = ssz.HashConcurrent(obj) + hash = ssz.HashConcurrent(obj, ssz.ForkUnknown) if fmt.Sprintf("%#x", hash) != inRoot.Root { t.Fatalf("concurrent merkle root mismatch: have %#x, want %s", hash, inRoot.Root) } @@ -164,11 +164,11 @@ func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind st } // Try to decode, it should fail obj := T(new(U)) - if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ))); err == nil { + if err := ssz.DecodeFromStream(bytes.NewReader(inSSZ), obj, uint32(len(inSSZ)), ssz.ForkUnknown); err == nil { t.Fatalf("succeeded in decoding invalid SSZ stream") } obj = T(new(U)) - if err := ssz.DecodeFromBytes(inSSZ, obj); err == nil { + if err := ssz.DecodeFromBytes(inSSZ, obj, ssz.ForkUnknown); err == nil { t.Fatalf("succeeded in decoding invalid SSZ buffer") } }) From 00f49d0e8dbae8bb16c4d92285bc4cfba056f72e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 20 Aug 2024 08:55:11 +0200 Subject: [PATCH 07/12] various wip snippets for integration testing --- cmd/sszgen/opset.go | 155 +++++++++++------- cmd/sszgen/types.go | 18 +- codec.go | 27 +++ decoder.go | 56 +++++++ encoder.go | 50 +++++- generics.go | 4 +- hasher.go | 21 +++ .../gen_beacon_state_monolith_ssz.go | 4 +- ...n_execution_payload_header_monolith_ssz.go | 6 +- .../gen_execution_payload_monolith_ssz.go | 4 +- .../consensus-spec-tests/types_monoliths.go | 14 +- 11 files changed, 282 insertions(+), 77 deletions(-) diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go index e639e56..ecc110f 100644 --- a/cmd/sszgen/opset.go +++ b/cmd/sszgen/opset.go @@ -41,7 +41,7 @@ type opsetDynamic struct { // 
resolveBasicOpset retrieves the opset required to handle a basic struct // field. Yes, we could maybe have some of these be "computed" instead of hard // coded, but it makes things brittle for corner-cases. -func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointer bool) (opset, error) { // Sanity check a few tag constraints relevant for all basic types if tags != nil { if tags.limit != nil { @@ -57,52 +57,77 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag) (opset if tags != nil && tags.size[0] != 1 { return nil, fmt.Errorf("boolean basic type requires ssz-size=1: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineBool({{.Codec}}, &{{.Field}})", - "EncodeBool({{.Codec}}, &{{.Field}})", - "DecodeBool({{.Codec}}, &{{.Field}})", - []int{1}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineBool({{.Codec}}, &{{.Field}})", + "EncodeBool({{.Codec}}, &{{.Field}})", + "DecodeBool({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } else { + return nil, fmt.Errorf("pointer of boolean basic type not supported yet") + } case types.Uint8: if tags != nil && tags.size[0] != 1 { return nil, fmt.Errorf("byte basic type requires ssz-size=1: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint8({{.Codec}}, &{{.Field}})", - "EncodeUint8({{.Codec}}, &{{.Field}})", - "DecodeUint8({{.Codec}}, &{{.Field}})", - []int{1}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint8({{.Codec}}, &{{.Field}})", + "EncodeUint8({{.Codec}}, &{{.Field}})", + "DecodeUint8({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil + } else { + return nil, fmt.Errorf("pointer of byte basic type not supported yet") + } case types.Uint16: if tags != nil && tags.size[0] != 2 { return nil, fmt.Errorf("uint16 basic type requires ssz-size=2: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint16({{.Codec}}, &{{.Field}})", - "EncodeUint16({{.Codec}}, &{{.Field}})", - "DecodeUint16({{.Codec}}, &{{.Field}})", - []int{2}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint16({{.Codec}}, &{{.Field}})", + "EncodeUint16({{.Codec}}, &{{.Field}})", + "DecodeUint16({{.Codec}}, &{{.Field}})", + []int{2}, + }, nil + } else { + return nil, fmt.Errorf("pointer of uint16 basic type not supported yet") + } case types.Uint32: if tags != nil && tags.size[0] != 4 { return nil, fmt.Errorf("uint32 basic type requires ssz-size=4: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint32({{.Codec}}, &{{.Field}})", - "EncodeUint32({{.Codec}}, &{{.Field}})", - "DecodeUint32({{.Codec}}, &{{.Field}})", - []int{4}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint32({{.Codec}}, &{{.Field}})", + "EncodeUint32({{.Codec}}, &{{.Field}})", + "DecodeUint32({{.Codec}}, &{{.Field}})", + []int{4}, + }, nil + } else { + return nil, fmt.Errorf("pointer of uint32 basic type not supported yet") + } case types.Uint64: if tags != nil && tags.size[0] != 8 { return nil, fmt.Errorf("uint64 basic type requires ssz-size=8: have %d", tags.size[0]) } - return &opsetStatic{ - "DefineUint64({{.Codec}}, &{{.Field}})", - "EncodeUint64({{.Codec}}, &{{.Field}})", - "DecodeUint64({{.Codec}}, &{{.Field}})", - []int{8}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineUint64({{.Codec}}, &{{.Field}})", + "EncodeUint64({{.Codec}}, &{{.Field}})", + "DecodeUint64({{.Codec}}, &{{.Field}})", + []int{8}, + }, nil + } else { + return &opsetStatic{ + "DefineUint64Ptr({{.Codec}}, &{{.Field}})", + 
"EncodeUint64Ptr({{.Codec}}, &{{.Field}})", + "DecodeUint64Ptr({{.Codec}}, &{{.Field}})", + []int{8}, + }, nil + } default: return nil, fmt.Errorf("unsupported basic type: %s", typ) } @@ -130,7 +155,7 @@ func (p *parseContext) resolveBitlistOpset(tags *sizeTag) (opset, error) { }, nil } -func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag, pointer bool) (opset, error) { switch typ := typ.(type) { case *types.Basic: // Sanity check a few tag constraints relevant for all arrays of basic types @@ -146,12 +171,16 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag if len(tags.size) != 1 || tags.size[0] < (size-1)*8+1 || tags.size[0] > size*8 { return nil, fmt.Errorf("array of bits tag conflict: field supports %d-%d bits, tag wants %v bits", (size-1)*8+1, size*8, tags.size) } - return &opsetStatic{ - fmt.Sprintf("DefineArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - fmt.Sprintf("EncodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - fmt.Sprintf("DecodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly - []int{size}, - }, nil + if !pointer { + return &opsetStatic{ + fmt.Sprintf("DefineArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("EncodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + fmt.Sprintf("DecodeArrayOfBits({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly + []int{size}, + }, nil + } else { + return nil, fmt.Errorf("pointer of array of bits not supported") + } } // Not a bitvector, interpret as plain byte array if tags != nil { @@ -161,13 +190,22 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return nil, fmt.Errorf("array of byte basic type tag conflict: field is %d bytes, tag wants %v bytes", size, tags.size) } } - return &opsetStatic{ - "DefineStaticBytes({{.Codec}}, &{{.Field}})", - "EncodeStaticBytes({{.Codec}}, &{{.Field}})", - "DecodeStaticBytes({{.Codec}}, &{{.Field}})", - []int{size}, - }, nil + if !pointer { + return &opsetStatic{ + "DefineStaticBytes({{.Codec}}, &{{.Field}})", + "EncodeStaticBytes({{.Codec}}, &{{.Field}})", + "DecodeStaticBytes({{.Codec}}, &{{.Field}})", + []int{size}, + }, nil + } else { + return &opsetStatic{ + "DefineStaticBytesPtr({{.Codec}}, &{{.Field}})", + "EncodeStaticBytesPtr({{.Codec}}, &{{.Field}})", + "DecodeStaticBytesPtr({{.Codec}}, &{{.Field}})", + []int{size}, + }, nil + } case types.Uint64: if tags != nil { if (len(tags.size) != 1 && len(tags.size) != 2) || @@ -176,13 +214,16 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return nil, fmt.Errorf("array of byte basic type tag conflict: field is %d bytes, tag wants %v bytes", size, tags.size) } } - return &opsetStatic{ - "DefineArrayOfUint64s({{.Codec}}, &{{.Field}})", - "EncodeArrayOfUint64s({{.Codec}}, &{{.Field}})", - "DecodeArrayOfUint64s({{.Codec}}, &{{.Field}})", - []int{size, 8}, - }, nil - + if !pointer { + return &opsetStatic{ + "DefineArrayOfUint64s({{.Codec}}, &{{.Field}})", + "EncodeArrayOfUint64s({{.Codec}}, &{{.Field}})", + "DecodeArrayOfUint64s({{.Codec}}, &{{.Field}})", + []int{size, 8}, + }, nil + } else { + return nil, fmt.Errorf("pointer of array of byte basic type not supported") + } default: return nil, 
fmt.Errorf("unsupported array item basic type: %s", typ) } @@ -190,7 +231,7 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag return p.resolveArrayOfArrayOpset(typ.Elem(), size, int(typ.Len()), tags) case *types.Named: - return p.resolveArrayOpset(typ.Underlying(), size, tags) + return p.resolveArrayOpset(typ.Underlying(), size, tags, pointer) default: return nil, fmt.Errorf("unsupported array item type: %s", typ) @@ -456,7 +497,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o return nil, fmt.Errorf("uint256 basic type cannot have ssz-max tag") } if len(tags.size) != 1 || tags.size[0] != 32 { - return nil, fmt.Errorf("uint256 basic type tag conflict: filed is [32] bytes, tag wants %v", tags.size) + return nil, fmt.Errorf("uint256 basic type tag conflict: field is [32] bytes, tag wants %v", tags.size) } } return &opsetStatic{ @@ -472,7 +513,7 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o return nil, fmt.Errorf("big.Int (uint256) basic type cannot have ssz-max tag") } if len(tags.size) != 1 || tags.size[0] != 32 { - return nil, fmt.Errorf("big.Int (uint256) basic type tag conflict: filed is [32] bytes, tag wants %v", tags.size) + return nil, fmt.Errorf("big.Int (uint256) basic type tag conflict: field is [32] bytes, tag wants %v", tags.size) } } return &opsetStatic{ @@ -508,5 +549,9 @@ func (p *parseContext) resolvePointerOpset(typ *types.Pointer, tags *sizeTag) (o nil, nil, }, nil } - return nil, fmt.Errorf("unsupported pointer type %s", typ.String()) + named, ok := typ.Elem().(*types.Named) + if !ok { + return nil, fmt.Errorf("unsupported pointer type %s", typ.String()) + } + return p.resolveOpset(named.Underlying(), tags, true) } diff --git a/cmd/sszgen/types.go b/cmd/sszgen/types.go index 1df5ddf..fd46b63 100644 --- a/cmd/sszgen/types.go +++ b/cmd/sszgen/types.go @@ -44,7 +44,7 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss continue } // Required field found, validate type with tag content - opset, err := p.resolveOpset(f.Type(), tags) + opset, err := p.resolveOpset(f.Type(), tags, false) if err != nil { return nil, fmt.Errorf("failed to validate field %s.%s: %v", named.Obj().Name(), f.Name(), err) } @@ -71,24 +71,32 @@ func (p *parseContext) makeContainer(named *types.Named, typ *types.Struct) (*ss // whether there's a collision between them, or if more tags are needed to fully // derive the size. If the type/tags are in sync and well-defined, an opset will // be returned that the generator can use to create the code. 
-func (p *parseContext) resolveOpset(typ types.Type, tags *sizeTag) (opset, error) { +func (p *parseContext) resolveOpset(typ types.Type, tags *sizeTag, pointer bool) (opset, error) { switch t := typ.(type) { case *types.Named: if isBitlist(typ) { return p.resolveBitlistOpset(tags) } - return p.resolveOpset(t.Underlying(), tags) + return p.resolveOpset(t.Underlying(), tags, pointer) case *types.Basic: - return p.resolveBasicOpset(t, tags) + return p.resolveBasicOpset(t, tags, pointer) case *types.Array: - return p.resolveArrayOpset(t.Elem(), int(t.Len()), tags) + return p.resolveArrayOpset(t.Elem(), int(t.Len()), tags, pointer) case *types.Slice: return p.resolveSliceOpset(t.Elem(), tags) case *types.Pointer: + switch tt := t.Elem().(type) { + case *types.Basic: + return p.resolveBasicOpset(tt, tags, true) + + case *types.Array: + return p.resolveArrayOpset(tt.Elem(), int(tt.Len()), tags, true) + + } return p.resolvePointerOpset(t, tags) } return nil, fmt.Errorf("unsupported type %s", typ.String()) diff --git a/codec.go b/codec.go index 97ea6c8..90fa4ea 100644 --- a/codec.go +++ b/codec.go @@ -125,6 +125,19 @@ func DefineUint64[T ~uint64](c *Codec, n *T) { HashUint64(c.has, *n) } +// DefineUint64Ptr defines the next field as a uint64. +func DefineUint64Ptr[T ~uint64](c *Codec, n **T) { + if c.enc != nil { + EncodeUint64Ptr(c.enc, *n) + return + } + if c.dec != nil { + DecodeUint64Ptr(c.dec, n) + return + } + HashUint64Ptr(c.has, *n) +} + // DefineUint256 defines the next field as a uint256. func DefineUint256(c *Codec, n **uint256.Int) { if c.enc != nil { @@ -165,6 +178,20 @@ func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) { HashStaticBytes(c.has, blob) } +// DefineStaticBytesPtr defines the next field as static binary blob. This method +// can be used for byte arrays. +func DefineStaticBytesPtr[T commonBytesLengths](c *Codec, blob **T) { + if c.enc != nil { + EncodeStaticBytesPtr(c.enc, *blob) + return + } + if c.dec != nil { + DecodeStaticBytesPtr(c.dec, blob) + return + } + HashStaticBytesPtr(c.has, *blob) +} + // DefineCheckedStaticBytes defines the next field as static binary blob. This // method can be used for plain byte slices, which is more expensive, since it // needs runtime size validation. diff --git a/decoder.go b/decoder.go index 7f1f13d..2821511 100644 --- a/decoder.go +++ b/decoder.go @@ -191,6 +191,34 @@ func DecodeUint64[T ~uint64](dec *Decoder, n *T) { } } +// DecodeUint64Ptr parses a uint64. +func DecodeUint64Ptr[T ~uint64](dec *Decoder, n **T) { + if dec.err != nil { + return + } + if dec.inReader != nil { + _, dec.err = io.ReadFull(dec.inReader, dec.buf[:8]) + if dec.err != nil { + return + } + if *n == nil { + *n = new(T) + } + *(*n) = T(binary.LittleEndian.Uint64(dec.buf[:8])) + dec.inRead += 8 + } else { + if len(dec.inBuffer) < 8 { + dec.err = io.ErrUnexpectedEOF + return + } + if *n == nil { + *n = new(T) + } + *(*n) = T(binary.LittleEndian.Uint64(dec.inBuffer)) + dec.inBuffer = dec.inBuffer[8:] + } +} + // DecodeUint256 parses a uint256. func DecodeUint256(dec *Decoder, n **uint256.Int) { if dec.err != nil { @@ -267,6 +295,34 @@ func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { } } +// DecodeStaticBytesPtr parses a static binary blob. +func DecodeStaticBytesPtr[T commonBytesLengths](dec *Decoder, blob **T) { + if dec.err != nil { + return + } + if dec.inReader != nil { + if *blob == nil { + *blob = new(T) + } + // The code below should have used `**blob[:]`, alas Go's generics compiler + // is missing that (i.e. 
a bug): https://github.com/golang/go/issues/51740 + _, dec.err = io.ReadFull(dec.inReader, unsafe.Slice(&(*(*blob))[0], len(*(*blob)))) + dec.inRead += uint32(len(*(*blob))) + } else { + if *blob == nil { + *blob = new(T) + } + if len(dec.inBuffer) < len(*(*blob)) { + dec.err = io.ErrUnexpectedEOF + return + } + // The code below should have used `**blob[:]`, alas Go's generics compiler + // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 + copy(unsafe.Slice(&(*(*blob))[0], len(*(*blob))), dec.inBuffer) + dec.inBuffer = dec.inBuffer[len(*(*blob)):] + } +} + // DecodeCheckedStaticBytes parses a static binary blob. func DecodeCheckedStaticBytes(dec *Decoder, blob *[]byte, size uint64) { if dec.err != nil { diff --git a/encoder.go b/encoder.go index 6b2acb1..6e01cb6 100644 --- a/encoder.go +++ b/encoder.go @@ -149,7 +149,6 @@ func EncodeUint32[T ~uint32](enc *Encoder, n T) { // EncodeUint64 serializes a uint64. func EncodeUint64[T ~uint64](enc *Encoder, n T) { - // Nope, dive into actual encoding if enc.outWriter != nil { if enc.err != nil { return @@ -162,6 +161,30 @@ func EncodeUint64[T ~uint64](enc *Encoder, n T) { } } +// EncodeUint64Ptr serializes a uint64. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint64Ptr[T ~uint64](enc *Encoder, n *T) { + if enc.outWriter != nil { + if enc.err != nil { + return + } + if n != nil { + binary.LittleEndian.PutUint64(enc.buf[:8], (uint64)(*n)) + _, enc.err = enc.outWriter.Write(enc.buf[:8]) + } else { + _, enc.err = enc.outWriter.Write(uint256Zero[:8]) + } + } else { + if n != nil { + binary.LittleEndian.PutUint64(enc.outBuffer, (uint64)(*n)) + } else { + copy(enc.outBuffer, uint256Zero[:8]) + } + enc.outBuffer = enc.outBuffer[8:] + } +} + // EncodeUint256 serializes a uint256. // // Note, a nil pointer is serialized as zero. @@ -233,6 +256,31 @@ func EncodeStaticBytes[T commonBytesLengths](enc *Encoder, blob *T) { } } +// EncodeStaticBytesPtr serializes a static binary blob. +// +// Note, a nil pointer is serialized as a zero-value blob. +func EncodeStaticBytesPtr[T commonBytesLengths](enc *Encoder, blob *T) { + if enc.outWriter != nil { + if enc.err != nil { + return + } + if blob == nil { + blob = new(T) // TODO(karalabe): Make this alloc free somehow? + } + // The code below should have used `*blob[:]`, alas Go's generics compiler + // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 + _, enc.err = enc.outWriter.Write(unsafe.Slice(&(*blob)[0], len(*blob))) + } else { + if blob == nil { + blob = new(T) // TODO(karalabe): Make this alloc free somehow? + } + // The code below should have used `blob[:]`, alas Go's generics compiler + // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 + copy(enc.outBuffer, unsafe.Slice(&(*blob)[0], len(*blob))) + enc.outBuffer = enc.outBuffer[len(*blob):] + } +} + // EncodeCheckedStaticBytes serializes a static binary blob. func EncodeCheckedStaticBytes(enc *Encoder, blob []byte) { if enc.outWriter != nil { diff --git a/generics.go b/generics.go index 8b34f6e..24f558f 100644 --- a/generics.go +++ b/generics.go @@ -30,8 +30,8 @@ type newableDynamicObject[U any] interface { // generics compiler that it cannot represent arrays of arbitrary sizes with // one shorthand notation. 
type commonBytesLengths interface { - // fork | address | verkle-stem | hash | pubkey | committee | signature | bloom - ~[4]byte | ~[20]byte | ~[31]byte | ~[32]byte | ~[48]byte | ~[64]byte | ~[96]byte | ~[256]byte + // fork | nonce | address | verkle-stem | hash | pubkey | committee | signature | bloom + ~[4]byte | ~[8]byte | ~[20]byte | ~[31]byte | ~[32]byte | ~[48]byte | ~[64]byte | ~[96]byte | ~[256]byte } // commonUint64sLengths is a generic type whose purpose is to permit that fixed- diff --git a/hasher.go b/hasher.go index b8a0c6d..b512369 100644 --- a/hasher.go +++ b/hasher.go @@ -108,6 +108,17 @@ func HashUint64[T ~uint64](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint64Ptr hashes a uint64. +// +// Note, a nil pointer is hashed as zero. +func HashUint64Ptr[T ~uint64](h *Hasher, n *T) { + var buffer [32]byte + if n != nil { + binary.LittleEndian.PutUint64(buffer[:], uint64(*n)) + } + h.insertChunk(buffer, 0) +} + // HashUint256 hashes a uint256. // // Note, a nil pointer is hashed as zero. @@ -142,6 +153,16 @@ func HashStaticBytes[T commonBytesLengths](h *Hasher, blob *T) { h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob))) } +// HashStaticBytesPtr hashes a static binary blob. +func HashStaticBytesPtr[T commonBytesLengths](h *Hasher, blob *T) { + if blob == nil { + blob = new(T) // TODO(karalabe): Make this alloc free somehow? + } + // The code below should have used `blob[:]`, alas Go's generics compiler + // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 + h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob))) +} + // HashCheckedStaticBytes hashes a static binary blob. func HashCheckedStaticBytes(h *Hasher, blob []byte) { h.hashBytes(blob) diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go index 4026f75..28c07c4 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -97,8 +97,8 @@ func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes } if codec.Fork() >= ssz.ForkCapella { - ssz.DefineUint64(codec, &obj.NextWithdrawalIndex) // Field (27) - NextWithdrawalIndex - 8 bytes - ssz.DefineUint64(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.NextWithdrawalIndex) // Field (27) - NextWithdrawalIndex - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (29) - HistoricalSummaries - 4 bytes } // Define the dynamic data (fields) diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go index a8626e8..6355c0f 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -40,11 +40,11 @@ func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes if 
codec.Fork() >= ssz.ForkCapella { - ssz.DefineStaticBytes(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes + ssz.DefineStaticBytesPtr(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes } if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } // Define the dynamic data (fields) ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index 537e25f..cb2b48b 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -46,8 +46,8 @@ func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Withdrawals, 16) // Offset (14) - Withdrawals - 4 bytes } if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - ssz.DefineUint64(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64Ptr(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes } // Define the dynamic data (fields) ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? 
bytes diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go index 153526f..5f865c6 100644 --- a/tests/testtypes/consensus-spec-tests/types_monoliths.go +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -54,8 +54,8 @@ type BeaconStateMonolith struct { CurrentSyncCommittee *SyncCommittee ` ssz-fork:"altair"` NextSyncCommittee *SyncCommittee ` ssz-fork:"altair"` LatestExecutionPayloadHeader *ExecutionPayloadHeaderMonolith ` ssz-fork:"bellatrix"` - NextWithdrawalIndex uint64 ` ssz-fork:"capella"` - NextWithdrawalValidatorIndex uint64 ` ssz-fork:"capella"` + NextWithdrawalIndex *uint64 ` ssz-fork:"capella"` + NextWithdrawalValidatorIndex *uint64 ` ssz-fork:"capella"` HistoricalSummaries []*HistoricalSummary `ssz-max:"16777216" ssz-fork:"capella"` } @@ -75,8 +75,8 @@ type ExecutionPayloadMonolith struct { BlockHash Hash Transactions [][]byte `ssz-max:"1048576,1073741824"` Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"capella"` - BlobGasUsed uint64 ` ssz-fork:"deneb"` - ExcessBlobGas uint64 ` ssz-fork:"deneb"` + BlobGasUsed *uint64 ` ssz-fork:"deneb"` + ExcessBlobGas *uint64 ` ssz-fork:"deneb"` } type ExecutionPayloadHeaderMonolith struct { @@ -94,7 +94,7 @@ type ExecutionPayloadHeaderMonolith struct { BaseFeePerGas [32]byte BlockHash [32]byte TransactionsRoot [32]byte - WithdrawalRoot [32]byte `ssz-fork:"capella"` - BlobGasUsed uint64 `ssz-fork:"deneb"` - ExcessBlobGas uint64 `ssz-fork:"deneb"` + WithdrawalRoot *[32]byte `ssz-fork:"capella"` + BlobGasUsed *uint64 `ssz-fork:"deneb"` + ExcessBlobGas *uint64 `ssz-fork:"deneb"` } From c74f127abf3a44a36baf36f6d326b4417845295d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 30 Aug 2024 13:50:07 +0300 Subject: [PATCH 08/12] ssz, tests: support operating on zero values across the board --- cmd/sszgen/opset.go | 12 +- codec.go | 24 ++-- decoder.go | 16 ++- encoder.go | 117 +++++++++++++++--- hasher.go | 66 ++++++++-- sizer.go | 18 ++- tests/consensus_specs_test.go | 13 ++ .../gen_attestation_ssz.go | 4 +- .../gen_beacon_state_monolith_ssz.go | 4 +- ...n_execution_payload_header_monolith_ssz.go | 6 +- .../gen_execution_payload_monolith_ssz.go | 4 +- tests/zeroval_test.go | 77 ++++++++++++ zeroes.go | 46 +++++++ 13 files changed, 347 insertions(+), 60 deletions(-) create mode 100644 tests/zeroval_test.go create mode 100644 zeroes.go diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go index ecc110f..0f3db13 100644 --- a/cmd/sszgen/opset.go +++ b/cmd/sszgen/opset.go @@ -122,9 +122,9 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointe }, nil } else { return &opsetStatic{ - "DefineUint64Ptr({{.Codec}}, &{{.Field}})", - "EncodeUint64Ptr({{.Codec}}, &{{.Field}})", - "DecodeUint64Ptr({{.Codec}}, &{{.Field}})", + "DefineUint64Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint64Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint64Pointer({{.Codec}}, &{{.Field}})", []int{8}, }, nil } @@ -199,9 +199,9 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag }, nil } else { return &opsetStatic{ - "DefineStaticBytesPtr({{.Codec}}, &{{.Field}})", - "EncodeStaticBytesPtr({{.Codec}}, &{{.Field}})", - "DecodeStaticBytesPtr({{.Codec}}, &{{.Field}})", + "DefineStaticBytesPointer({{.Codec}}, &{{.Field}})", + "EncodeStaticBytesPointer({{.Codec}}, &{{.Field}})", + "DecodeStaticBytesPointer({{.Codec}}, &{{.Field}})", []int{size}, }, nil diff --git a/codec.go b/codec.go index 90fa4ea..6e5bf63 100644 
--- a/codec.go +++ b/codec.go @@ -125,17 +125,17 @@ func DefineUint64[T ~uint64](c *Codec, n *T) { HashUint64(c.has, *n) } -// DefineUint64Ptr defines the next field as a uint64. -func DefineUint64Ptr[T ~uint64](c *Codec, n **T) { +// DefineUint64Pointer defines the next field as a uint64. +func DefineUint64Pointer[T ~uint64](c *Codec, n **T) { if c.enc != nil { - EncodeUint64Ptr(c.enc, *n) + EncodeUint64Pointer(c.enc, *n) return } if c.dec != nil { - DecodeUint64Ptr(c.dec, n) + DecodeUint64Pointer(c.dec, n) return } - HashUint64Ptr(c.has, *n) + HashUint64Pointer(c.has, *n) } // DefineUint256 defines the next field as a uint256. @@ -178,18 +178,18 @@ func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) { HashStaticBytes(c.has, blob) } -// DefineStaticBytesPtr defines the next field as static binary blob. This method +// DefineStaticBytesPointer defines the next field as static binary blob. This method // can be used for byte arrays. -func DefineStaticBytesPtr[T commonBytesLengths](c *Codec, blob **T) { +func DefineStaticBytesPointer[T commonBytesLengths](c *Codec, blob **T) { if c.enc != nil { - EncodeStaticBytesPtr(c.enc, *blob) + EncodeStaticBytesPointer(c.enc, *blob) return } if c.dec != nil { - DecodeStaticBytesPtr(c.dec, blob) + DecodeStaticBytesPointer(c.dec, blob) return } - HashStaticBytesPtr(c.has, *blob) + HashStaticBytesPointer(c.has, *blob) } // DefineCheckedStaticBytes defines the next field as static binary blob. This @@ -197,7 +197,7 @@ func DefineStaticBytesPtr[T commonBytesLengths](c *Codec, blob **T) { // needs runtime size validation. func DefineCheckedStaticBytes(c *Codec, blob *[]byte, size uint64) { if c.enc != nil { - EncodeCheckedStaticBytes(c.enc, *blob) + EncodeCheckedStaticBytes(c.enc, *blob, size) return } if c.dec != nil { @@ -385,7 +385,7 @@ func DefineUnsafeArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs []T) { // which is more expensive since it needs runtime size validation. func DefineCheckedArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs *[]T, size uint64) { if c.enc != nil { - EncodeCheckedArrayOfStaticBytes(c.enc, *blobs) + EncodeCheckedArrayOfStaticBytes(c.enc, *blobs, size) return } if c.dec != nil { diff --git a/decoder.go b/decoder.go index 2821511..0134c34 100644 --- a/decoder.go +++ b/decoder.go @@ -191,8 +191,11 @@ func DecodeUint64[T ~uint64](dec *Decoder, n *T) { } } -// DecodeUint64Ptr parses a uint64. -func DecodeUint64Ptr[T ~uint64](dec *Decoder, n **T) { +// DecodeUint64Pointer parses a uint64. +// +// This method is similar to DecodeUint64, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint64Pointer[T ~uint64](dec *Decoder, n **T) { if dec.err != nil { return } @@ -295,8 +298,11 @@ func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { } } -// DecodeStaticBytesPtr parses a static binary blob. -func DecodeStaticBytesPtr[T commonBytesLengths](dec *Decoder, blob **T) { +// DecodeStaticBytesPointer parses a static binary blob. +// +// This method is similar to DecodeStaticBytes, but will also initialize the +// pointer if it is not allocated yet. +func DecodeStaticBytesPointer[T commonBytesLengths](dec *Decoder, blob **T) { if dec.err != nil { return } @@ -933,7 +939,7 @@ func (dec *Decoder) decodeOffset(list bool) { dec.offsets = append(dec.offsets, offset) } -// retrieveSize retrieves the length of the nest dynamic item based on the seen +// retrieveSize retrieves the length of the next dynamic item based on the seen // and cached offsets. 
func (dec *Decoder) retrieveSize() uint32 { // If sizes aren't yet available, pre-compute them all. The reason we use a diff --git a/encoder.go index 6e01cb6..f9ca8d0 100644 --- a/encoder.go +++ b/encoder.go @@ -8,6 +8,7 @@ import ( "encoding/binary" "io" "math/big" + "reflect" "unsafe" "github.com/holiman/uint256" @@ -19,6 +20,7 @@ var ( boolFalse = []byte{0x00} boolTrue = []byte{0x01} uint256Zero = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + bitlistZero = bitfield.NewBitlist(0) ) // Encoder is a wrapper around an io.Writer or a []byte buffer to implement SSZ @@ -161,10 +163,10 @@ func EncodeUint64[T ~uint64](enc *Encoder, n T) { } } -// EncodeUint64Ptr serializes a uint64. +// EncodeUint64Pointer serializes a uint64. // // Note, a nil pointer is serialized as zero. -func EncodeUint64Ptr[T ~uint64](enc *Encoder, n *T) { +func EncodeUint64Pointer[T ~uint64](enc *Encoder, n *T) { if enc.outWriter != nil { if enc.err != nil { return @@ -256,24 +258,24 @@ func EncodeStaticBytes[T commonBytesLengths](enc *Encoder, blob *T) { } } -// EncodeStaticBytesPtr serializes a static binary blob. +// EncodeStaticBytesPointer serializes a static binary blob. // // Note, a nil pointer is serialized as a zero-value blob. -func EncodeStaticBytesPtr[T commonBytesLengths](enc *Encoder, blob *T) { +func EncodeStaticBytesPointer[T commonBytesLengths](enc *Encoder, blob *T) { + // If the blob is nil, write a batch of zeroes and exit + if blob == nil { + enc.encodeZeroes(reflect.TypeFor[T]().Len()) + return + } + // Blob not nil, write the actual data content if enc.outWriter != nil { if enc.err != nil { return } - if blob == nil { - blob = new(T) // TODO(karalabe): Make this alloc free somehow? - } // The code below should have used `*blob[:]`, alas Go's generics compiler // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 _, enc.err = enc.outWriter.Write(unsafe.Slice(&(*blob)[0], len(*blob))) } else { - if blob == nil { - blob = new(T) // TODO(karalabe): Make this alloc free somehow? - } // The code below should have used `blob[:]`, alas Go's generics compiler // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 copy(enc.outBuffer, unsafe.Slice(&(*blob)[0], len(*blob))) @@ -282,7 +284,13 @@ func EncodeStaticBytesPtr[T commonBytesLengths](enc *Encoder, blob *T) { } // EncodeCheckedStaticBytes serializes a static binary blob. -func EncodeCheckedStaticBytes(enc *Encoder, blob []byte) { +func EncodeCheckedStaticBytes(enc *Encoder, blob []byte, size uint64) { + // If the blob is nil, write a batch of zeroes and exit + if blob == nil { + enc.encodeZeroes(int(size)) + return + } + // Blob not nil, write the actual data content if enc.outWriter != nil { if enc.err != nil { return @@ -323,15 +331,22 @@ func EncodeDynamicBytesContent(enc *Encoder, blob []byte) { } // EncodeStaticObject serializes a static ssz object. -func EncodeStaticObject(enc *Encoder, obj StaticObject) { +func EncodeStaticObject[T newableStaticObject[U], U any](enc *Encoder, obj T) { if enc.err != nil { return } + if obj == nil { + // If the object is nil, pull up its zero value. This will be very slow, + // but it should not happen in production, only during tests mostly. + obj = zeroValueStatic[T, U]() + } obj.DefineSSZ(enc.codec) } // EncodeDynamicObjectOffset serializes a dynamic ssz object. -func EncodeDynamicObjectOffset(enc *Encoder, obj DynamicObject) { +// +// Note, nil will be encoded as a zero-value initialized object.
+func EncodeDynamicObjectOffset[T newableDynamicObject[U], U any](enc *Encoder, obj T) { if enc.outWriter != nil { if enc.err != nil { return } @@ -342,14 +357,26 @@ func EncodeDynamicObjectOffset(enc *Encoder, obj DynamicObject) { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] } + // If the object is nil, pull up its zero value. This will be very slow, but + // it should not happen in production, only during tests mostly. + if obj == nil { + obj = zeroValueDynamic[T, U]() + } enc.offset += obj.SizeSSZ(enc.sizer, false) } // EncodeDynamicObjectContent is the lazy data writer for EncodeDynamicObjectOffset. -func EncodeDynamicObjectContent(enc *Encoder, obj DynamicObject) { +// +// Note, nil will be encoded as a zero-value initialized object. +func EncodeDynamicObjectContent[T newableDynamicObject[U], U any](enc *Encoder, obj T) { if enc.err != nil { return } + // If the object is nil, pull up its zero value. This will be very slow, but + // it should not happen in production, only during tests mostly. + if obj == nil { + obj = zeroValueDynamic[T, U]() + } enc.offsetDynamics(obj.SizeSSZ(enc.sizer, true)) obj.DefineSSZ(enc.codec) } @@ -372,6 +399,8 @@ func EncodeArrayOfBits[T commonBitsLengths](enc *Encoder, bits *T) { } // EncodeSliceOfBitsOffset serializes a dynamic slice of (packed) bits. +// +// Note, a nil slice of bits is serialized as an empty bit list. func EncodeSliceOfBitsOffset(enc *Encoder, bits bitfield.Bitlist) { if enc.outWriter != nil { if enc.err != nil { @@ -383,19 +412,34 @@ func EncodeSliceOfBitsOffset(enc *Encoder, bits bitfield.Bitlist) { binary.LittleEndian.PutUint32(enc.outBuffer, enc.offset) enc.outBuffer = enc.outBuffer[4:] } - enc.offset += uint32(len(bits)) + if bits != nil { + enc.offset += uint32(len(bits)) + } else { + enc.offset += uint32(len(bitlistZero)) + } } // EncodeSliceOfBitsContent is the lazy data writer for EncodeSliceOfBitsOffset. +// +// Note, a nil slice of bits is serialized as an empty bit list. func EncodeSliceOfBitsContent(enc *Encoder, bits bitfield.Bitlist) { if enc.outWriter != nil { if enc.err != nil { return } - _, enc.err = enc.outWriter.Write(bits) // bitfield.Bitlist already has the length bit set + if bits != nil { + _, enc.err = enc.outWriter.Write(bits) // bitfield.Bitlist already has the length bit set + } else { + _, enc.err = enc.outWriter.Write(bitlistZero) + } } else { - copy(enc.outBuffer, bits) - enc.outBuffer = enc.outBuffer[len(bits):] // bitfield.Bitlist already has the length bit set + if bits != nil { + copy(enc.outBuffer, bits) + enc.outBuffer = enc.outBuffer[len(bits):] // bitfield.Bitlist already has the length bit set + } else { + copy(enc.outBuffer, bitlistZero) + enc.outBuffer = enc.outBuffer[len(bitlistZero):] + } } } @@ -502,7 +546,12 @@ func EncodeUnsafeArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs [] // EncodeCheckedArrayOfStaticBytes serializes a static array of static binary // blobs. -func EncodeCheckedArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs []T) { +func EncodeCheckedArrayOfStaticBytes[T commonBytesLengths](enc *Encoder, blobs []T, size uint64) { + // If the blobs are nil, write a batch of zeroes and exit + if blobs == nil { + enc.encodeZeroes(int(size) * reflect.TypeFor[T]().Len()) + return + } // Internally this method is essentially calling EncodeStaticBytes on all // the blobs in a loop. Practically, we've inlined that call to make things // a *lot* faster.
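[Editor's note, not part of the patch: a minimal usage sketch of the nil-tolerant encoders above. The HeaderMonolith container is hypothetical; the real equivalents are the generated monolith codecs under tests/testtypes. Only APIs visible in this series are assumed: sizer.Fork, codec.Fork, ssz.DefineUint64, ssz.DefineUint64Pointer, and the fork-scoped ssz.Size / ssz.EncodeToBytes.]

// HeaderMonolith is a hypothetical static container: Extra only exists from
// the Deneb fork onwards, so it is declared as a pointer and fork-gated. The
// ssz-fork tag mirrors what sszgen would consume; here the codec is written
// by hand.
type HeaderMonolith struct {
	Number uint64
	Extra  *uint64 `ssz-fork:"deneb"`
}

// SizeSSZ returns the static size of the object on the requested fork.
func (h *HeaderMonolith) SizeSSZ(sizer *ssz.Sizer) uint32 {
	size := uint32(8) // Number
	if sizer.Fork() >= ssz.ForkDeneb {
		size += 8 // Extra
	}
	return size
}

// DefineSSZ registers Extra only on Deneb and later: a nil Extra is encoded
// as eight zero bytes (EncodeUint64Pointer) and is allocated on demand when
// decoding (DecodeUint64Pointer).
func (h *HeaderMonolith) DefineSSZ(codec *ssz.Codec) {
	ssz.DefineUint64(codec, &h.Number) // Field (0) - Number - 8 bytes
	if codec.Fork() >= ssz.ForkDeneb {
		ssz.DefineUint64Pointer(codec, &h.Extra) // Field (1) - Extra - 8 bytes
	}
}

Encoding the zero value on Deneb then yields 16 bytes, the trailing 8 all zero, without tripping over the nil field:

	obj := new(HeaderMonolith) // Extra deliberately left nil
	bin := make([]byte, ssz.Size(obj, ssz.ForkDeneb))
	err := ssz.EncodeToBytes(bin, obj, ssz.ForkDeneb)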
@@ -721,3 +770,33 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ func (enc *Encoder) offsetDynamics(offset uint32) { enc.offset = offset } + +// encodeZeroes is a helper to append a bunch of zero values to the output stream. +// This method is mainly used for encoding uninitialized fields without allocating +// them beforehand. +func (enc *Encoder) encodeZeroes(size int) { + if enc.outWriter != nil { + if enc.err != nil { + return + } + for size >= 32 { + if _, enc.err = enc.outWriter.Write(uint256Zero); enc.err != nil { + return + } + size -= 32 + } + if size > 0 { + _, enc.err = enc.outWriter.Write(uint256Zero[:size]) + } + } else { + for size >= 32 { + copy(enc.outBuffer, uint256Zero) + enc.outBuffer = enc.outBuffer[32:] + size -= 32 + } + if size > 0 { + copy(enc.outBuffer, uint256Zero[:size]) + enc.outBuffer = enc.outBuffer[size:] + } + } +} diff --git a/hasher.go index b512369..56a8f36 100644 --- a/hasher.go +++ b/hasher.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "math/big" bitops "math/bits" + "reflect" "runtime" "unsafe" @@ -27,6 +28,7 @@ const concurrencyThreshold = 65536 // Some helpers to avoid occasional allocations var ( + hasherZeroChunk = [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} hasherBoolFalse = [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} hasherBoolTrue = [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} @@ -108,10 +110,10 @@ func HashUint64[T ~uint64](h *Hasher, n T) { h.insertChunk(buffer, 0) } -// HashUint64Ptr hashes a uint64. +// HashUint64Pointer hashes a uint64. // // Note, a nil pointer is hashed as zero. -func HashUint64Ptr[T ~uint64](h *Hasher, n *T) { +func HashUint64Pointer[T ~uint64](h *Hasher, n *T) { var buffer [32]byte if n != nil { binary.LittleEndian.PutUint64(buffer[:], uint64(*n)) @@ -153,10 +155,18 @@ func HashStaticBytes[T commonBytesLengths](h *Hasher, blob *T) { h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob))) } -// HashStaticBytesPtr hashes a static binary blob. -func HashStaticBytesPtr[T commonBytesLengths](h *Hasher, blob *T) { +// HashStaticBytesPointer hashes a static binary blob. +// +// Note, a nil pointer is hashed as an empty binary blob. +func HashStaticBytesPointer[T commonBytesLengths](h *Hasher, blob *T) { + // If the pointer is nil, hash as an empty blob if blob == nil { - blob = new(T) // TODO(karalabe): Make this alloc free somehow? + // Go generics cannot do len(T{}), so we either allocate and bear the GC + // costs, or we use reflect. Both are kind of crappy. + // + // https://github.com/golang/go/issues/69100 + h.hashBytesEmpty(reflect.TypeOf(blob).Elem().Len()) + return } // The code below should have used `blob[:]`, alas Go's generics compiler // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 @@ -176,15 +186,25 @@ func HashDynamicBytes(h *Hasher, blob []byte, maxSize uint64) { } // HashStaticObject hashes a static ssz object. -func HashStaticObject(h *Hasher, obj StaticObject) { +func HashStaticObject[T newableStaticObject[U], U any](h *Hasher, obj T) { h.descendLayer() + if obj == nil { + // If the object is nil, pull up its zero value. This will be very slow, + // but it should not happen in production, only during tests mostly. + obj = zeroValueStatic[T, U]() + } obj.DefineSSZ(h.codec) h.ascendLayer(0) } // HashDynamicObject hashes a dynamic ssz object.
-func HashDynamicObject(h *Hasher, obj DynamicObject) { +func HashDynamicObject[T newableDynamicObject[U], U any](h *Hasher, obj T) { h.descendLayer() + if obj == nil { + // If the object is nil, pull up its zero value. This will be very slow, + // but it should not happen in production, only during tests mostly. + obj = zeroValueDynamic[T, U]() + } obj.DefineSSZ(h.codec) h.ascendLayer(0) } @@ -197,7 +217,14 @@ func HashArrayOfBits[T commonBitsLengths](h *Hasher, bits *T) { } // HashSliceOfBits hashes a dynamic slice of (packed) bits. +// +// Note, a nil slice of bits is hashed as an empty bit list. func HashSliceOfBits(h *Hasher, bits bitfield.Bitlist, maxBits uint64) { + // If the slice of bits is nil (i.e. uninitialized), hash it as empty + if bits == nil { + HashSliceOfBits(h, bitlistZero, maxBits) + return + } // Parse the bit-list into a hashable representation var ( msb = uint8(bitops.Len8(bits[len(bits)-1])) - 1 @@ -422,6 +449,21 @@ func (h *Hasher) hashBytes(blob []byte) { h.ascendLayer(0) } +// hashBytesEmpty is analogous to hashBytes, but where the input is all zeroes, +// so it's passed by length, not by content. This allows hashing zero pointers +// without allocating them first. +func (h *Hasher) hashBytesEmpty(size int) { + // If the blob is small, accumulate as a single chunk + if size <= 32 { + h.insertChunk(hasherZeroChunk, 0) + return + } + // Otherwise hash it as its own tree + h.descendLayer() + h.insertBlobChunksEmpty(size) + h.ascendLayer(0) +} + // insertChunk adds a chunk to the accumulators, collapsing matching pairs. func (h *Hasher) insertChunk(chunk [32]byte, depth int) { // Insert the chunk into the accumulator @@ -495,6 +537,16 @@ func (h *Hasher) insertBlobChunks(blob []byte) { } } +// insertBlobChunksEmpty is analogous to insertBlobChunks, but where the input +// is all zeroes, so it's passed by length, not by content. This allows hashing +// zero pointers without allocating them first. +func (h *Hasher) insertBlobChunksEmpty(size int) { + for size > 0 { // will insert a full chunk for the last segment + h.insertChunk(hasherZeroChunk, 0) + size -= 32 + } +} + // descendLayer starts a new hashing layer, acting as a barrier to prevent the // chunks from being collapsed into previous pending ones. func (h *Hasher) descendLayer() { diff --git a/sizer.go index e910d2c..9f30898 100644 --- a/sizer.go +++ b/sizer.go @@ -4,7 +4,9 @@ package ssz -import "github.com/prysmaticlabs/go-bitfield" +import ( + "github.com/prysmaticlabs/go-bitfield" +) // Sizer is an SSZ static and dynamic size computer. type Sizer struct { @@ -24,8 +26,13 @@ func SizeDynamicBytes(siz *Sizer, blobs []byte) uint32 { // SizeSliceOfBits returns the serialized size of the dynamic part of a slice of // bits. +// +// Note, a nil slice of bits is sized as an empty bit list. func SizeSliceOfBits(siz *Sizer, bits bitfield.Bitlist) uint32 { - return uint32(len(bits)) + if bits != nil { + return uint32(len(bits)) + } + return uint32(len(bitlistZero)) } // SizeSliceOfUint64s returns the serialized size of the dynamic part of a dynamic @@ -36,7 +43,12 @@ func SizeSliceOfUint64s[T ~uint64](siz *Sizer, ns []T) uint32 { // SizeDynamicObject returns the serialized size of the dynamic part of a dynamic // object. -func SizeDynamicObject[T DynamicObject](siz *Sizer, obj T) uint32 { +func SizeDynamicObject[T newableDynamicObject[U], U any](siz *Sizer, obj T) uint32 { + if obj == nil { + // If the object is nil, pull up its zero value.
This will be very slow, + // but it should not happen in production, only during tests mostly. + obj = zeroValueDynamic[T, U]() + } return obj.SizeSSZ(siz, false) } diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 476f06f..861c8d5 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -56,6 +56,12 @@ func TestConsensusSpecBasics(t *testing.T) { } func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind string) { + // Sanity check that the zero values can be handled before diving into the + // actual test datasets. This is mostly to catch implementation faults with + // uninitialized field handling. + t.Run(fmt.Sprintf("zero/%s", kind), func(t *testing.T) { + testZeroValue[T, U](t, ssz.ForkUnknown) + }) // Filter out the valid tests for this specific type path := filepath.Join(consensusSpecTestsBasicsRoot, "valid") @@ -286,6 +292,13 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string, } // Some specific fork was requested, look that up explicitly for _, fork := range forks { + // Sanity check that the zero values can be handled before diving into the + // actual test datasets. This is mostly to catch implementation faults with + // uninitialized field handling. + t.Run(fmt.Sprintf("zero/%s/%s", fork, kind), func(t *testing.T) { + testZeroValue[T, U](t, ssz.ForkMapping[fork]) + }) + // Zero value on this specific fork ok, pull in the consensus dataset path := filepath.Join(consensusSpecTestsRoot, fork, "ssz_static", kind, "ssz_random") tests, err := os.ReadDir(path) diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go index f7b08d7..dce58a0 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go @@ -2,7 +2,9 @@ package consensus_spec_tests -import "github.com/karalabe/ssz" +import ( + "github.com/karalabe/ssz" +) // Cached static size computed on package init. 
var staticSizeCacheAttestation = ssz.PrecomputeStaticSizeCache((*Attestation)(nil))
diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go
index 28c07c4..d746a4b 100644
--- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go
@@ -97,8 +97,8 @@ func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) {
 		ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes
 	}
 	if codec.Fork() >= ssz.ForkCapella {
-		ssz.DefineUint64Ptr(codec, &obj.NextWithdrawalIndex)          // Field (27) - NextWithdrawalIndex          - 8 bytes
-		ssz.DefineUint64Ptr(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.NextWithdrawalIndex)          // Field (27) - NextWithdrawalIndex          - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes
 		ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (29) - HistoricalSummaries - 4 bytes
 	}
 	// Define the dynamic data (fields)
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go
index 6355c0f..6ca7b3f 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go
@@ -40,11 +40,11 @@ func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) {
 	ssz.DefineStaticBytes(codec, &obj.BlockHash)        // Field (12) - BlockHash        - 32 bytes
 	ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes
 	if codec.Fork() >= ssz.ForkCapella {
-		ssz.DefineStaticBytesPtr(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes
+		ssz.DefineStaticBytesPointer(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes
 	}
 	if codec.Fork() >= ssz.ForkDeneb {
-		ssz.DefineUint64Ptr(codec, &obj.BlobGasUsed)   // Field (15) - BlobGasUsed   - 8 bytes
-		ssz.DefineUint64Ptr(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.BlobGasUsed)   // Field (15) - BlobGasUsed   - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes
 	}
 	// Define the dynamic data (fields)
 	ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes
diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go
index cb2b48b..19289a0 100644
--- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go
+++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go
@@ -46,8 +46,8 @@ func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) {
 		ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Withdrawals, 16) // Offset (14) - Withdrawals - 4 bytes
 	}
 	if codec.Fork() >= ssz.ForkDeneb {
-		ssz.DefineUint64Ptr(codec, &obj.BlobGasUsed)   // Field (15) - BlobGasUsed   - 8 bytes
-		ssz.DefineUint64Ptr(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.BlobGasUsed)   // Field (15) - BlobGasUsed   - 8 bytes
+		ssz.DefineUint64Pointer(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes
 	}
 	// Define the dynamic data (fields)
 	ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes
diff --git a/tests/zeroval_test.go b/tests/zeroval_test.go
new file mode 100644
index 0000000..0ee6c8c
--- /dev/null
+++ b/tests/zeroval_test.go
@@ -0,0 +1,77 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package tests
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+
+	"github.com/karalabe/ssz"
+)
+
+// testZeroValue does a bunch of encoding/decoding/hashing variations on the zero
+// value of input types to check that the SSZ implementation can correctly handle
+// the different uninitialized fields.
+func testZeroValue[T newableObject[U], U any](t *testing.T, fork ssz.Fork) {
+	// Verify that streaming/buffering encoding of a zero value results in the
+	// same binary (maybe incorrect, we just want to see that they're the same).
+	str1 := new(bytes.Buffer)
+	if err := ssz.EncodeToStream(str1, T(new(U)), fork); err != nil {
+		t.Fatalf("failed to stream-encode zero-value object: %v", err)
+	}
+	bin1 := make([]byte, ssz.Size(T(new(U)), fork))
+	if err := ssz.EncodeToBytes(bin1, T(new(U)), fork); err != nil {
+		t.Fatalf("failed to buffer-encode zero-value object: %v", err)
+	}
+	if !bytes.Equal(str1.Bytes(), bin1) {
+		t.Fatalf("zero-value encoding mismatch: stream %x, buffer %x", str1.Bytes(), bin1)
+	}
+	// Decode the previous encoding in both streaming/buffering mode and check
+	// that the produced objects are the same.
+	obj1 := T(new(U))
+	if err := ssz.DecodeFromStream(bytes.NewReader(bin1), obj1, uint32(len(bin1)), fork); err != nil {
+		t.Fatalf("failed to stream-decode zero-value object: %v", err)
+	}
+	obj2 := T(new(U))
+	if err := ssz.DecodeFromBytes(bin1, obj2, fork); err != nil {
+		t.Fatalf("failed to buffer-decode zero-value object: %v", err)
+	}
+	if !reflect.DeepEqual(obj1, obj2) {
+		t.Fatalf("zero-value decoding mismatch: stream %+v, buffer %+v", obj1, obj2)
+	}
+	// We can't compare the decoded zero-value to the true zero-values as pointer
+	// nil-ness might be different. To verify that the decoding was successful, do
+	// yet another round of encodings and compare them to the original ones.
+	str2 := new(bytes.Buffer)
+	if err := ssz.EncodeToStream(str2, obj1, fork); err != nil {
+		t.Fatalf("failed to stream-encode decoded object: %v", err)
+	}
+	bin2 := make([]byte, ssz.Size(obj1, fork))
+	if err := ssz.EncodeToBytes(bin2, obj1, fork); err != nil {
+		t.Fatalf("failed to buffer-encode decoded object: %v", err)
+	}
+	if !bytes.Equal(str2.Bytes(), bin2) {
+		t.Fatalf("re-encoding mismatch: stream %x, buffer %x", str2.Bytes(), bin2)
+	}
+	if !bytes.Equal(bin1, bin2) {
+		t.Fatalf("re-encoding mismatch: zero-value %x, decoded %x", bin1, bin2)
+	}
+	// Encoding/decoding seems to work, hash the zero-value and re-encoded value
+	// in both sequential/concurrent mode and verify the results.
+	hashes := map[string][32]byte{
+		"zero-value-sequential": ssz.HashSequential(T(new(U)), fork),
+		"zero-value-concurrent": ssz.HashConcurrent(T(new(U)), fork),
+		"decoded-sequential":    ssz.HashSequential(obj1, fork),
+		"decoded-concurrent":    ssz.HashConcurrent(obj1, fork),
+	}
+	for key1, hash1 := range hashes {
+		for key2, hash2 := range hashes {
+			if hash1 != hash2 {
+				t.Errorf("hash mismatch: %s %x, %s %x", key1, hash1, key2, hash2)
+			}
+		}
+	}
+}
diff --git a/zeroes.go b/zeroes.go
new file mode 100644
index 0000000..09091a3
--- /dev/null
+++ b/zeroes.go
@@ -0,0 +1,46 @@
+// ssz: Go Simple Serialize (SSZ) codec library
+// Copyright 2024 ssz Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package ssz
+
+import (
+	"reflect"
+	"sync"
+)
+
+// zeroCache contains zero-values for dynamic objects that got hit during codec
+// operations. This is a global sync map, meaning it will be slow to access, but
+// encoding/hashing zero values should not happen in production code, it's more
+// of a sanity thing to handle weird corner-cases without blowing up.
+var zeroCache = new(sync.Map)
+
+// zeroValueStatic retrieves a previously created (or creates one on the fly)
+// zero value for a static object to support operating on half-initialized
+// objects (useful for tests mainly, but can also avoid crashes in case of bad
+// calling parameters).
+func zeroValueStatic[T newableStaticObject[U], U any]() T {
+	kind := reflect.TypeFor[U]()
+
+	if val, ok := zeroCache.Load(kind); ok {
+		return val.(T)
+	}
+	val := T(new(U))
+	zeroCache.Store(kind, val)
+	return val
+}
+
+// zeroValueDynamic retrieves a previously created (or creates one on the fly)
+// zero value for a dynamic object to support operating on half-initialized
+// objects (useful for tests mainly, but can also avoid crashes in case of bad
+// calling parameters).
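+//
+// Note, this mirrors zeroValueStatic above; the two exist side by side only
+// because the newableStaticObject and newableDynamicObject constraints differ.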
+func zeroValueDynamic[T newableDynamicObject[U], U any]() T { + kind := reflect.TypeFor[U]() + + if val, ok := zeroCache.Load(kind); ok { + return val.(T) + } + val := T(new(U)) + zeroCache.Store(kind, val) + return val +} From 991387ea36c11b92e4787ecf17c66e794a738c30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Wed, 11 Sep 2024 19:15:38 +0300 Subject: [PATCH 09/12] ssz, tests: rework monolith API to support clearing pointer fields --- cmd/sszgen/gen.go | 57 ++--- codec.go | 231 ++++++++++++++++-- decoder.go | 215 ++++++++++++---- encoder.go | 207 ++++++++++++---- forks.go | 7 + hasher.go | 104 +++++++- .../gen_attestation_data_variation_1_ssz.go | 14 +- .../gen_attestation_data_variation_2_ssz.go | 14 +- .../gen_attestation_data_variation_3_ssz.go | 14 +- .../gen_attestation_ssz.go | 4 +- .../gen_attestation_variation_1_ssz.go | 10 +- .../gen_attestation_variation_2_ssz.go | 10 +- .../gen_attestation_variation_3_ssz.go | 11 +- .../gen_beacon_block_body_monolith_ssz.go | 55 ++--- .../gen_beacon_state_monolith_ssz.go | 101 ++++---- ...n_execution_payload_header_monolith_ssz.go | 56 +++-- .../gen_execution_payload_monolith_ssz.go | 63 ++--- .../consensus-spec-tests/types_monoliths.go | 16 +- .../consensus-spec-tests/types_variation.go | 12 +- 19 files changed, 827 insertions(+), 374 deletions(-) diff --git a/cmd/sszgen/gen.go b/cmd/sszgen/gen.go index 2e90303..a06ef34 100644 --- a/cmd/sszgen/gen.go +++ b/cmd/sszgen/gen.go @@ -12,6 +12,7 @@ import ( "io" "math" "sort" + "strings" ) const ( @@ -251,7 +252,7 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } } - call := generateCall(dynOpsets[i].(*opsetDynamic).size, "sizer", "obj."+dynFields[i]) + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "", "sizer", "obj."+dynFields[i]) fmt.Fprintf(&b, " size += ssz.%s\n", call) if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { fmt.Fprintf(&b, " }\n") @@ -290,7 +291,7 @@ func generateSizeSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " if sizer.Fork() >= ssz.Fork%s {\n", dynForks[i]) } } - call := generateCall(dynOpsets[i].(*opsetDynamic).size, "sizer", "obj."+dynFields[i]) + call := generateCall(dynOpsets[i].(*opsetDynamic).size, "", "sizer", "obj."+dynFields[i]) fmt.Fprintf(&b, " size += ssz.%s\n", call) if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { fmt.Fprintf(&b, " }\n") @@ -343,17 +344,10 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprint(&b, " // Define the static data (fields and dynamic offsets)\n") } for i := 0; i < len(typ.fields); i++ { - if typ.forks[i] != "" && (i == 0 || typ.forks[i] != typ.forks[i-1]) { - if typ.forks[i][0] == '!' { - fmt.Fprintf(&b, " if codec.Fork() < ssz.Fork%s {\n", typ.forks[i][1:]) - } else { - fmt.Fprintf(&b, " if codec.Fork() >= ssz.Fork%s {\n", typ.forks[i]) - } - } field := typ.fields[i] switch opset := typ.opsets[i].(type) { case *opsetStatic: - call := generateCall(opset.define, "codec", "obj."+field, opset.bytes...) + call := generateCall(opset.define, typ.forks[i], "codec", "obj."+field, opset.bytes...) 
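+			// When a fork tag is set, generateCall rewrites the op into its OnFork
+			// variant with a ForkFilter argument appended; e.g. a Deneb-only field
+			// would come out roughly as (sketch, hypothetical field):
+			//
+			//   ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkDeneb})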
switch len(opset.bytes) { case 0: typ := typ.types[i].(*types.Pointer).Elem().(*types.Named) @@ -364,18 +358,12 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, opset.bytes[0]*opset.bytes[1]) } case *opsetDynamic: - call := generateCall(opset.defineOffset, "codec", "obj."+field, opset.limits...) + call := generateCall(opset.defineOffset, typ.forks[i], "codec", "obj."+field, opset.limits...) fmt.Fprintf(&b, " ssz.%s // Offset ("+indexRule+") - "+nameRule+" - %"+sizeRule+"d bytes\n", call, i, field, offsetBytes) } - if typ.forks[i] != "" && (i == len(typ.forks)-1 || typ.forks[i] != typ.forks[i+1]) { - fmt.Fprintf(&b, " }\n") - } } if !typ.static { - if typ.forks[len(typ.forks)-1] == "" { - fmt.Fprint(&b, "\n") - } - fmt.Fprint(&b, " // Define the dynamic data (fields)\n") + fmt.Fprint(&b, "\n // Define the dynamic data (fields)\n") var ( dynIndices []int dynFields []string @@ -393,18 +381,8 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { for i := 0; i < len(dynFields); i++ { opset := (dynOpsets[i]).(*opsetDynamic) - if dynForks[i] != "" && (i == 0 || dynForks[i] != dynForks[i-1]) { - if dynForks[i][0] == '!' { - fmt.Fprintf(&b, " if codec.Fork() < ssz.Fork%s {\n", dynForks[i][1:]) - } else { - fmt.Fprintf(&b, " if codec.Fork() >= ssz.Fork%s {\n", dynForks[i]) - } - } - call := generateCall(opset.defineContent, "codec", "obj."+dynFields[i], opset.limits...) + call := generateCall(opset.defineContent, dynForks[i], "codec", "obj."+dynFields[i], opset.limits...) fmt.Fprintf(&b, " ssz.%s // Field ("+indexRule+") - "+nameRule+" - ? bytes\n", call, dynIndices[i], dynFields[i]) - if dynForks[i] != "" && (i == len(dynForks)-1 || dynForks[i] != dynForks[i+1]) { - fmt.Fprintf(&b, " }\n") - } } } fmt.Fprint(&b, "}\n") @@ -413,7 +391,8 @@ func generateDefineSSZ(ctx *genContext, typ *sszContainer) ([]byte, error) { // generateCall parses a Go template and fills it with the provided data. This // could be done more optimally, but we really don't care for a code generator. -func generateCall(tmpl string, recv string, field string, limits ...int) string { +func generateCall(tmpl string, fork string, recv string, field string, limits ...int) string { + // Generate the base call without taking forks into consideration t, err := template.New("").Parse(tmpl) if err != nil { panic(err) @@ -433,5 +412,21 @@ func generateCall(tmpl string, recv string, field string, limits ...int) string if err := t.Execute(buf, d); err != nil { panic(err) } - return string(buf.Bytes()) + call := string(buf.Bytes()) + + // If a fork filter was specified, inject it now into the call + if fork != "" { + // Mutate the call to the fork variant + call = strings.ReplaceAll(call, "(", "OnFork(") + + // Inject a fork filter as the last parameter + var filter string + if fork[0] == '!' { + filter = fmt.Sprintf("ssz.ForkFilter{Removed: ssz.Fork%s}", fork[1:]) + } else { + filter = fmt.Sprintf("ssz.ForkFilter{Added: ssz.Fork%s}", fork) + } + call = strings.ReplaceAll(call, ")", ","+filter+")") + } + return call } diff --git a/codec.go b/codec.go index 6e5bf63..2140e95 100644 --- a/codec.go +++ b/codec.go @@ -55,11 +55,6 @@ func (c *Codec) DefineHasher(impl func(has *Hasher)) { } } -// Fork retrieves the current fork (if any) that the codec is operating in. -func (c *Codec) Fork() Fork { - return c.fork -} - // DefineBool defines the next field as a 1 byte boolean. 
func DefineBool[T ~bool](c *Codec, v *T) { if c.enc != nil { @@ -125,17 +120,17 @@ func DefineUint64[T ~uint64](c *Codec, n *T) { HashUint64(c.has, *n) } -// DefineUint64Pointer defines the next field as a uint64. -func DefineUint64Pointer[T ~uint64](c *Codec, n **T) { +// DefineUint64PointerOnFork defines the next field as a uint64 if present in a fork. +func DefineUint64PointerOnFork[T ~uint64](c *Codec, n **T, filter ForkFilter) { if c.enc != nil { - EncodeUint64Pointer(c.enc, *n) + EncodeUint64PointerOnFork(c.enc, *n, filter) return } if c.dec != nil { - DecodeUint64Pointer(c.dec, n) + DecodeUint64PointerOnFork(c.dec, n, filter) return } - HashUint64Pointer(c.has, *n) + HashUint64PointerOnFork(c.has, *n, filter) } // DefineUint256 defines the next field as a uint256. @@ -178,18 +173,18 @@ func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) { HashStaticBytes(c.has, blob) } -// DefineStaticBytesPointer defines the next field as static binary blob. This method -// can be used for byte arrays. -func DefineStaticBytesPointer[T commonBytesLengths](c *Codec, blob **T) { +// DefineStaticBytesPointerOnFork defines the next field as static binary blob if present +// in a fork. This method can be used for byte arrays. +func DefineStaticBytesPointerOnFork[T commonBytesLengths](c *Codec, blob **T, filter ForkFilter) { if c.enc != nil { - EncodeStaticBytesPointer(c.enc, *blob) + EncodeStaticBytesPointerOnFork(c.enc, *blob, filter) return } if c.dec != nil { - DecodeStaticBytesPointer(c.dec, blob) + DecodeStaticBytesPointerOnFork(c.dec, blob, filter) return } - HashStaticBytesPointer(c.has, *blob) + HashStaticBytesPointerOnFork(c.has, *blob, filter) } // DefineCheckedStaticBytes defines the next field as static binary blob. This @@ -220,6 +215,20 @@ func DefineDynamicBytesOffset(c *Codec, blob *[]byte, maxSize uint64) { HashDynamicBytes(c.has, *blob, maxSize) } +// DefineDynamicBytesOffsetOnFork defines the next field as dynamic binary blob +// if present in a fork. +func DefineDynamicBytesOffsetOnFork(c *Codec, blob *[]byte, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicBytesOffsetOnFork(c.enc, *blob, filter) + return + } + if c.dec != nil { + DecodeDynamicBytesOffsetOnFork(c.dec, blob, filter) + return + } + HashDynamicBytesOnFork(c.has, *blob, maxSize, filter) +} + // DefineDynamicBytesContent defines the next field as dynamic binary blob. func DefineDynamicBytesContent(c *Codec, blob *[]byte, maxSize uint64) { if c.enc != nil { @@ -233,6 +242,20 @@ func DefineDynamicBytesContent(c *Codec, blob *[]byte, maxSize uint64) { // No hashing, done at the offset position } +// DefineDynamicBytesContentOnFork defines the next field as dynamic binary blob +// if present in a fork. +func DefineDynamicBytesContentOnFork(c *Codec, blob *[]byte, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeDynamicBytesContentOnFork(c.enc, *blob, filter) + return + } + if c.dec != nil { + DecodeDynamicBytesContentOnFork(c.dec, blob, maxSize, filter) + return + } + // No hashing, done at the offset position +} + // DefineStaticObject defines the next field as a static ssz object. func DefineStaticObject[T newableStaticObject[U], U any](c *Codec, obj *T) { if c.enc != nil { @@ -246,6 +269,20 @@ func DefineStaticObject[T newableStaticObject[U], U any](c *Codec, obj *T) { HashStaticObject(c.has, *obj) } +// DefineStaticObjectOnFork defines the next field as a static ssz object if +// present in a fork. 
+func DefineStaticObjectOnFork[T newableStaticObject[U], U any](c *Codec, obj *T, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeStaticObjectOnFork(c.enc, *obj, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeStaticObjectOnFork(c.dec, obj, filter)
+		return
+	}
+	HashStaticObjectOnFork(c.has, *obj, filter)
+}
+
 // DefineDynamicObjectOffset defines the next field as a dynamic ssz object.
 func DefineDynamicObjectOffset[T newableDynamicObject[U], U any](c *Codec, obj *T) {
 	if c.enc != nil {
@@ -259,6 +296,20 @@ func DefineDynamicObjectOffset[T newableDynamicObject[U], U any](c *Codec, obj *
 	HashDynamicObject(c.has, *obj)
 }
 
+// DefineDynamicObjectOffsetOnFork defines the next field as a dynamic ssz object
+// if present in a fork.
+func DefineDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](c *Codec, obj *T, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeDynamicObjectOffsetOnFork(c.enc, *obj, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeDynamicObjectOffsetOnFork(c.dec, obj, filter)
+		return
+	}
+	HashDynamicObjectOnFork(c.has, *obj, filter)
+}
+
 // DefineDynamicObjectContent defines the next field as a dynamic ssz object.
 func DefineDynamicObjectContent[T newableDynamicObject[U], U any](c *Codec, obj *T) {
 	if c.enc != nil {
@@ -272,6 +323,20 @@ func DefineDynamicObjectContent[T newableDynamicObject[U], U any](c *Codec, obj
 	// No hashing, done at the offset position
 }
 
+// DefineDynamicObjectContentOnFork defines the next field as a dynamic ssz object
+// if present in a fork.
+func DefineDynamicObjectContentOnFork[T newableDynamicObject[U], U any](c *Codec, obj *T, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeDynamicObjectContentOnFork(c.enc, *obj, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeDynamicObjectContentOnFork(c.dec, obj, filter)
+		return
+	}
+	// No hashing, done at the offset position
+}
+
 // DefineArrayOfBits defines the next field as a static array of (packed) bits.
 func DefineArrayOfBits[T commonBitsLengths](c *Codec, bits *T, size uint64) {
 	if c.enc != nil {
@@ -337,6 +402,20 @@ func DefineSliceOfUint64sOffset[T ~uint64](c *Codec, ns *[]T, maxItems uint64) {
 	HashSliceOfUint64s(c.has, *ns, maxItems)
 }
 
+// DefineSliceOfUint64sOffsetOnFork defines the next field as a dynamic slice of
+// uint64s if present in a fork.
+func DefineSliceOfUint64sOffsetOnFork[T ~uint64](c *Codec, ns *[]T, maxItems uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfUint64sOffsetOnFork(c.enc, *ns, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfUint64sOffsetOnFork(c.dec, ns, filter)
+		return
+	}
+	HashSliceOfUint64sOnFork(c.has, *ns, maxItems, filter)
+}
+
 // DefineSliceOfUint64sContent defines the next field as a dynamic slice of uint64s.
 func DefineSliceOfUint64sContent[T ~uint64](c *Codec, ns *[]T, maxItems uint64) {
 	if c.enc != nil {
@@ -350,6 +429,20 @@ func DefineSliceOfUint64sContent[T ~uint64](c *Codec, ns *[]T, maxItems uint64)
 	// No hashing, done at the offset position
 }
 
+// DefineSliceOfUint64sContentOnFork defines the next field as a dynamic slice of
+// uint64s if present in a fork.
+func DefineSliceOfUint64sContentOnFork[T ~uint64](c *Codec, ns *[]T, maxItems uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfUint64sContentOnFork(c.enc, *ns, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfUint64sContentOnFork(c.dec, ns, maxItems, filter)
+		return
+	}
+	// No hashing, done at the offset position
+}
+
 // DefineArrayOfStaticBytes defines the next field as a static array of static
 // binary blobs.
func DefineArrayOfStaticBytes[T commonBytesArrayLengths[U], U commonBytesLengths](c *Codec, blobs *T) { @@ -395,8 +488,8 @@ func DefineCheckedArrayOfStaticBytes[T commonBytesLengths](c *Codec, blobs *[]T, HashCheckedArrayOfStaticBytes(c.has, *blobs) } -// DefineSliceOfStaticBytesOffset defines the next field as a dynamic slice of static -// binary blobs. +// DefineSliceOfStaticBytesOffset defines the next field as a dynamic slice of +// static binary blobs. func DefineSliceOfStaticBytesOffset[T commonBytesLengths](c *Codec, bytes *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfStaticBytesOffset(c.enc, *bytes) @@ -409,6 +502,20 @@ func DefineSliceOfStaticBytesOffset[T commonBytesLengths](c *Codec, bytes *[]T, HashSliceOfStaticBytes(c.has, *bytes, maxItems) } +// DefineSliceOfStaticBytesOffsetOnFork defines the next field as a dynamic slice +// of static binary blobs if present in a fork. +func DefineSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](c *Codec, bytes *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticBytesOffsetOnFork(c.enc, *bytes, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticBytesOffsetOnFork(c.dec, bytes, filter) + return + } + HashSliceOfStaticBytesOnFork(c.has, *bytes, maxItems, filter) +} + // DefineSliceOfStaticBytesContent defines the next field as a dynamic slice of static // binary blobs. func DefineSliceOfStaticBytesContent[T commonBytesLengths](c *Codec, blobs *[]T, maxItems uint64) { @@ -423,6 +530,20 @@ func DefineSliceOfStaticBytesContent[T commonBytesLengths](c *Codec, blobs *[]T, // No hashing, done at the offset position } +// DefineSliceOfStaticBytesContentOnFork defines the next field as a dynamic slice +// of static binary blobs if present in a fork. +func DefineSliceOfStaticBytesContentOnFork[T commonBytesLengths](c *Codec, blobs *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticBytesContentOnFork(c.enc, *blobs, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticBytesContentOnFork(c.dec, blobs, maxItems, filter) + return + } + // No hashing, done at the offset position +} + // DefineSliceOfDynamicBytesOffset defines the next field as a dynamic slice of dynamic // binary blobs. func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) { @@ -437,8 +558,8 @@ func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64, HashSliceOfDynamicBytes(c.has, *blobs, maxItems, maxSize) } -// DefineSliceOfDynamicBytesContent defines the next field as a dynamic slice of dynamic -// binary blobs. +// DefineSliceOfDynamicBytesContent defines the next field as a dynamic slice of +// dynamic binary blobs. func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) { if c.enc != nil { EncodeSliceOfDynamicBytesContent(c.enc, *blobs) @@ -451,8 +572,8 @@ func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64 // No hashing, done at the offset position } -// DefineSliceOfStaticObjectsOffset defines the next field as a dynamic slice of static -// ssz objects. +// DefineSliceOfStaticObjectsOffset defines the next field as a dynamic slice of +// static ssz objects. 
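+//
+// For reference, the generated monolith code in this patch emits calls like
+// (sketch):
+//
+//	ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216)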
func DefineSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfStaticObjectsOffset(c.enc, *objects) @@ -465,6 +586,20 @@ func DefineSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](c *Codec, HashSliceOfStaticObjects(c.has, *objects, maxItems) } +// DefineSliceOfStaticObjectsOffsetOnFork defines the next field as a dynamic +// slice of static ssz objects if present in a fork. +func DefineSliceOfStaticObjectsOffsetOnFork[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticObjectsOffsetOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticObjectsOffsetOnFork(c.dec, objects, filter) + return + } + HashSliceOfStaticObjectsOnFork(c.has, *objects, maxItems, filter) +} + // DefineSliceOfStaticObjectsContent defines the next field as a dynamic slice of static // ssz objects. func DefineSliceOfStaticObjectsContent[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { @@ -476,11 +611,25 @@ func DefineSliceOfStaticObjectsContent[T newableStaticObject[U], U any](c *Codec DecodeSliceOfStaticObjectsContent(c.dec, objects, maxItems) return } - // No hashing, done at the offset posiiton + // No hashing, done at the offset position } -// DefineSliceOfDynamicObjectsOffset defines the next field as a dynamic slice of dynamic -// ssz objects. +// DefineSliceOfStaticObjectsContentOnFork defines the next field as a dynamic +// slice of static ssz objects if present in a fork. +func DefineSliceOfStaticObjectsContentOnFork[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfStaticObjectsContentOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfStaticObjectsContentOnFork(c.dec, objects, maxItems, filter) + return + } + // No hashing, done at the offset position +} + +// DefineSliceOfDynamicObjectsOffset defines the next field as a dynamic slice of +// dynamic ssz objects. func DefineSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfDynamicObjectsOffset(c.enc, *objects) @@ -493,8 +642,22 @@ func DefineSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](c *Code HashSliceOfDynamicObjects(c.has, *objects, maxItems) } -// DefineSliceOfDynamicObjectsContent defines the next field as a dynamic slice of dynamic -// ssz objects. +// DefineSliceOfDynamicObjectsOffsetOnFork defines the next field as a dynamic +// slice of dynamic ssz objects if present in a fork. +func DefineSliceOfDynamicObjectsOffsetOnFork[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicObjectsOffsetOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicObjectsOffsetOnFork(c.dec, objects, filter) + return + } + HashSliceOfDynamicObjectsOnFork(c.has, *objects, maxItems, filter) +} + +// DefineSliceOfDynamicObjectsContent defines the next field as a dynamic slice +// of dynamic ssz objects. 
func DefineSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { if c.enc != nil { EncodeSliceOfDynamicObjectsContent(c.enc, *objects) @@ -506,3 +669,17 @@ func DefineSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](c *Cod } // No hashing, done at the offset position } + +// DefineSliceOfDynamicObjectsContentOnFork defines the next field as a dynamic +// slice of dynamic ssz objects if present in a fork. +func DefineSliceOfDynamicObjectsContentOnFork[T newableDynamicObject[U], U any](c *Codec, objects *[]T, maxItems uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicObjectsContentOnFork(c.enc, *objects, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicObjectsContentOnFork(c.dec, objects, maxItems, filter) + return + } + // No hashing, done at the offset position +} diff --git a/decoder.go b/decoder.go index 0134c34..bb87f62 100644 --- a/decoder.go +++ b/decoder.go @@ -74,11 +74,6 @@ type Decoder struct { sizess [][]uint32 // Stack of computed sizes from outer calls } -// Fork retrieves the current fork (if any) that the decoder is operating in. -func (dec *Decoder) Fork() Fork { - return dec.codec.fork -} - // DecodeBool parses a boolean. func DecodeBool[T ~bool](dec *Decoder, v *T) { if dec.err != nil { @@ -191,35 +186,22 @@ func DecodeUint64[T ~uint64](dec *Decoder, n *T) { } } -// DecodeUint64Pointer parses a uint64. +// DecodeUint64PointerOnFork parses a uint64 if present in a fork. If not, the +// uint64 pointer is set to nil. // // This method is similar to DecodeUint64, but will also initialize the pointer // if it is not allocated yet. -func DecodeUint64Pointer[T ~uint64](dec *Decoder, n **T) { - if dec.err != nil { +func DecodeUint64PointerOnFork[T ~uint64](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil return } - if dec.inReader != nil { - _, dec.err = io.ReadFull(dec.inReader, dec.buf[:8]) - if dec.err != nil { - return - } - if *n == nil { - *n = new(T) - } - *(*n) = T(binary.LittleEndian.Uint64(dec.buf[:8])) - dec.inRead += 8 - } else { - if len(dec.inBuffer) < 8 { - dec.err = io.ErrUnexpectedEOF - return - } - if *n == nil { - *n = new(T) - } - *(*n) = T(binary.LittleEndian.Uint64(dec.inBuffer)) - dec.inBuffer = dec.inBuffer[8:] + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) } + DecodeUint64(dec, *n) } // DecodeUint256 parses a uint256. @@ -298,35 +280,22 @@ func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { } } -// DecodeStaticBytesPointer parses a static binary blob. +// DecodeStaticBytesPointerOnFork parses a static binary blob if present in a fork. +// If not, the bytes are set to nil. // // This method is similar to DecodeStaticBytes, but will also initialize the // pointer if it is not allocated yet. 
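+//
+// Note, when the field is filtered out, the pointer is explicitly nilled rather
+// than left untouched, so decoding into a reused object cannot leak data from a
+// previous fork.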
-func DecodeStaticBytesPointer[T commonBytesLengths](dec *Decoder, blob **T) {
-	if dec.err != nil {
+func DecodeStaticBytesPointerOnFork[T commonBytesLengths](dec *Decoder, blob **T, filter ForkFilter) {
+	// If the field is not active in the current fork, clear out the output
+	if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) {
+		*blob = nil
 		return
 	}
-	if dec.inReader != nil {
-		if *blob == nil {
-			*blob = new(T)
-		}
-		// The code below should have used `**blob[:]`, alas Go's generics compiler
-		// is missing that (i.e. a bug): https://github.com/golang/go/issues/51740
-		_, dec.err = io.ReadFull(dec.inReader, unsafe.Slice(&(*(*blob))[0], len(*(*blob))))
-		dec.inRead += uint32(len(*(*blob)))
-	} else {
-		if *blob == nil {
-			*blob = new(T)
-		}
-		if len(dec.inBuffer) < len(*(*blob)) {
-			dec.err = io.ErrUnexpectedEOF
-			return
-		}
-		// The code below should have used `**blob[:]`, alas Go's generics compiler
-		// is missing that (i.e. a bug): https://github.com/golang/go/issues/51740
-		copy(unsafe.Slice(&(*(*blob))[0], len(*(*blob))), dec.inBuffer)
-		dec.inBuffer = dec.inBuffer[len(*(*blob)):]
+	// Otherwise fall back to the standard decoder
+	if *blob == nil {
+		*blob = new(T)
 	}
+	DecodeStaticBytes(dec, *blob)
 }
 
 // DecodeCheckedStaticBytes parses a static binary blob.
@@ -353,11 +322,22 @@ func DecodeCheckedStaticBytes(dec *Decoder, blob *[]byte, size uint64) {
 	}
 }
 
-// DecodeDynamicBytesOffset parses a dynamic binary blob.
+// DecodeDynamicBytesOffset parses the offset of a dynamic binary blob.
 func DecodeDynamicBytesOffset(dec *Decoder, blob *[]byte) {
 	dec.decodeOffset(false)
 }
 
+// DecodeDynamicBytesOffsetOnFork parses the offset of a dynamic binary blob if
+// present in a fork.
+func DecodeDynamicBytesOffsetOnFork(dec *Decoder, blob *[]byte, filter ForkFilter) {
+	// If the field is not active in the current fork, skip parsing the offset
+	if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard decoder
+	DecodeDynamicBytesOffset(dec, blob)
+}
+
 // DecodeDynamicBytesContent is the lazy data reader of DecodeDynamicBytesOffset.
 func DecodeDynamicBytesContent(dec *Decoder, blob *[]byte, maxSize uint64) {
 	if dec.err != nil {
@@ -391,6 +371,17 @@ func DecodeDynamicBytesContent(dec *Decoder, blob *[]byte, maxSize uint64) {
 	}
 }
 
+// DecodeDynamicBytesContentOnFork is the lazy data reader of DecodeDynamicBytesOffsetOnFork.
+func DecodeDynamicBytesContentOnFork(dec *Decoder, blob *[]byte, maxSize uint64, filter ForkFilter) {
+	// If the field is not active in the current fork, clear out the output
+	if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) {
+		*blob = nil
+		return
+	}
+	// Otherwise fall back to the standard decoder
+	DecodeDynamicBytesContent(dec, blob, maxSize)
+}
+
 // DecodeStaticObject parses a static ssz object.
 func DecodeStaticObject[T newableStaticObject[U], U any](dec *Decoder, obj *T) {
 	if dec.err != nil {
@@ -402,11 +393,32 @@ func DecodeStaticObject[T newableStaticObject[U], U any](dec *Decoder, obj *T) {
 	(*obj).DefineSSZ(dec.codec)
 }
 
+// DecodeStaticObjectOnFork parses a static ssz object if present in a fork.
+func DecodeStaticObjectOnFork[T newableStaticObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *obj = nil + return + } + // Otherwise fall back to the standard decoder + DecodeStaticObject(dec, obj) +} + // DecodeDynamicObjectOffset parses a dynamic ssz object. func DecodeDynamicObjectOffset[T newableDynamicObject[U], U any](dec *Decoder, obj *T) { dec.decodeOffset(false) } +// DecodeDynamicObjectOffsetOnFork parses a dynamic ssz object if present in a fork. +func DecodeDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicObjectOffset(dec, obj) +} + // DecodeDynamicObjectContent is the lazy data reader of DecodeDynamicObjectOffset. func DecodeDynamicObjectContent[T newableDynamicObject[U], U any](dec *Decoder, obj *T) { if dec.err != nil { @@ -427,6 +439,17 @@ func DecodeDynamicObjectContent[T newableDynamicObject[U], U any](dec *Decoder, dec.flushDynamics() } +// DecodeDynamicObjectContentOnFork is the lazy data reader of DecodeDynamicObjectOffsetOnFork. +func DecodeDynamicObjectContentOnFork[T newableDynamicObject[U], U any](dec *Decoder, obj *T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *obj = nil + return + } + // Otherwise fall back to the standard decoder + DecodeDynamicObjectContent(dec, obj) +} + // DecodeArrayOfBits parses a static array of (packed) bits. func DecodeArrayOfBits[T commonBitsLengths](dec *Decoder, bits *T, size uint64) { if dec.err != nil { @@ -547,6 +570,17 @@ func DecodeSliceOfUint64sOffset[T ~uint64](dec *Decoder, ns *[]T) { dec.decodeOffset(false) } +// DecodeSliceOfUint64sOffsetOnFork parses a dynamic slice of uint64s if present +// in a fork. +func DecodeSliceOfUint64sOffsetOnFork[T ~uint64](dec *Decoder, ns *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfUint64sOffset(dec, ns) +} + // DecodeSliceOfUint64sContent is the lazy data reader of DecodeSliceOfUint64sOffset. func DecodeSliceOfUint64sContent[T ~uint64](dec *Decoder, ns *[]T, maxItems uint64) { if dec.err != nil { @@ -596,6 +630,17 @@ func DecodeSliceOfUint64sContent[T ~uint64](dec *Decoder, ns *[]T, maxItems uint } } +// DecodeSliceOfUint64sContentOnFork is the lazy data reader of DecodeSliceOfUint64sOffsetOnFork. +func DecodeSliceOfUint64sContentOnFork[T ~uint64](dec *Decoder, ns *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *ns = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfUint64sContent(dec, ns, maxItems) +} + // DecodeArrayOfStaticBytes parses a static array of static binary blobs. 
func DecodeArrayOfStaticBytes[T commonBytesArrayLengths[U], U commonBytesLengths](dec *Decoder, blobs *T) { // The code below should have used `(*blobs)[:]`, alas Go's generics compiler @@ -672,6 +717,17 @@ func DecodeSliceOfStaticBytesOffset[T commonBytesLengths](dec *Decoder, blobs *[ dec.decodeOffset(false) } +// DecodeSliceOfStaticBytesOffsetOnFork parses a dynamic slice of static binary +// blobs if present in a fork. +func DecodeSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](dec *Decoder, blobs *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticBytesOffset(dec, blobs) +} + // DecodeSliceOfStaticBytesContent is the lazy data reader of DecodeSliceOfStaticBytesOffset. func DecodeSliceOfStaticBytesContent[T commonBytesLengths](dec *Decoder, blobs *[]T, maxItems uint64) { if dec.err != nil { @@ -731,6 +787,17 @@ func DecodeSliceOfStaticBytesContent[T commonBytesLengths](dec *Decoder, blobs * } } +// DecodeSliceOfStaticBytesContentOnFork is the lazy data reader of DecodeSliceOfStaticBytesOffsetOnFork. +func DecodeSliceOfStaticBytesContentOnFork[T commonBytesLengths](dec *Decoder, blobs *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *blobs = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticBytesContent(dec, blobs, maxItems) +} + // DecodeSliceOfDynamicBytesOffset parses a dynamic slice of dynamic binary blobs. func DecodeSliceOfDynamicBytesOffset(dec *Decoder, blobs *[][]byte) { dec.decodeOffset(false) @@ -796,6 +863,17 @@ func DecodeSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](dec *Deco dec.decodeOffset(false) } +// DecodeSliceOfStaticObjectsOffsetOnFork parses a dynamic slice of static ssz +// objects if present in a fork. +func DecodeSliceOfStaticObjectsOffsetOnFork[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticObjectsOffset(dec, objects) +} + // DecodeSliceOfStaticObjectsContent is the lazy data reader of DecodeSliceOfStaticObjectsOffset. func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64) { if dec.err != nil { @@ -842,11 +920,33 @@ func DecodeSliceOfStaticObjectsContent[T newableStaticObject[U], U any](dec *Dec } } +// DecodeSliceOfStaticObjectsContentOnFork is the lazy data reader of DecodeSliceOfStaticObjectsOffsetOnFork. 
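+//
+// Note, this content decoder must be given the same filter as its offset
+// counterpart: the offset is consumed while walking the static section, and
+// the dynamic data is only read here when that offset was actually parsed.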
+func DecodeSliceOfStaticObjectsContentOnFork[T newableStaticObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *objects = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfStaticObjectsContent(dec, objects, maxItems) +} + // DecodeSliceOfDynamicObjectsOffset parses a dynamic slice of dynamic ssz objects. func DecodeSliceOfDynamicObjectsOffset[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T) { dec.decodeOffset(false) } +// DecodeSliceOfDynamicObjectsOffsetOnFork parses a dynamic slice of dynamic ssz +// objects if present in a fork. +func DecodeSliceOfDynamicObjectsOffsetOnFork[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicObjectsOffset(dec, objects) +} + // DecodeSliceOfDynamicObjectsContent is the lazy data reader of DecodeSliceOfDynamicObjectsOffset. func DecodeSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64) { if dec.err != nil { @@ -903,6 +1003,17 @@ func DecodeSliceOfDynamicObjectsContent[T newableDynamicObject[U], U any](dec *D } } +// DecodeSliceOfDynamicObjectsContentOnFork is the lazy data reader of DecodeSliceOfDynamicObjectsOffsetOnFork. +func DecodeSliceOfDynamicObjectsContentOnFork[T newableDynamicObject[U], U any](dec *Decoder, objects *[]T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *objects = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicObjectsContent(dec, objects, maxItems) +} + // decodeOffset decodes the next uint32 as an offset and validates it. func (dec *Decoder) decodeOffset(list bool) { if dec.err != nil { diff --git a/encoder.go b/encoder.go index f9ca8d0..a57c1f6 100644 --- a/encoder.go +++ b/encoder.go @@ -81,11 +81,6 @@ type Encoder struct { offset uint32 // Offset tracker for dynamic fields } -// Fork retrieves the current fork (if any) that the encoder is operating in. -func (enc *Encoder) Fork() Fork { - return enc.codec.fork -} - // EncodeBool serializes a boolean. func EncodeBool[T ~bool](enc *Encoder, v T) { if enc.outWriter != nil { @@ -163,28 +158,20 @@ func EncodeUint64[T ~uint64](enc *Encoder, n T) { } } -// EncodeUint64Pointer serializes a uint64. +// EncodeUint64PointerOnFork serializes a uint64 if present in a fork. // // Note, a nil pointer is serialized as zero. 
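+//
+// A field is considered present when the active fork is at least filter.Added
+// and, if filter.Removed is set, still below it; all the OnFork variants below
+// share this same check.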
-func EncodeUint64Pointer[T ~uint64](enc *Encoder, n *T) { - if enc.outWriter != nil { - if enc.err != nil { - return - } - if n != nil { - binary.LittleEndian.PutUint64(enc.buf[:8], (uint64)(*n)) - _, enc.err = enc.outWriter.Write(enc.buf[:8]) - } else { - _, enc.err = enc.outWriter.Write(uint256Zero[:8]) - } - } else { - if n != nil { - binary.LittleEndian.PutUint64(enc.outBuffer, (uint64)(*n)) - } else { - copy(enc.outBuffer, uint256Zero[:8]) - } - enc.outBuffer = enc.outBuffer[8:] +func EncodeUint64PointerOnFork[T ~uint64](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint64[uint64](enc, 0) + return } + EncodeUint64(enc, *n) } // EncodeUint256 serializes a uint256. @@ -258,29 +245,21 @@ func EncodeStaticBytes[T commonBytesLengths](enc *Encoder, blob *T) { } } -// EncodeStaticBytesPointer serializes a static binary blob. +// EncodeStaticBytesPointerOnFork serializes a static binary blob if present in +// a fork. // // Note, a nil pointer is serialized as a zero-value blob. -func EncodeStaticBytesPointer[T commonBytesLengths](enc *Encoder, blob *T) { - // If the blob is nil, write a batch of zeroes and exit +func EncodeStaticBytesPointerOnFork[T commonBytesLengths](enc *Encoder, blob *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder if blob == nil { enc.encodeZeroes(reflect.TypeFor[T]().Len()) return } - // Blob not nil, write the actual data content - if enc.outWriter != nil { - if enc.err != nil { - return - } - // The code below should have used `*blob[:]`, alas Go's generics compiler - // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 - _, enc.err = enc.outWriter.Write(unsafe.Slice(&(*blob)[0], len(*blob))) - } else { - // The code below should have used `blob[:]`, alas Go's generics compiler - // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 - copy(enc.outBuffer, unsafe.Slice(&(*blob)[0], len(*blob))) - enc.outBuffer = enc.outBuffer[len(*blob):] - } + EncodeStaticBytes(enc, blob) } // EncodeCheckedStaticBytes serializes a static binary blob. @@ -317,6 +296,17 @@ func EncodeDynamicBytesOffset(enc *Encoder, blob []byte) { enc.offset += uint32(len(blob)) } +// EncodeDynamicBytesOffsetOnFork serializes a dynamic binary blob if present in +// a fork. +func EncodeDynamicBytesOffsetOnFork(enc *Encoder, blob []byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeDynamicBytesOffset(enc, blob) +} + // EncodeDynamicBytesContent is the lazy data writer for EncodeDynamicBytesOffset. func EncodeDynamicBytesContent(enc *Encoder, blob []byte) { if enc.outWriter != nil { @@ -330,7 +320,19 @@ func EncodeDynamicBytesContent(enc *Encoder, blob []byte) { } } +// EncodeDynamicBytesContentOnFork is the lazy data writer for EncodeDynamicBytesOffsetOnFork. 
+func EncodeDynamicBytesContentOnFork(enc *Encoder, blob []byte, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeDynamicBytesContent(enc, blob)
+}
+
 // EncodeStaticObject serializes a static ssz object.
+//
+// Note, nil will be encoded as a zero-value initialized object.
 func EncodeStaticObject[T newableStaticObject[U], U any](enc *Encoder, obj T) {
 	if enc.err != nil {
 		return
@@ -343,6 +345,18 @@ func EncodeStaticObject[T newableStaticObject[U], U any](enc *Encoder, obj T) {
 	obj.DefineSSZ(enc.codec)
 }
 
+// EncodeStaticObjectOnFork serializes a static ssz object if present in a fork.
+//
+// Note, nil will be encoded as a zero-value initialized object.
+func EncodeStaticObjectOnFork[T newableStaticObject[U], U any](enc *Encoder, obj T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeStaticObject(enc, obj)
+}
+
 // EncodeDynamicObjectOffset serializes a dynamic ssz object.
 //
 // Note, nil will be encoded as a zero-value initialized object.
@@ -365,6 +379,19 @@ func EncodeDynamicObjectOffset[T newableDynamicObject[U], U any](enc *Encoder, o
 	enc.offset += obj.SizeSSZ(enc.sizer, false)
 }
 
+// EncodeDynamicObjectOffsetOnFork serializes a dynamic ssz object if present in
+// a fork.
+//
+// Note, nil will be encoded as a zero-value initialized object.
+func EncodeDynamicObjectOffsetOnFork[T newableDynamicObject[U], U any](enc *Encoder, obj T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeDynamicObjectOffset(enc, obj)
+}
+
 // EncodeDynamicObjectContent is the lazy data writer for EncodeDynamicObjectOffset.
 //
 // Note, nil will be encoded as a zero-value initialized object.
@@ -381,6 +408,18 @@ func EncodeDynamicObjectContent[T newableDynamicObject[U], U any](enc *Encoder,
 	obj.DefineSSZ(enc.codec)
 }
 
+// EncodeDynamicObjectContentOnFork is the lazy data writer for EncodeDynamicObjectOffsetOnFork.
+//
+// Note, nil will be encoded as a zero-value initialized object.
+func EncodeDynamicObjectContentOnFork[T newableDynamicObject[U], U any](enc *Encoder, obj T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeDynamicObjectContent(enc, obj)
+}
+
 // EncodeArrayOfBits serializes a static array of (packed) bits.
 func EncodeArrayOfBits[T commonBitsLengths](enc *Encoder, bits *T) {
 	if enc.outWriter != nil {
@@ -489,6 +528,17 @@ func EncodeSliceOfUint64sOffset[T ~uint64](enc *Encoder, ns []T) {
 	}
 }
 
+// EncodeSliceOfUint64sOffsetOnFork serializes a dynamic slice of uint64s if
+// present in a fork.
+func EncodeSliceOfUint64sOffsetOnFork[T ~uint64](enc *Encoder, ns []T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeSliceOfUint64sOffset(enc, ns)
+}
+
 // EncodeSliceOfUint64sContent is the lazy data writer for EncodeSliceOfUint64sOffset.
 func EncodeSliceOfUint64sContent[T ~uint64](enc *Encoder, ns []T) {
 	if enc.outWriter != nil {
@@ -507,6 +557,16 @@ func EncodeSliceOfUint64sContent[T ~uint64](enc *Encoder, ns []T) {
 	}
 }
 
+// EncodeSliceOfUint64sContentOnFork is the lazy data writer for EncodeSliceOfUint64sOffsetOnFork.
+func EncodeSliceOfUint64sContentOnFork[T ~uint64](enc *Encoder, ns []T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeSliceOfUint64sContent(enc, ns)
+}
+
 // EncodeArrayOfStaticBytes serializes a static array of static binary
 // blobs.
 //
@@ -591,6 +651,16 @@ func EncodeSliceOfStaticBytesOffset[T commonBytesLengths](enc *Encoder, blobs []
 	}
 }
 
+// EncodeSliceOfStaticBytesOffsetOnFork serializes a dynamic slice of static binary blobs if present in a fork.
+func EncodeSliceOfStaticBytesOffsetOnFork[T commonBytesLengths](enc *Encoder, blobs []T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeSliceOfStaticBytesOffset(enc, blobs)
+}
+
 // EncodeSliceOfStaticBytesContent is the lazy data writer for EncodeSliceOfStaticBytesOffset.
 func EncodeSliceOfStaticBytesContent[T commonBytesLengths](enc *Encoder, blobs []T) {
 	// Internally this method is essentially calling EncodeStaticBytes on all
@@ -615,6 +685,16 @@ func EncodeSliceOfStaticBytesContent[T commonBytesLengths](enc *Encoder, blobs [
 	}
 }
 
+// EncodeSliceOfStaticBytesContentOnFork is the lazy data writer for EncodeSliceOfStaticBytesOffsetOnFork.
+func EncodeSliceOfStaticBytesContentOnFork[T commonBytesLengths](enc *Encoder, blobs []T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	EncodeSliceOfStaticBytesContent(enc, blobs)
+}
+
 // EncodeSliceOfDynamicBytesOffset serializes a dynamic slice of dynamic binary blobs.
 func EncodeSliceOfDynamicBytesOffset(enc *Encoder, blobs [][]byte) {
 	if enc.outWriter != nil {
@@ -697,6 +777,17 @@ func EncodeSliceOfStaticObjectsOffset[T StaticObject](enc *Encoder, objects []T)
 	}
 }
 
+// EncodeSliceOfStaticObjectsOffsetOnFork serializes a dynamic slice of static ssz
+// objects if present in a fork.
+func EncodeSliceOfStaticObjectsOffsetOnFork[T StaticObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticObjectsOffset(enc, objects) +} + // EncodeSliceOfStaticObjectsContent is the lazy data writer for EncodeSliceOfStaticObjectsOffset. func EncodeSliceOfStaticObjectsContent[T StaticObject](enc *Encoder, objects []T) { for _, obj := range objects { @@ -707,7 +798,18 @@ func EncodeSliceOfStaticObjectsContent[T StaticObject](enc *Encoder, objects []T } } -// EncodeSliceOfDynamicObjectsOffset serializes a dynamic slice of dynamic ssz objects. +// EncodeSliceOfStaticObjectsContentOnFork is the lazy data writer for EncodeSliceOfStaticObjectsOffsetOnFork. +func EncodeSliceOfStaticObjectsContentOnFork[T StaticObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfStaticObjectsContent(enc, objects) +} + +// EncodeSliceOfDynamicObjectsOffset serializes a dynamic slice of dynamic ssz +// objects. func EncodeSliceOfDynamicObjectsOffset[T DynamicObject](enc *Encoder, objects []T) { if enc.outWriter != nil { if enc.err != nil { @@ -724,6 +826,17 @@ func EncodeSliceOfDynamicObjectsOffset[T DynamicObject](enc *Encoder, objects [] } } +// EncodeSliceOfDynamicObjectsOffsetOnFork serializes a dynamic slice of dynamic +// ssz objects if present in a fork. +func EncodeSliceOfDynamicObjectsOffsetOnFork[T DynamicObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicObjectsOffset(enc, objects) +} + // EncodeSliceOfDynamicObjectsContent is the lazy data writer for EncodeSliceOfDynamicObjectsOffset. func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects []T) { enc.offsetDynamics(uint32(4 * len(objects))) @@ -765,6 +878,16 @@ func EncodeSliceOfDynamicObjectsContent[T DynamicObject](enc *Encoder, objects [ } } +// EncodeSliceOfDynamicObjectsContentOnFork is the lazy data writer for EncodeSliceOfDynamicObjectsOffsetOnFork. +func EncodeSliceOfDynamicObjectsContentOnFork[T DynamicObject](enc *Encoder, objects []T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicObjectsContent(enc, objects) +} + // offsetDynamics marks the item being encoded as a dynamic type, setting the starting // offset for the dynamic fields. func (enc *Encoder) offsetDynamics(offset uint32) { diff --git a/forks.go b/forks.go index 3f40807..47b6566 100644 --- a/forks.go +++ b/forks.go @@ -79,3 +79,10 @@ var ForkMapping = map[string]Fork{ "electra": ForkElectra, "future": ForkFuture, } + +// ForkFilter can be used by the XXXOnFork methods inside monolithic types to +// define certain fields appearing only in certain forks. 
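+//
+// A filter with only Added set keeps the field from that fork onwards; setting
+// Removed as well drops it again from that later fork. E.g. a field introduced
+// in Capella and retired in Deneb would be tagged (sketch, hypothetical field):
+//
+//	ForkFilter{Added: ForkCapella, Removed: ForkDeneb}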
+type ForkFilter struct { + Added Fork + Removed Fork +} diff --git a/hasher.go b/hasher.go index 56a8f36..15616a9 100644 --- a/hasher.go +++ b/hasher.go @@ -110,15 +110,20 @@ func HashUint64[T ~uint64](h *Hasher, n T) { h.insertChunk(buffer, 0) } -// HashUint64Pointer hashes a uint64. +// HashUint64PointerOnFork hashes a uint64 if present in a fork. // // Note, a nil pointer is hashed as zero. -func HashUint64Pointer[T ~uint64](h *Hasher, n *T) { - var buffer [32]byte - if n != nil { - binary.LittleEndian.PutUint64(buffer[:], uint64(*n)) +func HashUint64PointerOnFork[T ~uint64](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return } - h.insertChunk(buffer, 0) + // Otherwise fall back to the standard hasher + if n == nil { + HashUint64[uint64](h, 0) + return + } + HashUint64(h, *n) } // HashUint256 hashes a uint256. @@ -155,11 +160,15 @@ func HashStaticBytes[T commonBytesLengths](h *Hasher, blob *T) { h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob))) } -// HashStaticBytesPointer hashes a static binary blob. +// HashStaticBytesPointerOnFork hashes a static binary blob if present in a fork. // // Note, a nil pointer is hashed as an empty binary blob. -func HashStaticBytesPointer[T commonBytesLengths](h *Hasher, blob *T) { - // If the pointer is nil, hash as an empty blob +func HashStaticBytesPointerOnFork[T commonBytesLengths](h *Hasher, blob *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher if blob == nil { // Go generics cannot do len(T{}), so we either allocate and bear the GC // costs, or we use reflect. Both is kind of crappy. @@ -168,9 +177,7 @@ func HashStaticBytesPointer[T commonBytesLengths](h *Hasher, blob *T) { h.hashBytesEmpty(reflect.TypeOf(blob).Elem().Len()) return } - // The code below should have used `blob[:]`, alas Go's generics compiler - // is missing that (i.e. a bug): https://github.com/golang/go/issues/51740 - h.hashBytes(unsafe.Slice(&(*blob)[0], len(*blob))) + HashStaticBytes(h, blob) } // HashCheckedStaticBytes hashes a static binary blob. @@ -185,6 +192,16 @@ func HashDynamicBytes(h *Hasher, blob []byte, maxSize uint64) { h.ascendMixinLayer(uint64(len(blob)), (maxSize+31)/32) } +// HashDynamicBytesOnFork hashes a dynamic binary blob if present in a fork. +func HashDynamicBytesOnFork(h *Hasher, blob []byte, maxSize uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashDynamicBytes(h, blob, maxSize) +} + // HashStaticObject hashes a static ssz object. func HashStaticObject[T newableStaticObject[U], U any](h *Hasher, obj T) { h.descendLayer() @@ -197,6 +214,16 @@ func HashStaticObject[T newableStaticObject[U], U any](h *Hasher, obj T) { h.ascendLayer(0) } +// HashStaticObjectOnFork hashes a static ssz object if present in a fork. 
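
Note the asymmetry the pointer hashers above preserve: an inactive fork contributes no chunk at all, while an active fork with a nil pointer still hashes a zero value. A test-style sketch of the latter case (hasher construction elided, helper name hypothetical):

	// With an active filter, a nil *uint64 must chunk identically to an
	// explicit zero; h1 and h2 end up with the same field hash.
	func checkNilHashesAsZero(h1, h2 *ssz.Hasher, active ssz.ForkFilter) {
		ssz.HashUint64PointerOnFork[uint64](h1, nil, active)
		ssz.HashUint64(h2, uint64(0))
	}
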
+func HashStaticObjectOnFork[T newableStaticObject[U], U any](h *Hasher, obj T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashStaticObject(h, obj) +} + // HashDynamicObject hashes a dynamic ssz object. func HashDynamicObject[T newableDynamicObject[U], U any](h *Hasher, obj T) { h.descendLayer() @@ -209,6 +236,16 @@ func HashDynamicObject[T newableDynamicObject[U], U any](h *Hasher, obj T) { h.ascendLayer(0) } +// HashDynamicObjectOnFork hashes a dynamic ssz object if present in a fork. +func HashDynamicObjectOnFork[T newableDynamicObject[U], U any](h *Hasher, obj T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashDynamicObject(h, obj) +} + // HashArrayOfBits hashes a static array of (packed) bits. func HashArrayOfBits[T commonBitsLengths](h *Hasher, bits *T) { // The code below should have used `*bits[:]`, alas Go's generics compiler @@ -308,6 +345,16 @@ func HashSliceOfUint64s[T ~uint64](h *Hasher, ns []T, maxItems uint64) { h.ascendMixinLayer(uint64(len(ns)), (maxItems*8+31)/32) } +// HashSliceOfUint64sOnFork hashes a dynamic slice of uint64s if present in a fork. +func HashSliceOfUint64sOnFork[T ~uint64](h *Hasher, ns []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfUint64s(h, ns, maxItems) +} + // HashArrayOfStaticBytes hashes a static array of static binary blobs. // // The reason the blobs is passed by pointer and not by value is to prevent it @@ -352,6 +399,17 @@ func HashSliceOfStaticBytes[T commonBytesLengths](h *Hasher, blobs []T, maxItems h.ascendMixinLayer(uint64(len(blobs)), maxItems) } +// HashSliceOfStaticBytesOnFork hashes a dynamic slice of static binary blobs if +// present in a fork. +func HashSliceOfStaticBytesOnFork[T commonBytesLengths](h *Hasher, blobs []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfStaticBytes(h, blobs, maxItems) +} + // HashSliceOfDynamicBytes hashes a dynamic slice of dynamic binary blobs. func HashSliceOfDynamicBytes(h *Hasher, blobs [][]byte, maxItems uint64, maxSize uint64) { h.descendMixinLayer() @@ -422,6 +480,17 @@ func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u } } +// HashSliceOfStaticObjectsOnFork hashes a dynamic slice of static ssz objects +// if present in a fork. 
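
The mixin limits in the uint64 hashers above follow directly from SSZ chunk packing: four 8-byte items fit one 32-byte chunk, so the Merkle limit is the item cap rounded up to whole chunks. Worked for the beacon-state balances cap used later in this patch:

	const maxItems = 1099511627776            // 2^40 balances
	const chunkLimit = (maxItems*8 + 31) / 32 // 274877906944 chunks (2^38)
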
+func HashSliceOfStaticObjectsOnFork[T StaticObject](h *Hasher, objects []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfStaticObjects(h, objects, maxItems) +} + // HashSliceOfDynamicObjects hashes a dynamic slice of dynamic ssz objects. func HashSliceOfDynamicObjects[T DynamicObject](h *Hasher, objects []T, maxItems uint64) { h.descendMixinLayer() @@ -433,6 +502,17 @@ func HashSliceOfDynamicObjects[T DynamicObject](h *Hasher, objects []T, maxItems h.ascendMixinLayer(uint64(len(objects)), maxItems) } +// HashSliceOfDynamicObjectsOnFork hashes a dynamic slice of dynamic ssz objects +// if present in a fork. +func HashSliceOfDynamicObjectsOnFork[T DynamicObject](h *Hasher, objects []T, maxItems uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfDynamicObjects(h, objects, maxItems) +} + // hashBytes either appends the blob to the hasher's scratch space if it's small // enough to fit into a single chunk, or chunks it up and merkleizes it first. func (h *Hasher) hashBytes(blob []byte) { diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go index c52c0b8..0879436 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_1_ssz.go @@ -21,12 +21,10 @@ func (obj *AttestationDataVariation1) SizeSSZ(sizer *ssz.Sizer) (size uint32) { // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationDataVariation1) DefineSSZ(codec *ssz.Codec) { - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (0) - Future - 8 bytes - } - ssz.DefineUint64(codec, &obj.Slot) // Field (1) - Slot - 8 bytes - ssz.DefineUint64(codec, &obj.Index) // Field (2) - Index - 8 bytes - ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (3) - BeaconBlockHash - 32 bytes - ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (0) - Future - 8 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field (1) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (2) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (3) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? 
bytes (Checkpoint) } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go index 9f0b981..615e127 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_2_ssz.go @@ -22,12 +22,10 @@ func (obj *AttestationDataVariation2) SizeSSZ(sizer *ssz.Sizer) (size uint32) { // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationDataVariation2) DefineSSZ(codec *ssz.Codec) { - ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes - ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes - ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (3) - Future - 8 bytes - } - ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (3) - Future - 8 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (4) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (5) - Target - ? bytes (Checkpoint) } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go index fbfb6b5..d24b625 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_data_variation_3_ssz.go @@ -21,12 +21,10 @@ func (obj *AttestationDataVariation3) SizeSSZ(sizer *ssz.Sizer) (size uint32) { // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationDataVariation3) DefineSSZ(codec *ssz.Codec) { - ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes - ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes - ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes - ssz.DefineStaticObject(codec, &obj.Source) // Field (3) - Source - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.Target) // Field (4) - Target - ? bytes (Checkpoint) - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (5) - Future - 8 bytes - } + ssz.DefineUint64(codec, &obj.Slot) // Field (0) - Slot - 8 bytes + ssz.DefineUint64(codec, &obj.Index) // Field (1) - Index - 8 bytes + ssz.DefineStaticBytes(codec, &obj.BeaconBlockHash) // Field (2) - BeaconBlockHash - 32 bytes + ssz.DefineStaticObject(codec, &obj.Source) // Field (3) - Source - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.Target) // Field (4) - Target - ? 
bytes (Checkpoint) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (5) - Future - 8 bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go index dce58a0..f7b08d7 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_ssz.go @@ -2,9 +2,7 @@ package consensus_spec_tests -import ( - "github.com/karalabe/ssz" -) +import "github.com/karalabe/ssz" // Cached static size computed on package init. var staticSizeCacheAttestation = ssz.PrecomputeStaticSizeCache((*Attestation)(nil)) diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go index 130882d..6fe098e 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_1_ssz.go @@ -31,12 +31,10 @@ func (obj *AttestationVariation1) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size ui // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationVariation1) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (0) - Future - 8 bytes - } - ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (1) - AggregationBits - 4 bytes - ssz.DefineStaticObject(codec, &obj.Data) // Field (2) - Data - ? bytes (AttestationData) - ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (0) - Future - 8 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (1) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (2) - Data - ? bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes // Define the dynamic data (fields) ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (1) - AggregationBits - ? bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go index 96672ed..c280c37 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_2_ssz.go @@ -32,12 +32,10 @@ func (obj *AttestationVariation2) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size ui // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationVariation2) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes - ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (2) - Future - 8 bytes - } - ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? 
bytes (AttestationData) + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (2) - Future - 8 bytes + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (3) - Signature - 96 bytes // Define the dynamic data (fields) ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go index 0681689..80f35f2 100644 --- a/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_attestation_variation_3_ssz.go @@ -31,12 +31,11 @@ func (obj *AttestationVariation3) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size ui // DefineSSZ defines how an object is encoded/decoded. func (obj *AttestationVariation3) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes - ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) - ssz.DefineStaticBytes(codec, &obj.Signature) // Field (2) - Signature - 96 bytes - if codec.Fork() >= ssz.ForkFuture { - ssz.DefineUint64(codec, &obj.Future) // Field (3) - Future - 8 bytes - } + ssz.DefineSliceOfBitsOffset(codec, &obj.AggregationBits, 2048) // Offset (0) - AggregationBits - 4 bytes + ssz.DefineStaticObject(codec, &obj.Data) // Field (1) - Data - ? bytes (AttestationData) + ssz.DefineStaticBytes(codec, &obj.Signature) // Field (2) - Signature - 96 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.Future, ssz.ForkFilter{Added: ssz.ForkFuture}) // Field (3) - Future - 8 bytes + // Define the dynamic data (fields) ssz.DefineSliceOfBitsContent(codec, &obj.AggregationBits, 2048) // Field (0) - AggregationBits - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go index c3f76cf..6c3af26 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_block_body_monolith_ssz.go @@ -52,39 +52,26 @@ func (obj *BeaconBlockBodyMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size // DefineSSZ defines how an object is encoded/decoded. func (obj *BeaconBlockBodyMonolith) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineStaticBytes(codec, &obj.RandaoReveal) // Field ( 0) - RandaoReveal - 96 bytes - ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 1) - Eth1Data - ? bytes (Eth1Data) - ssz.DefineStaticBytes(codec, &obj.Graffiti) // Field ( 2) - Graffiti - 32 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.ProposerSlashings, 16) // Offset ( 3) - ProposerSlashings - 4 bytes - ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.AttesterSlashings, 2) // Offset ( 4) - AttesterSlashings - 4 bytes - ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.Attestations, 128) // Offset ( 5) - Attestations - 4 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Deposits, 16) // Offset ( 6) - Deposits - 4 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.VoluntaryExits, 16) // Offset ( 7) - VoluntaryExits - 4 bytes - if codec.Fork() >= ssz.ForkAltair { - ssz.DefineStaticObject(codec, &obj.SyncAggregate) // Field ( 8) - SyncAggregate - ? 
bytes (SyncAggregate) - } - if codec.Fork() >= ssz.ForkBellatrix { - ssz.DefineDynamicObjectOffset(codec, &obj.ExecutionPayload) // Offset ( 9) - ExecutionPayload - 4 bytes - } - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.BlsToExecutionChanges, 16) // Offset (10) - BlsToExecutionChanges - 4 bytes - } - if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineSliceOfStaticBytesOffset(codec, &obj.BlobKzgCommitments, 4096) // Offset (11) - BlobKzgCommitments - 4 bytes - } + ssz.DefineStaticBytes(codec, &obj.RandaoReveal) // Field ( 0) - RandaoReveal - 96 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 1) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineStaticBytes(codec, &obj.Graffiti) // Field ( 2) - Graffiti - 32 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.ProposerSlashings, 16) // Offset ( 3) - ProposerSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.AttesterSlashings, 2) // Offset ( 4) - AttesterSlashings - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.Attestations, 128) // Offset ( 5) - Attestations - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Deposits, 16) // Offset ( 6) - Deposits - 4 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.VoluntaryExits, 16) // Offset ( 7) - VoluntaryExits - 4 bytes + ssz.DefineStaticObjectOnFork(codec, &obj.SyncAggregate, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field ( 8) - SyncAggregate - ? bytes (SyncAggregate) + ssz.DefineDynamicObjectOffsetOnFork(codec, &obj.ExecutionPayload, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Offset ( 9) - ExecutionPayload - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.BlsToExecutionChanges, 16, ssz.ForkFilter{Added: ssz.ForkCapella}) // Offset (10) - BlsToExecutionChanges - 4 bytes + ssz.DefineSliceOfStaticBytesOffsetOnFork(codec, &obj.BlobKzgCommitments, 4096, ssz.ForkFilter{Added: ssz.ForkDeneb}) // Offset (11) - BlobKzgCommitments - 4 bytes + // Define the dynamic data (fields) - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.ProposerSlashings, 16) // Field ( 3) - ProposerSlashings - ? bytes - ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.AttesterSlashings, 2) // Field ( 4) - AttesterSlashings - ? bytes - ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.Attestations, 128) // Field ( 5) - Attestations - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Deposits, 16) // Field ( 6) - Deposits - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.VoluntaryExits, 16) // Field ( 7) - VoluntaryExits - ? bytes - if codec.Fork() >= ssz.ForkBellatrix { - ssz.DefineDynamicObjectContent(codec, &obj.ExecutionPayload) // Field ( 9) - ExecutionPayload - ? bytes - } - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.BlsToExecutionChanges, 16) // Field (10) - BlsToExecutionChanges - ? bytes - } - if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineSliceOfStaticBytesContent(codec, &obj.BlobKzgCommitments, 4096) // Field (11) - BlobKzgCommitments - ? bytes - } + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.ProposerSlashings, 16) // Field ( 3) - ProposerSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.AttesterSlashings, 2) // Field ( 4) - AttesterSlashings - ? bytes + ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.Attestations, 128) // Field ( 5) - Attestations - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Deposits, 16) // Field ( 6) - Deposits - ? 
bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.VoluntaryExits, 16) // Field ( 7) - VoluntaryExits - ? bytes + ssz.DefineDynamicObjectContentOnFork(codec, &obj.ExecutionPayload, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Field ( 9) - ExecutionPayload - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.BlsToExecutionChanges, 16, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (10) - BlsToExecutionChanges - ? bytes + ssz.DefineSliceOfStaticBytesContentOnFork(codec, &obj.BlobKzgCommitments, 4096, ssz.ForkFilter{Added: ssz.ForkDeneb}) // Field (11) - BlobKzgCommitments - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go index d746a4b..a2fb232 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -61,64 +61,47 @@ func (obj *BeaconStateMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint // DefineSSZ defines how an object is encoded/decoded. func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes - ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes - ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes - ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) - ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? bytes (BeaconBlockHeader) - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes - ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes - ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? 
bytes (Eth1Data) - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes - ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes - ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes - ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes - ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes - if codec.Fork() < ssz.ForkAltair { - ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.PreviousEpochAttestations, 4096) // Offset (15) - PreviousEpochAttestations - 4 bytes - ssz.DefineSliceOfDynamicObjectsOffset(codec, &obj.CurrentEpochAttestations, 4096) // Offset (16) - CurrentEpochAttestations - 4 bytes - } - if codec.Fork() >= ssz.ForkAltair { - ssz.DefineDynamicBytesOffset(codec, &obj.PreviousEpochParticipation, 1099511627776) // Offset (17) - PreviousEpochParticipation - 4 bytes - ssz.DefineDynamicBytesOffset(codec, &obj.CurrentEpochParticipation, 1099511627776) // Offset (18) - CurrentEpochParticipation - 4 bytes - } - ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (19) - JustificationBits - 1 bytes - ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (20) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (21) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) - ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (22) - FinalizedCheckpoint - ? bytes (Checkpoint) - if codec.Fork() >= ssz.ForkAltair { - ssz.DefineSliceOfUint64sOffset(codec, &obj.InactivityScores, 1099511627776) // Offset (23) - InactivityScores - 4 bytes - ssz.DefineStaticObject(codec, &obj.CurrentSyncCommittee) // Field (24) - CurrentSyncCommittee - ? bytes (SyncCommittee) - ssz.DefineStaticObject(codec, &obj.NextSyncCommittee) // Field (25) - NextSyncCommittee - ? bytes (SyncCommittee) - } - if codec.Fork() >= ssz.ForkBellatrix { - ssz.DefineDynamicObjectOffset(codec, &obj.LatestExecutionPayloadHeader) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes - } - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineUint64Pointer(codec, &obj.NextWithdrawalIndex) // Field (27) - NextWithdrawalIndex - 8 bytes - ssz.DefineUint64Pointer(codec, &obj.NextWithdrawalValidatorIndex) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.HistoricalSummaries, 16777216) // Offset (29) - HistoricalSummaries - 4 bytes - } + ssz.DefineUint64(codec, &obj.GenesisTime) // Field ( 0) - GenesisTime - 8 bytes + ssz.DefineStaticBytes(codec, &obj.GenesisValidatorsRoot) // Field ( 1) - GenesisValidatorsRoot - 32 bytes + ssz.DefineUint64(codec, &obj.Slot) // Field ( 2) - Slot - 8 bytes + ssz.DefineStaticObject(codec, &obj.Fork) // Field ( 3) - Fork - ? bytes (Fork) + ssz.DefineStaticObject(codec, &obj.LatestBlockHeader) // Field ( 4) - LatestBlockHeader - ? 
bytes (BeaconBlockHeader) + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.BlockRoots[:]) // Field ( 5) - BlockRoots - 262144 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.StateRoots[:]) // Field ( 6) - StateRoots - 262144 bytes + ssz.DefineSliceOfStaticBytesOffset(codec, &obj.HistoricalRoots, 16777216) // Offset ( 7) - HistoricalRoots - 4 bytes + ssz.DefineStaticObject(codec, &obj.Eth1Data) // Field ( 8) - Eth1Data - ? bytes (Eth1Data) + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Eth1DataVotes, 2048) // Offset ( 9) - Eth1DataVotes - 4 bytes + ssz.DefineUint64(codec, &obj.Eth1DepositIndex) // Field (10) - Eth1DepositIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes + ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes + ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes + ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.PreviousEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (15) - PreviousEpochAttestations - 4 bytes + ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.CurrentEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (16) - CurrentEpochAttestations - 4 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.PreviousEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (17) - PreviousEpochParticipation - 4 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.CurrentEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (18) - CurrentEpochParticipation - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.JustificationBits, 4) // Field (19) - JustificationBits - 1 bytes + ssz.DefineStaticObject(codec, &obj.PreviousJustifiedCheckpoint) // Field (20) - PreviousJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.CurrentJustifiedCheckpoint) // Field (21) - CurrentJustifiedCheckpoint - ? bytes (Checkpoint) + ssz.DefineStaticObject(codec, &obj.FinalizedCheckpoint) // Field (22) - FinalizedCheckpoint - ? bytes (Checkpoint) + ssz.DefineSliceOfUint64sOffsetOnFork(codec, &obj.InactivityScores, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (23) - InactivityScores - 4 bytes + ssz.DefineStaticObjectOnFork(codec, &obj.CurrentSyncCommittee, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (24) - CurrentSyncCommittee - ? bytes (SyncCommittee) + ssz.DefineStaticObjectOnFork(codec, &obj.NextSyncCommittee, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (25) - NextSyncCommittee - ? 
bytes (SyncCommittee) + ssz.DefineDynamicObjectOffsetOnFork(codec, &obj.LatestExecutionPayloadHeader, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Offset (26) - LatestExecutionPayloadHeader - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.NextWithdrawalIndex, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (27) - NextWithdrawalIndex - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.NextWithdrawalValidatorIndex, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (28) - NextWithdrawalValidatorIndex - 8 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.HistoricalSummaries, 16777216, ssz.ForkFilter{Added: ssz.ForkCapella}) // Offset (29) - HistoricalSummaries - 4 bytes + // Define the dynamic data (fields) - ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes - ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes - if codec.Fork() < ssz.ForkAltair { - ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.PreviousEpochAttestations, 4096) // Field (15) - PreviousEpochAttestations - ? bytes - ssz.DefineSliceOfDynamicObjectsContent(codec, &obj.CurrentEpochAttestations, 4096) // Field (16) - CurrentEpochAttestations - ? bytes - } - if codec.Fork() >= ssz.ForkAltair { - ssz.DefineDynamicBytesContent(codec, &obj.PreviousEpochParticipation, 1099511627776) // Field (17) - PreviousEpochParticipation - ? bytes - ssz.DefineDynamicBytesContent(codec, &obj.CurrentEpochParticipation, 1099511627776) // Field (18) - CurrentEpochParticipation - ? bytes - ssz.DefineSliceOfUint64sContent(codec, &obj.InactivityScores, 1099511627776) // Field (23) - InactivityScores - ? bytes - } - if codec.Fork() >= ssz.ForkBellatrix { - ssz.DefineDynamicObjectContent(codec, &obj.LatestExecutionPayloadHeader) // Field (26) - LatestExecutionPayloadHeader - ? bytes - } - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.HistoricalSummaries, 16777216) // Field (29) - HistoricalSummaries - ? bytes - } + ssz.DefineSliceOfStaticBytesContent(codec, &obj.HistoricalRoots, 16777216) // Field ( 7) - HistoricalRoots - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Eth1DataVotes, 2048) // Field ( 9) - Eth1DataVotes - ? bytes + ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Validators, 1099511627776) // Field (11) - Validators - ? bytes + ssz.DefineSliceOfUint64sContent(codec, &obj.Balances, 1099511627776) // Field (12) - Balances - ? bytes + ssz.DefineSliceOfDynamicObjectsContentOnFork(codec, &obj.PreviousEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Field (15) - PreviousEpochAttestations - ? bytes + ssz.DefineSliceOfDynamicObjectsContentOnFork(codec, &obj.CurrentEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Field (16) - CurrentEpochAttestations - ? bytes + ssz.DefineDynamicBytesContentOnFork(codec, &obj.PreviousEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (17) - PreviousEpochParticipation - ? bytes + ssz.DefineDynamicBytesContentOnFork(codec, &obj.CurrentEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (18) - CurrentEpochParticipation - ? 
bytes + ssz.DefineSliceOfUint64sContentOnFork(codec, &obj.InactivityScores, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Field (23) - InactivityScores - ? bytes + ssz.DefineDynamicObjectContentOnFork(codec, &obj.LatestExecutionPayloadHeader, ssz.ForkFilter{Added: ssz.ForkBellatrix}) // Field (26) - LatestExecutionPayloadHeader - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.HistoricalSummaries, 16777216, ssz.ForkFilter{Added: ssz.ForkCapella}) // Field (29) - HistoricalSummaries - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go index 6ca7b3f..579de25 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_header_monolith_ssz.go @@ -7,45 +7,47 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. func (obj *ExecutionPayloadHeaderMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { - size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 32 - if sizer.Fork() >= ssz.ForkCapella { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { + size += 4 + } + size += 32 + 32 + 32 + if sizer.Fork() >= ssz.ForkShanghai { size += 32 } - if sizer.Fork() >= ssz.ForkDeneb { + if sizer.Fork() >= ssz.ForkCancun { size += 8 + 8 } if fixed { return size } - size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) - + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } return size } // DefineSSZ defines how an object is encoded/decoded. 
func (obj *ExecutionPayloadHeaderMonolith) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes - ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes - ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes - ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes - ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes - ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes - ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes - ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes - ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes - ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes - ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32) // Offset (10) - ExtraData - 4 bytes - ssz.DefineStaticBytes(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes - ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes - ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineStaticBytesPointer(codec, &obj.WithdrawalRoot) // Field (14) - WithdrawalRoot - 32 bytes - } - if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64Pointer(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - ssz.DefineUint64Pointer(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes - } + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineStaticBytes(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.TransactionsRoot) // Field (13) - TransactionsRoot - 32 bytes + ssz.DefineStaticBytesPointerOnFork(codec, &obj.WithdrawalRoot, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - WithdrawalRoot - 32 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + // Define the dynamic data (fields) - ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? 
bytes + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes } diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index 19289a0..7436180 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -7,19 +7,25 @@ import "github.com/karalabe/ssz" // SizeSSZ returns either the static size of the object if fixed == true, or // the total size otherwise. func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { - size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + 4 + 32 + 32 + 4 - if sizer.Fork() >= ssz.ForkCapella { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { size += 4 } - if sizer.Fork() >= ssz.ForkDeneb { + size += 32 + 32 + 4 + if sizer.Fork() >= ssz.ForkShanghai { + size += 4 + } + if sizer.Fork() >= ssz.ForkCancun { size += 8 + 8 } if fixed { return size } - size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) - if sizer.Fork() >= ssz.ForkCapella { + if sizer.Fork() >= ssz.ForkShanghai { size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) } return size @@ -28,31 +34,26 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size // DefineSSZ defines how an object is encoded/decoded. func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes - ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes - ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes - ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes - ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes - ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes - ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes - ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes - ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes - ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes - ssz.DefineDynamicBytesOffset(codec, &obj.ExtraData, 32) // Offset (10) - ExtraData - 4 bytes - ssz.DefineUint256(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes - ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes - ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Withdrawals, 16) // Offset (14) - Withdrawals - 4 bytes - } - if codec.Fork() >= ssz.ForkDeneb { - ssz.DefineUint64Pointer(codec, &obj.BlobGasUsed) // Field (15) - BlobGasUsed - 8 bytes - ssz.DefineUint64Pointer(codec, &obj.ExcessBlobGas) // Field (16) - ExcessBlobGas - 8 bytes - } + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) 
- FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + // Define the dynamic data (fields) - ssz.DefineDynamicBytesContent(codec, &obj.ExtraData, 32) // Field (10) - ExtraData - ? bytes - ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? bytes - if codec.Fork() >= ssz.ForkCapella { - ssz.DefineSliceOfStaticObjectsContent(codec, &obj.Withdrawals, 16) // Field (14) - Withdrawals - ? bytes - } + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? 
bytes } diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go index 5f865c6..e52500b 100644 --- a/tests/testtypes/consensus-spec-tests/types_monoliths.go +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -70,13 +70,13 @@ type ExecutionPayloadMonolith struct { GasLimit uint64 GasUsed uint64 Timestamp uint64 - ExtraData []byte `ssz-max:"32"` + ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` BaseFeePerGas *uint256.Int BlockHash Hash Transactions [][]byte `ssz-max:"1048576,1073741824"` - Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"capella"` - BlobGasUsed *uint64 ` ssz-fork:"deneb"` - ExcessBlobGas *uint64 ` ssz-fork:"deneb"` + Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"` + BlobGasUsed *uint64 ` ssz-fork:"cancun"` + ExcessBlobGas *uint64 ` ssz-fork:"cancun"` } type ExecutionPayloadHeaderMonolith struct { @@ -90,11 +90,11 @@ type ExecutionPayloadHeaderMonolith struct { GasLimit uint64 GasUsed uint64 Timestamp uint64 - ExtraData []byte `ssz-max:"32"` + ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` BaseFeePerGas [32]byte BlockHash [32]byte TransactionsRoot [32]byte - WithdrawalRoot *[32]byte `ssz-fork:"capella"` - BlobGasUsed *uint64 `ssz-fork:"deneb"` - ExcessBlobGas *uint64 `ssz-fork:"deneb"` + WithdrawalRoot *[32]byte `ssz-fork:"shanghai"` + BlobGasUsed *uint64 `ssz-fork:"cancun"` + ExcessBlobGas *uint64 `ssz-fork:"cancun"` } diff --git a/tests/testtypes/consensus-spec-tests/types_variation.go b/tests/testtypes/consensus-spec-tests/types_variation.go index 7823af5..6a9e4d8 100644 --- a/tests/testtypes/consensus-spec-tests/types_variation.go +++ b/tests/testtypes/consensus-spec-tests/types_variation.go @@ -53,7 +53,7 @@ type ExecutionPayloadVariation struct { // types (i.e. static objects embedded) for various positions. 
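
The retagged monolith fields above are what drive the generator: an optional (pointer or slice) field carrying an ssz-fork tag makes it emit the *OnFork codec calls with a matching filter. Sketched for one field (the struct name is hypothetical; the mapping itself is taken from the generated files above):

	type payloadSketch struct {
		BlobGasUsed *uint64 `ssz-fork:"cancun"`
	}
	// generated inside DefineSSZ:
	//   ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun})
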
type AttestationVariation1 struct { - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field AggregationBits bitfield.Bitlist `ssz-max:"2048"` Data *AttestationData Signature [96]byte @@ -61,18 +61,18 @@ type AttestationVariation1 struct { type AttestationVariation2 struct { AggregationBits bitfield.Bitlist `ssz-max:"2048"` Data *AttestationData - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field Signature [96]byte } type AttestationVariation3 struct { AggregationBits bitfield.Bitlist `ssz-max:"2048"` Data *AttestationData Signature [96]byte - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field } type AttestationDataVariation1 struct { - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field Slot Slot Index uint64 BeaconBlockHash Hash @@ -83,7 +83,7 @@ type AttestationDataVariation2 struct { Slot Slot Index uint64 BeaconBlockHash Hash - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field Source *Checkpoint Target *Checkpoint } @@ -93,5 +93,5 @@ type AttestationDataVariation3 struct { BeaconBlockHash Hash Source *Checkpoint Target *Checkpoint - Future uint64 `ssz-fork:"future"` // Currently unused field + Future *uint64 `ssz-fork:"future"` // Currently unused field } From 29f4f69c8d57c48767a5cd30f0d223494c65fa62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Fri, 13 Sep 2024 12:45:07 +0300 Subject: [PATCH 10/12] cmd, ssz, tests: implement a bunch more monolithic type support --- cmd/sszgen/forks.go | 1 + cmd/sszgen/opset.go | 28 ++++- codec.go | 116 ++++++++++++++++- decoder.go | 116 +++++++++++++++++ encoder.go | 117 +++++++++++++++++- forks.go | 1 + hasher.go | 103 +++++++++++++++ tests/consensus_specs_test.go | 9 ++ .../gen_bits_struct_monolith_ssz.go | 37 ++++++ .../gen_execution_payload_monolith_2_ssz.go | 62 ++++++++++ .../gen_execution_payload_monolith_ssz.go | 7 +- .../gen_fixed_test_struct_monolith_ssz.go | 20 +++ ...n_single_field_test_struct_monolith_ssz.go | 18 +++ .../gen_small_test_struct_monolith_ssz.go | 20 +++ .../gen_validator_monolith_ssz.go | 27 ++++ .../consensus-spec-tests/types_monoliths.go | 72 ++++++++++- 16 files changed, 741 insertions(+), 13 deletions(-) create mode 100644 tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go create mode 100644 tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go diff --git a/cmd/sszgen/forks.go b/cmd/sszgen/forks.go index 3738e4b..9c13996 100644 --- a/cmd/sszgen/forks.go +++ b/cmd/sszgen/forks.go @@ -7,6 +7,7 @@ package main // forkMapping maps fork names to fork values. This is used internally by the // ssz codec generator to convert tags to values. 
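
A sketch of the lookup the generator performs with the mapping below (the resolveForkIdent helper and its error handling are illustrative only): the tag carries the lower-case fork name, the map yields the exported identifier suffix.

	func resolveForkIdent(tag string) (string, error) {
		suffix, ok := forkMapping[tag]
		if !ok {
			return "", fmt.Errorf("unknown fork tag: %q", tag)
		}
		return "ssz.Fork" + suffix, nil // "cancun" → "ssz.ForkCancun"
	}
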
var forkMapping = map[string]string{ + "unknown": "Unknown", "frontier": "Frontier", "homestead": "Homestead", "dao": "DAO", diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go index 0f3db13..9dc681b 100644 --- a/cmd/sszgen/opset.go +++ b/cmd/sszgen/opset.go @@ -65,7 +65,12 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointe []int{1}, }, nil } else { - return nil, fmt.Errorf("pointer of boolean basic type not supported yet") + return &opsetStatic{ + "DefineBoolPointer({{.Codec}}, &{{.Field}})", + "EncodeBoolPointer({{.Codec}}, &{{.Field}})", + "DecodeBoolPointer({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil } case types.Uint8: if tags != nil && tags.size[0] != 1 { @@ -79,7 +84,12 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointe []int{1}, }, nil } else { - return nil, fmt.Errorf("pointer of byte basic type not supported yet") + return &opsetStatic{ + "DefineUint8Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint8Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint8Pointer({{.Codec}}, &{{.Field}})", + []int{1}, + }, nil } case types.Uint16: if tags != nil && tags.size[0] != 2 { @@ -93,7 +103,12 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointe []int{2}, }, nil } else { - return nil, fmt.Errorf("pointer of uint16 basic type not supported yet") + return &opsetStatic{ + "DefineUint16Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint16Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint16Pointer({{.Codec}}, &{{.Field}})", + []int{2}, + }, nil } case types.Uint32: if tags != nil && tags.size[0] != 4 { @@ -107,7 +122,12 @@ func (p *parseContext) resolveBasicOpset(typ *types.Basic, tags *sizeTag, pointe []int{4}, }, nil } else { - return nil, fmt.Errorf("pointer of uint32 basic type not supported yet") + return &opsetStatic{ + "DefineUint32Pointer({{.Codec}}, &{{.Field}})", + "EncodeUint32Pointer({{.Codec}}, &{{.Field}})", + "DecodeUint32Pointer({{.Codec}}, &{{.Field}})", + []int{4}, + }, nil } case types.Uint64: if tags != nil && tags.size[0] != 8 { diff --git a/codec.go b/codec.go index 2140e95..58e2a11 100644 --- a/codec.go +++ b/codec.go @@ -68,6 +68,20 @@ func DefineBool[T ~bool](c *Codec, v *T) { HashBool(c.has, *v) } +// DefineBoolPointerOnFork defines the next field as a 1 byte boolean if present +// in a fork. +func DefineBoolPointerOnFork[T ~bool](c *Codec, v **T, filter ForkFilter) { + if c.enc != nil { + EncodeBoolPointerOnFork(c.enc, *v, filter) + return + } + if c.dec != nil { + DecodeBoolPointerOnFork(c.dec, v, filter) + return + } + HashBoolPointerOnFork(c.has, *v, filter) +} + // DefineUint8 defines the next field as a uint8. func DefineUint8[T ~uint8](c *Codec, n *T) { if c.enc != nil { @@ -81,6 +95,19 @@ func DefineUint8[T ~uint8](c *Codec, n *T) { HashUint8(c.has, *n) } +// DefineUint8PointerOnFork defines the next field as a uint8 if present in a fork. +func DefineUint8PointerOnFork[T ~uint8](c *Codec, n **T, filter ForkFilter) { + if c.enc != nil { + EncodeUint8PointerOnFork(c.enc, *n, filter) + return + } + if c.dec != nil { + DecodeUint8PointerOnFork(c.dec, n, filter) + return + } + HashUint8PointerOnFork(c.has, *n, filter) +} + // DefineUint16 defines the next field as a uint16. func DefineUint16[T ~uint16](c *Codec, n *T) { if c.enc != nil { @@ -94,6 +121,19 @@ func DefineUint16[T ~uint16](c *Codec, n *T) { HashUint16(c.has, *n) } +// DefineUint16PointerOnFork defines the next field as a uint16 if present in a fork. 
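
Stepping back to the opset.go templates above: each new pointer opset is a (define, encode, decode) template triple plus its static byte width, so a field `Foo *uint32` renders (illustratively, field name hypothetical) as:

	ssz.DefineUint32Pointer(codec, &obj.Foo) // []int{4}: one 4-byte static slot
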
+func DefineUint16PointerOnFork[T ~uint16](c *Codec, n **T, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeUint16PointerOnFork(c.enc, *n, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeUint16PointerOnFork(c.dec, n, filter)
+		return
+	}
+	HashUint16PointerOnFork(c.has, *n, filter)
+}
+
 // DefineUint32 defines the next field as a uint32.
 func DefineUint32[T ~uint32](c *Codec, n *T) {
 	if c.enc != nil {
@@ -107,6 +147,19 @@ func DefineUint32[T ~uint32](c *Codec, n *T) {
 	HashUint32(c.has, *n)
 }
 
+// DefineUint32PointerOnFork defines the next field as a uint32 if present in a fork.
+func DefineUint32PointerOnFork[T ~uint32](c *Codec, n **T, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeUint32PointerOnFork(c.enc, *n, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeUint32PointerOnFork(c.dec, n, filter)
+		return
+	}
+	HashUint32PointerOnFork(c.has, *n, filter)
+}
+
 // DefineUint64 defines the next field as a uint64.
 func DefineUint64[T ~uint64](c *Codec, n *T) {
 	if c.enc != nil {
@@ -146,6 +199,19 @@ func DefineUint64[T ~uint64](c *Codec, n *T) {
 	HashUint256(c.has, *n)
 }
 
+// DefineUint256OnFork defines the next field as a uint256 if present in a fork.
+func DefineUint256OnFork(c *Codec, n **uint256.Int, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeUint256OnFork(c.enc, *n, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeUint256OnFork(c.dec, n, filter)
+		return
+	}
+	HashUint256OnFork(c.has, *n, filter) // TODO(karalabe): Interesting bug, duplicate, weird place fails, explore
+}
+
 // DefineUint256BigInt defines the next field as a uint256.
 func DefineUint256BigInt(c *Codec, n **big.Int) {
 	if c.enc != nil {
@@ -159,6 +225,20 @@ func DefineUint256BigInt(c *Codec, n **big.Int) {
 	HashUint256BigInt(c.has, *n)
 }
 
+// DefineUint256BigIntOnFork defines the next field as a uint256 if present in a
+// fork.
+func DefineUint256BigIntOnFork(c *Codec, n **big.Int, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeUint256BigIntOnFork(c.enc, *n, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeUint256BigIntOnFork(c.dec, n, filter)
+		return
+	}
+	HashUint256BigIntOnFork(c.has, *n, filter)
+}
+
 // DefineStaticBytes defines the next field as static binary blob. This method
 // can be used for byte arrays.
 func DefineStaticBytes[T commonBytesLengths](c *Codec, blob *T) {
@@ -350,7 +430,8 @@ func DefineArrayOfBits[T commonBitsLengths](c *Codec, bits *T, size uint64) {
 	HashArrayOfBits(c.has, bits)
 }
 
-// DefineSliceOfBitsOffset defines the next field as a dynamic slice of (packed) bits.
+// DefineSliceOfBitsOffset defines the next field as a dynamic slice of (packed)
+// bits.
 func DefineSliceOfBitsOffset(c *Codec, bits *bitfield.Bitlist, maxBits uint64) {
 	if c.enc != nil {
 		EncodeSliceOfBitsOffset(c.enc, *bits)
@@ -363,7 +444,22 @@ func DefineSliceOfBitsOffset(c *Codec, bits *bitfield.Bitlist, maxBits uint64) {
 	HashSliceOfBits(c.has, *bits, maxBits)
 }
 
-// DefineSliceOfBitsContent defines the next field as a dynamic slice of (packed) bits.
+// DefineSliceOfBitsOffsetOnFork defines the next field as a dynamic slice of
+// (packed) bits if present in a fork.
+func DefineSliceOfBitsOffsetOnFork(c *Codec, bits *bitfield.Bitlist, maxBits uint64, filter ForkFilter) {
+	if c.enc != nil {
+		EncodeSliceOfBitsOffsetOnFork(c.enc, *bits, filter)
+		return
+	}
+	if c.dec != nil {
+		DecodeSliceOfBitsOffsetOnFork(c.dec, bits, filter)
+		return
+	}
+	HashSliceOfBitsOnFork(c.has, *bits, maxBits, filter)
+}
+
+// DefineSliceOfBitsContent defines the next field as a dynamic slice of (packed)
+// bits.
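
Every Define*OnFork helper above repeats the same three-way dispatch: a Codec drives exactly one of an encoder, a decoder or a hasher, so the first armed branch wins. A generic sketch of that skeleton (in-package, since enc/dec/has are unexported; the defineOnFork name is hypothetical and the patch deliberately spells the branches out instead):

	func defineOnFork[T any](c *Codec, v **T, filter ForkFilter,
		enc func(*Encoder, *T, ForkFilter),
		dec func(*Decoder, **T, ForkFilter),
		has func(*Hasher, *T, ForkFilter),
	) {
		if c.enc != nil {
			enc(c.enc, *v, filter)
			return
		}
		if c.dec != nil {
			dec(c.dec, v, filter)
			return
		}
		has(c.has, *v, filter)
	}
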
func DefineSliceOfBitsContent(c *Codec, bits *bitfield.Bitlist, maxBits uint64) { if c.enc != nil { EncodeSliceOfBitsContent(c.enc, *bits) @@ -376,6 +472,20 @@ func DefineSliceOfBitsContent(c *Codec, bits *bitfield.Bitlist, maxBits uint64) // No hashing, done at the offset position } +// DefineSliceOfBitsContentOnFork defines the next field as a dynamic slice of +// (packed) bits if present in a fork. +func DefineSliceOfBitsContentOnFork(c *Codec, bits *bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfBitsContentOnFork(c.enc, *bits, filter) + return + } + if c.dec != nil { + DecodeSliceOfBitsContentOnFork(c.dec, bits, maxBits, filter) + return + } + // No hashing, done at the offset position +} + // DefineArrayOfUint64s defines the next field as a static array of uint64s. func DefineArrayOfUint64s[T commonUint64sLengths](c *Codec, ns *T) { if c.enc != nil { @@ -403,7 +513,7 @@ func DefineSliceOfUint64sOffset[T ~uint64](c *Codec, ns *[]T, maxItems uint64) { } // DefineSliceOfUint64sOffsetOnFork defines the next field as a dynamic slice of -// uint64s if present in fork. +// uint64s if present in a fork. func DefineSliceOfUint64sOffsetOnFork[T ~uint64](c *Codec, ns *[]T, maxItems uint64, filter ForkFilter) { if c.enc != nil { EncodeSliceOfUint64sOffsetOnFork(c.enc, *ns, filter) diff --git a/decoder.go b/decoder.go index bb87f62..7ede075 100644 --- a/decoder.go +++ b/decoder.go @@ -110,6 +110,24 @@ func DecodeBool[T ~bool](dec *Decoder, v *T) { } } +// DecodeBoolPointerOnFork parses a boolean if present in a fork. If not, the +// boolean pointer is set to nil. +// +// This method is similar to DecodeBool, but will also initialize the pointer +// if it is not allocated yet. +func DecodeBoolPointerOnFork[T ~bool](dec *Decoder, v **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *v = nil + return + } + // Otherwise fall back to the standard decoder + if *v == nil { + *v = new(T) + } + DecodeBool(dec, *v) +} + // DecodeUint8 parses a uint8. func DecodeUint8[T ~uint8](dec *Decoder, n *T) { if dec.err != nil { @@ -129,6 +147,24 @@ func DecodeUint8[T ~uint8](dec *Decoder, n *T) { } } +// DecodeUint8PointerOnFork parses a uint8 if present in a fork. If not, the +// uint8 pointer is set to nil. +// +// This method is similar to DecodeUint8, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint8PointerOnFork[T ~uint8](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint8(dec, *n) +} + // DecodeUint16 parses a uint16. func DecodeUint16[T ~uint16](dec *Decoder, n *T) { if dec.err != nil { @@ -148,6 +184,24 @@ func DecodeUint16[T ~uint16](dec *Decoder, n *T) { } } +// DecodeUint16PointerOnFork parses a uint16 if present in a fork. If not, the +// uint16 pointer is set to nil. +// +// This method is similar to DecodeUint16, but will also initialize the pointer +// if it is not allocated yet. 
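Every OnFork helper in this series gates on the same inlined condition; spelled out once as a standalone predicate (a sketch for exposition only, no such function exists in the package), the activity window reads:

// fieldActive spells out the window encoded by a ForkFilter: the field was
// already added at the given fork and, when a removal fork is set at all,
// not yet removed. ForkUnknown (the zero fork) doubles as the "never
// removed" sentinel, hence the filter.Removed > ForkUnknown guard.
func fieldActive(fork ssz.Fork, filter ssz.ForkFilter) bool {
	return fork >= filter.Added &&
		!(filter.Removed > ssz.ForkUnknown && fork >= filter.Removed)
}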
+func DecodeUint16PointerOnFork[T ~uint16](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint16(dec, *n) +} + // DecodeUint32 parses a uint32. func DecodeUint32[T ~uint32](dec *Decoder, n *T) { if dec.err != nil { @@ -167,6 +221,24 @@ func DecodeUint32[T ~uint32](dec *Decoder, n *T) { } } +// DecodeUint32PointerOnFork parses a uint32 if present in a fork. If not, the +// uint32 pointer is set to nil. +// +// This method is similar to DecodeUint32, but will also initialize the pointer +// if it is not allocated yet. +func DecodeUint32PointerOnFork[T ~uint32](dec *Decoder, n **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + if *n == nil { + *n = new(T) + } + DecodeUint32(dec, *n) +} + // DecodeUint64 parses a uint64. func DecodeUint64[T ~uint64](dec *Decoder, n *T) { if dec.err != nil { @@ -233,6 +305,17 @@ func DecodeUint256(dec *Decoder, n **uint256.Int) { } } +// DecodeUint256OnFork parses a uint256 if present in a fork. +func DecodeUint256OnFork(dec *Decoder, n **uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + DecodeUint256(dec, n) +} + // DecodeUint256BigInt parses a uint256 into a big.Int. func DecodeUint256BigInt(dec *Decoder, n **big.Int) { if dec.err != nil { @@ -258,6 +341,17 @@ func DecodeUint256BigInt(dec *Decoder, n **big.Int) { } } +// DecodeUint256BigIntOnFork parses a uint256 into a big.Int if present in a fork. +func DecodeUint256BigIntOnFork(dec *Decoder, n **big.Int, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *n = nil + return + } + // Otherwise fall back to the standard decoder + DecodeUint256BigInt(dec, n) +} + // DecodeStaticBytes parses a static binary blob. func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { if dec.err != nil { @@ -487,6 +581,17 @@ func DecodeSliceOfBitsOffset(dec *Decoder, bitlist *bitfield.Bitlist) { dec.decodeOffset(false) } +// DecodeSliceOfBitsOffsetOnFork parses a dynamic slice of (packed) bits if present +// in a fork. +func DecodeSliceOfBitsOffsetOnFork(dec *Decoder, bitlist *bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfBitsOffset(dec, bitlist) +} + // DecodeSliceOfBitsContent is the lazy data reader of DecodeSliceOfBitsOffset. 
func DecodeSliceOfBitsContent(dec *Decoder, bitlist *bitfield.Bitlist, maxBits uint64) { if dec.err != nil { @@ -535,6 +640,17 @@ func DecodeSliceOfBitsContent(dec *Decoder, bitlist *bitfield.Bitlist, maxBits u } } +// DecodeSliceOfBitsContentOnFork is the lazy data reader of DecodeSliceOfBitsOffsetOnFork. +func DecodeSliceOfBitsContentOnFork(dec *Decoder, bitlist *bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *bitlist = nil + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfBitsContent(dec, bitlist, maxBits) +} + // DecodeArrayOfUint64s parses a static array of uint64s. func DecodeArrayOfUint64s[T commonUint64sLengths](dec *Decoder, ns *T) { if dec.err != nil { diff --git a/encoder.go b/encoder.go index a57c1f6..a0d0501 100644 --- a/encoder.go +++ b/encoder.go @@ -102,6 +102,22 @@ func EncodeBool[T ~bool](enc *Encoder, v T) { } } +// EncodeBoolPointerOnFork serializes a boolean if present in a fork. +// +// Note, a nil pointer is serialized as false. +func EncodeBoolPointerOnFork[T ~bool](enc *Encoder, v *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if v == nil { + EncodeBool[bool](enc, false) + return + } + EncodeBool(enc, *v) +} + // EncodeUint8 serializes a uint8. func EncodeUint8[T ~uint8](enc *Encoder, n T) { if enc.outWriter != nil { @@ -116,6 +132,22 @@ func EncodeUint8[T ~uint8](enc *Encoder, n T) { } } +// EncodeUint8PointerOnFork serializes a uint8 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint8PointerOnFork[T ~uint8](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint8[uint8](enc, 0) + return + } + EncodeUint8(enc, *n) +} + // EncodeUint16 serializes a uint16. func EncodeUint16[T ~uint16](enc *Encoder, n T) { if enc.outWriter != nil { @@ -130,6 +162,22 @@ func EncodeUint16[T ~uint16](enc *Encoder, n T) { } } +// EncodeUint16PointerOnFork serializes a uint16 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint16PointerOnFork[T ~uint16](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint16[uint16](enc, 0) + return + } + EncodeUint16(enc, *n) +} + // EncodeUint32 serializes a uint32. func EncodeUint32[T ~uint32](enc *Encoder, n T) { if enc.outWriter != nil { @@ -144,6 +192,22 @@ func EncodeUint32[T ~uint32](enc *Encoder, n T) { } } +// EncodeUint32PointerOnFork serializes a uint32 if present in a fork. +// +// Note, a nil pointer is serialized as zero. 
+func EncodeUint32PointerOnFork[T ~uint32](enc *Encoder, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + if n == nil { + EncodeUint32[uint32](enc, 0) + return + } + EncodeUint32(enc, *n) +} + // EncodeUint64 serializes a uint64. func EncodeUint64[T ~uint64](enc *Encoder, n T) { if enc.outWriter != nil { @@ -198,7 +262,19 @@ func EncodeUint256(enc *Encoder, n *uint256.Int) { } } -// EncodeUint256BigInt serializes a big.Ing as uint256. +// EncodeUint256OnFork serializes a uint256 if present in a fork. +// +// Note, a nil pointer is serialized as zero. +func EncodeUint256OnFork(enc *Encoder, n *uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeUint256(enc, n) +} + +// EncodeUint256BigInt serializes a big.Int as uint256. // // Note, a nil pointer is serialized as zero. // Note, an overflow will be silently dropped. @@ -225,6 +301,20 @@ func EncodeUint256BigInt(enc *Encoder, n *big.Int) { } } +// EncodeUint256BigIntOnFork serializes a big.Int as uint256 if present in a +// fork. +// +// Note, a nil pointer is serialized as zero. +// Note, an overflow will be silently dropped. +func EncodeUint256BigIntOnFork(enc *Encoder, n *big.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeUint256BigInt(enc, n) +} + // EncodeStaticBytes serializes a static binary blob. // // The blob is passed by pointer to avoid high stack copy costs and a potential @@ -458,6 +548,19 @@ func EncodeSliceOfBitsOffset(enc *Encoder, bits bitfield.Bitlist) { } } +// EncodeSliceOfBitsOffsetOnFork serializes a dynamic slice of (packed) bits if +// present in a fork. +// +// Note, a nil slice of bits is serialized as an empty bit list. +func EncodeSliceOfBitsOffsetOnFork(enc *Encoder, bits bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfBitsOffset(enc, bits) +} + // EncodeSliceOfBitsContent is the lazy data writer for EncodeSliceOfBitsOffset. // // Note, a nil slice of bits is serialized as an empty bit list. @@ -482,6 +585,18 @@ func EncodeSliceOfBitsContent(enc *Encoder, bits bitfield.Bitlist) { } } +// EncodeSliceOfBitsContentOnFork is the lazy data writer for EncodeSliceOfBitsOffsetOnFork. +// +// Note, a nil slice of bits is serialized as an empty bit list. +func EncodeSliceOfBitsContentOnFork(enc *Encoder, bits bitfield.Bitlist, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfBitsContent(enc, bits) +} + // EncodeArrayOfUint64s serializes a static array of uint64s. 
// // The reason the ns is passed by pointer and not by value is to prevent it from diff --git a/forks.go b/forks.go index 47b6566..f131841 100644 --- a/forks.go +++ b/forks.go @@ -50,6 +50,7 @@ const ( // ForkMapping maps fork names to fork values. This is used internally by the // ssz codec generator to convert tags to values. var ForkMapping = map[string]Fork{ + "unknown": ForkUnknown, "frontier": ForkFrontier, "homestead": ForkHomestead, "dao": ForkDAO, diff --git a/hasher.go b/hasher.go index 15616a9..941574e 100644 --- a/hasher.go +++ b/hasher.go @@ -82,6 +82,22 @@ func HashBool[T ~bool](h *Hasher, v T) { } } +// HashBoolPointerOnFork hashes a boolean if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashBoolPointerOnFork[T ~bool](h *Hasher, v *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if v == nil { + HashBool[bool](h, false) + return + } + HashBool(h, *v) +} + // HashUint8 hashes a uint8. func HashUint8[T ~uint8](h *Hasher, n T) { var buffer [32]byte @@ -89,6 +105,22 @@ func HashUint8[T ~uint8](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint8PointerOnFork hashes a uint8 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint8PointerOnFork[T ~uint8](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint8[uint8](h, 0) + return + } + HashUint8(h, *n) +} + // HashUint16 hashes a uint16. func HashUint16[T ~uint16](h *Hasher, n T) { var buffer [32]byte @@ -96,6 +128,22 @@ func HashUint16[T ~uint16](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint16PointerOnFork hashes a uint16 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint16PointerOnFork[T ~uint16](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint16[uint16](h, 0) + return + } + HashUint16(h, *n) +} + // HashUint32 hashes a uint32. func HashUint32[T ~uint32](h *Hasher, n T) { var buffer [32]byte @@ -103,6 +151,22 @@ func HashUint32[T ~uint32](h *Hasher, n T) { h.insertChunk(buffer, 0) } +// HashUint32PointerOnFork hashes a uint32 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +func HashUint32PointerOnFork[T ~uint32](h *Hasher, n *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if n == nil { + HashUint32[uint32](h, 0) + return + } + HashUint32(h, *n) +} + // HashUint64 hashes a uint64. func HashUint64[T ~uint64](h *Hasher, n T) { var buffer [32]byte @@ -137,9 +201,22 @@ func HashUint256(h *Hasher, n *uint256.Int) { h.insertChunk(buffer, 0) } +// HashUint256OnFork hashes a uint256 if present in a fork. +// +// Note, a nil pointer is hashed as zero. 
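One consequence of the "unknown" entry registered in ForkMapping above: ForkUnknown is the lowest fork value, so a filter of ssz.ForkFilter{Added: ssz.ForkUnknown} can never exclude a field, and the monolith test types lean on this to declare pointer fields that stay live in every fork. A sketch (hypothetical type):

// Hypothetical pair of gated fields: both are pointers, but only the second
// can actually be missing from an encoding, since no fork precedes ForkUnknown.
type Sketch struct {
	A *uint64 `ssz-fork:"unknown"` // fork-gated in form, active in every fork
	B *uint64 `ssz-fork:"cancun"`  // genuinely absent before the cancun fork
}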
+func HashUint256OnFork(h *Hasher, n *uint256.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashUint256(h, n) +} + // HashUint256BigInt hashes a big.Int as uint256. // // Note, a nil pointer is hashed as zero. +// Note, an overflow will be silently dropped. func HashUint256BigInt(h *Hasher, n *big.Int) { var buffer [32]byte if n != nil { @@ -150,6 +227,19 @@ func HashUint256BigInt(h *Hasher, n *big.Int) { h.insertChunk(buffer, 0) } +// HashUint256BigIntOnFork hashes a big.Int as uint256 if present in a fork. +// +// Note, a nil pointer is hashed as zero. +// Note, an overflow will be silently dropped. +func HashUint256BigIntOnFork(h *Hasher, n *big.Int, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashUint256BigInt(h, n) +} + // HashStaticBytes hashes a static binary blob. // // The blob is passed by pointer to avoid high stack copy costs and a potential @@ -289,6 +379,19 @@ func HashSliceOfBits(h *Hasher, bits bitfield.Bitlist, maxBits uint64) { h.ascendMixinLayer(size, (maxBits+255)/256) } +// HashSliceOfBitsOnFork hashes a dynamic slice of (packed) bits if present in a +// fork. +// +// Note, a nil slice of bits is serialized as an empty bit list. +func HashSliceOfBitsOnFork(h *Hasher, bits bitfield.Bitlist, maxBits uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfBits(h, bits, maxBits) +} + // HashArrayOfUint64s hashes a static array of uint64s. // // The reason the ns is passed by pointer and not by value is to prevent it from diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go index 861c8d5..1b7acdb 100644 --- a/tests/consensus_specs_test.go +++ b/tests/consensus_specs_test.go @@ -49,10 +49,17 @@ func commonPrefix(a []byte, b []byte) []byte { // TestConsensusSpecBasics iterates over the basic container tests from the // consensus spec tests repo and runs the encoding/decoding/hashing round. 
func TestConsensusSpecBasics(t *testing.T) { + // Run through all the basic tests as simple types testConsensusSpecBasicType[*types.SingleFieldTestStruct](t, "SingleFieldTestStruct") testConsensusSpecBasicType[*types.SmallTestStruct](t, "SmallTestStruct") testConsensusSpecBasicType[*types.FixedTestStruct](t, "FixedTestStruct") testConsensusSpecBasicType[*types.BitsStruct](t, "BitsStruct") + + // Add monolith variations to the basic types + testConsensusSpecBasicType[*types.SingleFieldTestStructMonolith](t, "SingleFieldTestStruct") + testConsensusSpecBasicType[*types.SmallTestStructMonolith](t, "SmallTestStruct") + testConsensusSpecBasicType[*types.FixedTestStructMonolith](t, "FixedTestStruct") + testConsensusSpecBasicType[*types.BitsStructMonolith](t, "BitsStruct") } func testConsensusSpecBasicType[T newableObject[U], U any](t *testing.T, kind string) { @@ -232,7 +239,9 @@ func TestConsensusSpecs(t *testing.T) { testConsensusSpecType[*types.BeaconBlockBodyMonolith](t, "BeaconBlockBody", "phase0", "altair", "bellatrix", "capella", "deneb") testConsensusSpecType[*types.BeaconStateMonolith](t, "BeaconState", "phase0", "altair", "bellatrix", "capella", "deneb") testConsensusSpecType[*types.ExecutionPayloadMonolith](t, "ExecutionPayload", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ExecutionPayloadMonolith2](t, "ExecutionPayload", "bellatrix", "capella", "deneb") testConsensusSpecType[*types.ExecutionPayloadHeaderMonolith](t, "ExecutionPayloadHeader", "bellatrix", "capella", "deneb") + testConsensusSpecType[*types.ValidatorMonolith](t, "Validator") // Add some API variations to test different codec implementations testConsensusSpecType[*types.ExecutionPayloadVariation](t, "ExecutionPayload", "bellatrix") diff --git a/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go new file mode 100644 index 0000000..56d026c --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go @@ -0,0 +1,37 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *BitsStructMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 4 + } + size += 1 + 1 + 4 + 1 + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkUnknown { + size += ssz.SizeSliceOfBits(sizer, obj.A) + } + size += ssz.SizeSliceOfBits(sizer, obj.D) + + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *BitsStructMonolith) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineSliceOfBitsOffsetOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (0) - A - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.B, 2) // Field (1) - B - 1 bytes + ssz.DefineArrayOfBits(codec, &obj.C, 1) // Field (2) - C - 1 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.D, 6) // Offset (3) - D - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.E, 8) // Field (4) - E - 1 bytes + + // Define the dynamic data (fields) + ssz.DefineSliceOfBitsContentOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - ? bytes + ssz.DefineSliceOfBitsContent(codec, &obj.D, 6) // Field (3) - D - ? 
bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go new file mode 100644 index 0000000..8ce32e1 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_2_ssz.go @@ -0,0 +1,62 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns either the static size of the object if fixed == true, or +// the total size otherwise. +func (obj *ExecutionPayloadMonolith2) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { + size = 32 + 20 + 32 + 32 + 256 + 32 + 8 + 8 + 8 + 8 + if sizer.Fork() >= ssz.ForkFrontier { + size += 4 + } + if sizer.Fork() >= ssz.ForkUnknown { + size += 32 + } + size += 32 + 4 + if sizer.Fork() >= ssz.ForkShanghai { + size += 4 + } + if sizer.Fork() >= ssz.ForkCancun { + size += 8 + 8 + } + if fixed { + return size + } + if sizer.Fork() >= ssz.ForkFrontier { + size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) + } + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + if sizer.Fork() >= ssz.ForkShanghai { + size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *ExecutionPayloadMonolith2) DefineSSZ(codec *ssz.Codec) { + // Define the static data (fields and dynamic offsets) + ssz.DefineStaticBytes(codec, &obj.ParentHash) // Field ( 0) - ParentHash - 32 bytes + ssz.DefineStaticBytes(codec, &obj.FeeRecipient) // Field ( 1) - FeeRecipient - 20 bytes + ssz.DefineStaticBytes(codec, &obj.StateRoot) // Field ( 2) - StateRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot) // Field ( 3) - ReceiptsRoot - 32 bytes + ssz.DefineStaticBytes(codec, &obj.LogsBloom) // Field ( 4) - LogsBloom - 256 bytes + ssz.DefineStaticBytes(codec, &obj.PrevRandao) // Field ( 5) - PrevRandao - 32 bytes + ssz.DefineUint64(codec, &obj.BlockNumber) // Field ( 6) - BlockNumber - 8 bytes + ssz.DefineUint64(codec, &obj.GasLimit) // Field ( 7) - GasLimit - 8 bytes + ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes + ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes + ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes + ssz.DefineUint256BigIntOnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes + ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes + ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (15) - BlobGasUsed - 8 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun}) // Field (16) - ExcessBlobGas - 8 bytes + + // Define the dynamic data (fields) + ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Field (10) - ExtraData - ? bytes + ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824) // Field (13) - Transactions - ? 
bytes + ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index 7436180..569b201 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -11,7 +11,10 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size if sizer.Fork() >= ssz.ForkFrontier { size += 4 } - size += 32 + 32 + 4 + if sizer.Fork() >= ssz.ForkUnknown { + size += 32 + } + size += 32 + 4 if sizer.Fork() >= ssz.ForkShanghai { size += 4 } @@ -45,7 +48,7 @@ func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineUint64(codec, &obj.GasUsed) // Field ( 8) - GasUsed - 8 bytes ssz.DefineUint64(codec, &obj.Timestamp) // Field ( 9) - Timestamp - 8 bytes ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier}) // Offset (10) - ExtraData - 4 bytes - ssz.DefineUint256(codec, &obj.BaseFeePerGas) // Field (11) - BaseFeePerGas - 32 bytes + ssz.DefineUint256OnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (11) - BaseFeePerGas - 32 bytes ssz.DefineStaticBytes(codec, &obj.BlockHash) // Field (12) - BlockHash - 32 bytes ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824) // Offset (13) - Transactions - 4 bytes ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go new file mode 100644 index 0000000..9451427 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_fixed_test_struct_monolith_ssz.go @@ -0,0 +1,20 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *FixedTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + 8 + 4 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *FixedTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint8PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 1 bytes + ssz.DefineUint64PointerOnFork(codec, &obj.B, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (1) - B - 8 bytes + ssz.DefineUint32PointerOnFork(codec, &obj.C, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (2) - C - 4 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go new file mode 100644 index 0000000..86b5874 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_single_field_test_struct_monolith_ssz.go @@ -0,0 +1,18 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. 
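Since the generated SizeSSZ methods consult sizer.Fork(), the same object reports different sizes under different forks, and the caller picks the fork at the call site. A sketch, assuming the two-argument Size helper that patch 11 below invokes internally:

// Sketch: sizing one empty payload at two different forks. The cancun figure
// is exactly 16 bytes larger, as BlobGasUsed and ExcessBlobGas (8 bytes each)
// only count from the cancun fork onwards.
obj := new(ExecutionPayloadMonolith)
sizeShanghai := ssz.Size(obj, ssz.ForkShanghai)
sizeCancun := ssz.Size(obj, ssz.ForkCancun)
_, _ = sizeShanghai, sizeCancun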
+func (obj *SingleFieldTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + } + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *SingleFieldTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint8PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 1 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go new file mode 100644 index 0000000..3996ab2 --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_small_test_struct_monolith_ssz.go @@ -0,0 +1,20 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *SmallTestStructMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + if sizer.Fork() >= ssz.ForkUnknown { + size += 2 + } + size += 2 + return size +} + +// DefineSSZ defines how an object is encoded/decoded. +func (obj *SmallTestStructMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineUint16PointerOnFork(codec, &obj.A, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - 2 bytes + ssz.DefineUint16(codec, &obj.B) // Field (1) - B - 2 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go new file mode 100644 index 0000000..173a9db --- /dev/null +++ b/tests/testtypes/consensus-spec-tests/gen_validator_monolith_ssz.go @@ -0,0 +1,27 @@ +// Code generated by github.com/karalabe/ssz. DO NOT EDIT. + +package consensus_spec_tests + +import "github.com/karalabe/ssz" + +// SizeSSZ returns the total size of the static ssz object. +func (obj *ValidatorMonolith) SizeSSZ(sizer *ssz.Sizer) (size uint32) { + size = 48 + 32 + 8 + if sizer.Fork() >= ssz.ForkUnknown { + size += 1 + } + size += 8 + 8 + 8 + 8 + return size +} + +// DefineSSZ defines how an object is encoded/decoded. 
+func (obj *ValidatorMonolith) DefineSSZ(codec *ssz.Codec) { + ssz.DefineStaticBytes(codec, &obj.Pubkey) // Field (0) - Pubkey - 48 bytes + ssz.DefineStaticBytes(codec, &obj.WithdrawalCredentials) // Field (1) - WithdrawalCredentials - 32 bytes + ssz.DefineUint64(codec, &obj.EffectiveBalance) // Field (2) - EffectiveBalance - 8 bytes + ssz.DefineBoolPointerOnFork(codec, &obj.Slashed, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (3) - Slashed - 1 bytes + ssz.DefineUint64(codec, &obj.ActivationEligibilityEpoch) // Field (4) - ActivationEligibilityEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.ActivationEpoch) // Field (5) - ActivationEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.ExitEpoch) // Field (6) - ExitEpoch - 8 bytes + ssz.DefineUint64(codec, &obj.WithdrawableEpoch) // Field (7) - WithdrawableEpoch - 8 bytes +} diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go index e52500b..5388e7d 100644 --- a/tests/testtypes/consensus-spec-tests/types_monoliths.go +++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go @@ -4,12 +4,47 @@ package consensus_spec_tests -import "github.com/holiman/uint256" +import ( + "math/big" + + "github.com/holiman/uint256" + "github.com/prysmaticlabs/go-bitfield" +) + +//go:generate go run -cover ../../../cmd/sszgen -type SingleFieldTestStructMonolith -out gen_single_field_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type SmallTestStructMonolith -out gen_small_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type FixedTestStructMonolith -out gen_fixed_test_struct_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type BitsStructMonolith -out gen_bits_struct_monolith_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadMonolith -out gen_execution_payload_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadMonolith2 -out gen_execution_payload_monolith_2_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type ExecutionPayloadHeaderMonolith -out gen_execution_payload_header_monolith_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconBlockBodyMonolith -out gen_beacon_block_body_monolith_ssz.go //go:generate go run -cover ../../../cmd/sszgen -type BeaconStateMonolith -out gen_beacon_state_monolith_ssz.go +//go:generate go run -cover ../../../cmd/sszgen -type ValidatorMonolith -out gen_validator_monolith_ssz.go + +type SingleFieldTestStructMonolith struct { + A *byte `ssz-fork:"unknown"` +} + +type SmallTestStructMonolith struct { + A *uint16 `ssz-fork:"unknown"` + B uint16 +} + +type FixedTestStructMonolith struct { + A *uint8 `ssz-fork:"unknown"` + B *uint64 `ssz-fork:"unknown"` + C *uint32 `ssz-fork:"unknown"` +} + +type BitsStructMonolith struct { + A bitfield.Bitlist `ssz-max:"5" ssz-fork:"unknown"` + B [1]byte `ssz-size:"2" ssz:"bits"` + C [1]byte `ssz-size:"1" ssz:"bits"` + D bitfield.Bitlist `ssz-max:"6"` + E [1]byte `ssz-size:"8" ssz:"bits"` +} type BeaconBlockBodyMonolith struct { RandaoReveal [96]byte @@ -70,8 +105,28 @@ type ExecutionPayloadMonolith struct { GasLimit uint64 GasUsed uint64 Timestamp uint64 - ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` - BaseFeePerGas *uint256.Int + ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` + BaseFeePerGas *uint256.Int `ssz-fork:"unknown"` + BlockHash Hash + Transactions [][]byte `ssz-max:"1048576,1073741824"` + Withdrawals []*Withdrawal `ssz-max:"16" 
ssz-fork:"shanghai"` + BlobGasUsed *uint64 ` ssz-fork:"cancun"` + ExcessBlobGas *uint64 ` ssz-fork:"cancun"` +} + +type ExecutionPayloadMonolith2 struct { + ParentHash Hash + FeeRecipient Address + StateRoot Hash + ReceiptsRoot Hash + LogsBloom LogsBloom + PrevRandao Hash + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte `ssz-max:"32" ssz-fork:"frontier"` + BaseFeePerGas *big.Int `ssz-fork:"unknown"` BlockHash Hash Transactions [][]byte `ssz-max:"1048576,1073741824"` Withdrawals []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"` @@ -98,3 +153,14 @@ type ExecutionPayloadHeaderMonolith struct { BlobGasUsed *uint64 `ssz-fork:"cancun"` ExcessBlobGas *uint64 `ssz-fork:"cancun"` } + +type ValidatorMonolith struct { + Pubkey [48]byte + WithdrawalCredentials [32]byte + EffectiveBalance uint64 + Slashed *bool `ssz-fork:"unknown"` + ActivationEligibilityEpoch uint64 + ActivationEpoch uint64 + ExitEpoch uint64 + WithdrawableEpoch uint64 +} From de6bf27c1b73385adaf229e9368f4c260438a137 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= Date: Tue, 17 Sep 2024 11:53:52 +0300 Subject: [PATCH 11/12] ssz, tests: fix weird false failure reporting --- codec.go | 2 +- hasher.go | 7 +------ tests/consensus_specs_test.go | 2 +- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/codec.go b/codec.go index 58e2a11..5cf8ecf 100644 --- a/codec.go +++ b/codec.go @@ -209,7 +209,7 @@ func DefineUint256OnFork(c *Codec, n **uint256.Int, filter ForkFilter) { DecodeUint256OnFork(c.dec, n, filter) return } - HashUint256OnFork(c.has, *n, filter) // TODO(karalabe): Interesting bug, duplciate, weird place fails, explore + HashUint256OnFork(c.has, *n, filter) } // DefineUint256BigInt defines the next field as a uint256. diff --git a/hasher.go b/hasher.go index 941574e..e83fe92 100644 --- a/hasher.go +++ b/hasher.go @@ -68,11 +68,6 @@ type groupStats struct { chunks int // Number of chunks in this group } -// Fork retrieves the current fork (if any) that the hasher is operating in. -func (h *Hasher) Fork() Fork { - return h.codec.fork -} - // HashBool hashes a boolean. 
 func HashBool[T ~bool](h *Hasher, v T) {
 	if !v {
@@ -530,7 +525,7 @@ func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems u
 	defer h.ascendMixinLayer(uint64(len(objects)), maxItems)
 
 	// If threading is disabled, or hashing nothing, do it sequentially
-	if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.Fork())) < concurrencyThreshold {
+	if !h.threads || len(objects) == 0 || len(objects)*int(Size(objects[0], h.codec.fork)) < concurrencyThreshold {
 		for _, obj := range objects {
 			h.descendLayer()
 			obj.DefineSSZ(h.codec)
diff --git a/tests/consensus_specs_test.go b/tests/consensus_specs_test.go
index 1b7acdb..77f4540 100644
--- a/tests/consensus_specs_test.go
+++ b/tests/consensus_specs_test.go
@@ -325,7 +325,7 @@ func testConsensusSpecType[T newableObject[U], U any](t *testing.T, kind string,
 
 	// Run all the subtests found in the folder
 	for _, test := range tests {
-		t.Run(fmt.Sprintf("%s/%s/%s", fork, kind, test.Name()), func(t *testing.T) {
+		t.Run(fmt.Sprintf("%s/%s/%s", fork, reflect.TypeFor[U]().Name(), test.Name()), func(t *testing.T) {
 			// Parse the input SSZ data and the expected root for the test
 			inSnappy, err := os.ReadFile(filepath.Join(path, test.Name(), "serialized.ssz_snappy"))
 			if err != nil {

From 65907337624909b850c60741a871294cee04f84f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?=
Date: Tue, 17 Sep 2024 12:41:00 +0300
Subject: [PATCH 12/12] cmd, ssz, tests: add a few more missing monolith field encoders

---
 cmd/sszgen/opset.go                           | 14 ++++-
 codec.go                                      | 60 ++++++++++++++++++-
 decoder.go                                    | 55 ++++++++++++++++-
 encoder.go                                    | 58 +++++++++++++++++-
 hasher.go                                     | 49 ++++++++++++++-
 .../gen_beacon_state_monolith_ssz.go          |  7 ++-
 .../gen_bits_struct_monolith_ssz.go           | 14 ++---
 .../gen_execution_payload_monolith_ssz.go     | 49 ++++++++-------
 .../consensus-spec-tests/types_monoliths.go   |  6 +-
 9 files changed, 269 insertions(+), 43 deletions(-)

diff --git a/cmd/sszgen/opset.go b/cmd/sszgen/opset.go
index 9dc681b..6a7bb55 100644
--- a/cmd/sszgen/opset.go
+++ b/cmd/sszgen/opset.go
@@ -199,7 +199,12 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag
 				[]int{size},
 			}, nil
 		} else {
-			return nil, fmt.Errorf("pointer of array of bits not supported")
+			return &opsetStatic{
+				fmt.Sprintf("DefineArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly
+				fmt.Sprintf("EncodeArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly
+				fmt.Sprintf("DecodeArrayOfBitsPointer({{.Codec}}, &{{.Field}}, %d)", tags.size[0]), // inject bit-size directly
+				[]int{size},
+			}, nil
 		}
 	}
 	// Not a bitvector, interpret as plain byte array
@@ -242,6 +247,11 @@ func (p *parseContext) resolveArrayOpset(typ types.Type, size int, tags *sizeTag
 				[]int{size, 8},
 			}, nil
 		} else {
-			return nil, fmt.Errorf("pointer of array of byte basic type not supported")
+			return &opsetStatic{
+				"DefineArrayOfUint64sPointer({{.Codec}}, &{{.Field}})",
+				"EncodeArrayOfUint64sPointer({{.Codec}}, &{{.Field}})",
+				"DecodeArrayOfUint64sPointer({{.Codec}}, &{{.Field}})",
+				[]int{size, 8},
+			}, nil
 		}
 	default:
 		return nil, fmt.Errorf("unsupported array item basic type: %s", typ)
diff --git a/codec.go b/codec.go
index 5cf8ecf..47a11ea 100644
--- a/codec.go
+++ b/codec.go
@@ -430,6 +430,20 @@ func DefineArrayOfBits[T commonBitsLengths](c *Codec, bits *T, size uint64) {
 	HashArrayOfBits(c.has, bits)
 }
 
+// DefineArrayOfBitsPointerOnFork defines the next field as a static array of
+// (packed) bits if
present in a fork. +func DefineArrayOfBitsPointerOnFork[T commonBitsLengths](c *Codec, bits **T, size uint64, filter ForkFilter) { + if c.enc != nil { + EncodeArrayOfBitsPointerOnFork(c.enc, *bits, filter) + return + } + if c.dec != nil { + DecodeArrayOfBitsPointerOnFork(c.dec, bits, size, filter) + return + } + HashArrayOfBitsPointerOnFork(c.has, *bits, filter) +} + // DefineSliceOfBitsOffset defines the next field as a dynamic slice of (packed) // bits. func DefineSliceOfBitsOffset(c *Codec, bits *bitfield.Bitlist, maxBits uint64) { @@ -499,6 +513,20 @@ func DefineArrayOfUint64s[T commonUint64sLengths](c *Codec, ns *T) { HashArrayOfUint64s(c.has, ns) } +// DefineArrayOfUint64sPointerOnFork defines the next field as a static array of +// uint64s if present in a fork. +func DefineArrayOfUint64sPointerOnFork[T commonUint64sLengths](c *Codec, ns **T, filter ForkFilter) { + if c.enc != nil { + EncodeArrayOfUint64sPointerOnFork(c.enc, *ns, filter) + return + } + if c.dec != nil { + DecodeArrayOfUint64sPointerOnFork(c.dec, ns, filter) + return + } + HashArrayOfUint64sPointerOnFork(c.has, *ns, filter) +} + // DefineSliceOfUint64sOffset defines the next field as a dynamic slice of uint64s. func DefineSliceOfUint64sOffset[T ~uint64](c *Codec, ns *[]T, maxItems uint64) { if c.enc != nil { @@ -654,8 +682,8 @@ func DefineSliceOfStaticBytesContentOnFork[T commonBytesLengths](c *Codec, blobs // No hashing, done at the offset position } -// DefineSliceOfDynamicBytesOffset defines the next field as a dynamic slice of dynamic -// binary blobs. +// DefineSliceOfDynamicBytesOffset defines the next field as a dynamic slice of +// dynamic binary blobs. func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) { if c.enc != nil { EncodeSliceOfDynamicBytesOffset(c.enc, *blobs) @@ -668,6 +696,20 @@ func DefineSliceOfDynamicBytesOffset(c *Codec, blobs *[][]byte, maxItems uint64, HashSliceOfDynamicBytes(c.has, *blobs, maxItems, maxSize) } +// DefineSliceOfDynamicBytesOffsetOnFork defines the next field as a dynamic slice +// of dynamic binary blobs if present in a fork. +func DefineSliceOfDynamicBytesOffsetOnFork(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicBytesOffsetOnFork(c.enc, *blobs, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicBytesOffsetOnFork(c.dec, blobs, filter) + return + } + HashSliceOfDynamicBytesOnFork(c.has, *blobs, maxItems, maxSize, filter) +} + // DefineSliceOfDynamicBytesContent defines the next field as a dynamic slice of // dynamic binary blobs. func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64) { @@ -682,6 +724,20 @@ func DefineSliceOfDynamicBytesContent(c *Codec, blobs *[][]byte, maxItems uint64 // No hashing, done at the offset position } +// DefineSliceOfDynamicBytesContentOnFork defines the next field as a dynamic +// slice of dynamic binary blobs. +func DefineSliceOfDynamicBytesContentOnFork(c *Codec, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + if c.enc != nil { + EncodeSliceOfDynamicBytesContentOnFork(c.enc, *blobs, filter) + return + } + if c.dec != nil { + DecodeSliceOfDynamicBytesContentOnFork(c.dec, blobs, maxItems, maxSize, filter) + return + } + // No hashing, done at the offset position +} + // DefineSliceOfStaticObjectsOffset defines the next field as a dynamic slice of // static ssz objects. 
func DefineSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](c *Codec, objects *[]T, maxItems uint64) { diff --git a/decoder.go b/decoder.go index 7ede075..b87a9e3 100644 --- a/decoder.go +++ b/decoder.go @@ -376,9 +376,6 @@ func DecodeStaticBytes[T commonBytesLengths](dec *Decoder, blob *T) { // DecodeStaticBytesPointerOnFork parses a static binary blob if present in a fork. // If not, the bytes are set to nil. -// -// This method is similar to DecodeStaticBytes, but will also initialize the -// pointer if it is not allocated yet. func DecodeStaticBytesPointerOnFork[T commonBytesLengths](dec *Decoder, blob **T, filter ForkFilter) { // If the field is not active in the current fork, clear out the output if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { @@ -576,6 +573,21 @@ func DecodeArrayOfBits[T commonBitsLengths](dec *Decoder, bits *T, size uint64) } } +// DecodeArrayOfBitsPointerOnFork parses a static array of (packed) bits if present +// in a fork. If not, the bit array pointer is set to nil. +func DecodeArrayOfBitsPointerOnFork[T commonBitsLengths](dec *Decoder, bits **T, size uint64, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *bits = nil + return + } + // Otherwise fall back to the standard decoder + if *bits == nil { + *bits = new(T) + } + DecodeArrayOfBits(dec, *bits, size) +} + // DecodeSliceOfBitsOffset parses a dynamic slice of (packed) bits. func DecodeSliceOfBitsOffset(dec *Decoder, bitlist *bitfield.Bitlist) { dec.decodeOffset(false) @@ -681,6 +693,21 @@ func DecodeArrayOfUint64s[T commonUint64sLengths](dec *Decoder, ns *T) { } } +// DecodeArrayOfUint64sPointerOnFork parses a static array of uint64s if present +// in a fork. If not, the bit array pointer is set to nil. +func DecodeArrayOfUint64sPointerOnFork[T commonUint64sLengths](dec *Decoder, ns **T, filter ForkFilter) { + // If the field is not active in the current fork, clear out the output + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + *ns = nil + return + } + // Otherwise fall back to the standard decoder + if *ns == nil { + *ns = new(T) + } + DecodeArrayOfUint64s(dec, *ns) +} + // DecodeSliceOfUint64sOffset parses a dynamic slice of uint64s. func DecodeSliceOfUint64sOffset[T ~uint64](dec *Decoder, ns *[]T) { dec.decodeOffset(false) @@ -919,6 +946,17 @@ func DecodeSliceOfDynamicBytesOffset(dec *Decoder, blobs *[][]byte) { dec.decodeOffset(false) } +// DecodeSliceOfDynamicBytesOffsetOnFork parses a dynamic slice of dynamic binary +// blobs if present in a fork. +func DecodeSliceOfDynamicBytesOffsetOnFork(dec *Decoder, blobs *[][]byte, filter ForkFilter) { + // If the field is not active in the current fork, skip parsing the offset + if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard decoder + DecodeSliceOfDynamicBytesOffset(dec, blobs) +} + // DecodeSliceOfDynamicBytesContent is the lazy data reader of DecodeSliceOfDynamicBytesOffset. 
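Dynamic fields are defined in two passes, the fixed-size offset first and the variable-size content after, so a fork-gated dynamic field must carry the same filter in both calls, or pre-fork encodings would gain a stray 4-byte offset or lose their tail. A sketch with a hypothetical blob field:

// Hypothetical dynamic field gated to the shanghai fork.
type SketchDynamic struct {
	Blobs [][]byte `ssz-max:"16,1024" ssz-fork:"shanghai"`
}

func (obj *SketchDynamic) DefineSSZ(codec *ssz.Codec) {
	// Static pass: emit (or skip) the 4-byte offset of the field
	ssz.DefineSliceOfDynamicBytesOffsetOnFork(codec, &obj.Blobs, 16, 1024, ssz.ForkFilter{Added: ssz.ForkShanghai})
	// Dynamic pass: emit (or skip) the actual blob contents
	ssz.DefineSliceOfDynamicBytesContentOnFork(codec, &obj.Blobs, 16, 1024, ssz.ForkFilter{Added: ssz.ForkShanghai})
}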
 func DecodeSliceOfDynamicBytesContent(dec *Decoder, blobs *[][]byte, maxItems uint64, maxSize uint64) {
 	if dec.err != nil {
@@ -974,6 +1012,17 @@ func DecodeSliceOfDynamicBytesContent(dec *Decoder, blobs *[][]byte, maxItems ui
 	}
 }
 
+// DecodeSliceOfDynamicBytesContentOnFork is the lazy data reader of DecodeSliceOfDynamicBytesOffsetOnFork.
+func DecodeSliceOfDynamicBytesContentOnFork(dec *Decoder, blobs *[][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) {
+	// If the field is not active in the current fork, clear out the output
+	if dec.codec.fork < filter.Added || (filter.Removed > ForkUnknown && dec.codec.fork >= filter.Removed) {
+		*blobs = nil
+		return
+	}
+	// Otherwise fall back to the standard decoder
+	DecodeSliceOfDynamicBytesContent(dec, blobs, maxItems, maxSize)
+}
+
 // DecodeSliceOfStaticObjectsOffset parses a dynamic slice of static ssz objects.
 func DecodeSliceOfStaticObjectsOffset[T newableStaticObject[U], U any](dec *Decoder, objects *[]T) {
 	dec.decodeOffset(false)
diff --git a/encoder.go b/encoder.go
index a0d0501..3885723 100644
--- a/encoder.go
+++ b/encoder.go
@@ -527,6 +527,23 @@ func EncodeArrayOfBits[T commonBitsLengths](enc *Encoder, bits *T) {
 	}
 }
 
+// EncodeArrayOfBitsPointerOnFork serializes a static array of (packed) bits if
+// present in a fork.
+//
+// Note, a nil pointer is serialized as a zero-value bit array.
+func EncodeArrayOfBitsPointerOnFork[T commonBitsLengths](enc *Encoder, bits *T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	if bits == nil {
+		enc.encodeZeroes(reflect.TypeFor[T]().Len())
+		return
+	}
+	EncodeArrayOfBits(enc, bits)
+}
+
 // EncodeSliceOfBitsOffset serializes a dynamic slice of (packed) bits.
 //
 // Note, a nil slice of bits is serialized as an empty bit list.
@@ -625,6 +642,23 @@ func EncodeArrayOfUint64s[T commonUint64sLengths](enc *Encoder, ns *T) {
 	}
 }
 
+// EncodeArrayOfUint64sPointerOnFork serializes a static array of uint64s if
+// present in a fork.
+//
+// Note, a nil pointer is serialized as a uint64 array filled with zeroes.
+func EncodeArrayOfUint64sPointerOnFork[T commonUint64sLengths](enc *Encoder, ns *T, filter ForkFilter) {
+	// If the field is not active in the current fork, early return
+	if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) {
+		return
+	}
+	// Otherwise fall back to the standard encoder
+	if ns == nil {
+		enc.encodeZeroes(reflect.TypeFor[T]().Len() * 8)
+		return
+	}
+	EncodeArrayOfUint64s(enc, ns)
+}
+
 // EncodeSliceOfUint64sOffset serializes a dynamic slice of uint64s.
 func EncodeSliceOfUint64sOffset[T ~uint64](enc *Encoder, ns []T) {
 	// Nope, dive into actual encoding
@@ -810,7 +844,8 @@ func EncodeSliceOfStaticBytesContentOnFork[T commonBytesLengths](enc *Encoder, b
 	EncodeSliceOfStaticBytesContent(enc, blobs)
 }
 
-// EncodeSliceOfDynamicBytesOffset serializes a dynamic slice of dynamic binary blobs.
+// EncodeSliceOfDynamicBytesOffset serializes a dynamic slice of dynamic binary
+// blobs.
 func EncodeSliceOfDynamicBytesOffset(enc *Encoder, blobs [][]byte) {
 	if enc.outWriter != nil {
 		if enc.err != nil {
@@ -827,6 +862,17 @@ func EncodeSliceOfDynamicBytesOffset(enc *Encoder, blobs [][]byte) {
 	}
 }
 
+// EncodeSliceOfDynamicBytesOffsetOnFork serializes a dynamic slice of dynamic
+// binary blobs if present in a fork.
+func EncodeSliceOfDynamicBytesOffsetOnFork(enc *Encoder, blobs [][]byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicBytesOffset(enc, blobs) +} + // EncodeSliceOfDynamicBytesContent is the lazy data writer for EncodeSliceOfDynamicBytesOffset. func EncodeSliceOfDynamicBytesContent(enc *Encoder, blobs [][]byte) { // Nope, dive into actual encoding @@ -875,6 +921,16 @@ func EncodeSliceOfDynamicBytesContent(enc *Encoder, blobs [][]byte) { } } +// EncodeSliceOfDynamicBytesContentOnFork is the lazy data writer for EncodeSliceOfDynamicBytesOffsetOnFork. +func EncodeSliceOfDynamicBytesContentOnFork(enc *Encoder, blobs [][]byte, filter ForkFilter) { + // If the field is not active in the current fork, early return + if enc.codec.fork < filter.Added || (filter.Removed > ForkUnknown && enc.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard encoder + EncodeSliceOfDynamicBytesContent(enc, blobs) +} + // EncodeSliceOfStaticObjectsOffset serializes a dynamic slice of static ssz objects. func EncodeSliceOfStaticObjectsOffset[T StaticObject](enc *Encoder, objects []T) { if enc.outWriter != nil { diff --git a/hasher.go b/hasher.go index e83fe92..51d3b8a 100644 --- a/hasher.go +++ b/hasher.go @@ -259,7 +259,7 @@ func HashStaticBytesPointerOnFork[T commonBytesLengths](h *Hasher, blob *T, filt // costs, or we use reflect. Both is kind of crappy. // // https://github.com/golang/go/issues/69100 - h.hashBytesEmpty(reflect.TypeOf(blob).Elem().Len()) + h.hashBytesEmpty(reflect.TypeFor[T]().Len()) return } HashStaticBytes(h, blob) @@ -338,6 +338,25 @@ func HashArrayOfBits[T commonBitsLengths](h *Hasher, bits *T) { h.hashBytes(unsafe.Slice(&(*bits)[0], len(*bits))) } +// HashArrayOfBitsPointerOnFork hashes a static array of (packed) bits if present +// in a fork. +func HashArrayOfBitsPointerOnFork[T commonBitsLengths](h *Hasher, bits *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if bits == nil { + // Go generics cannot do len(T{}), so we either allocate and bear the GC + // costs, or we use reflect. Both is kind of crappy. + // + // https://github.com/golang/go/issues/69100 + h.hashBytesEmpty(reflect.TypeFor[T]().Len()) + return + } + HashArrayOfBits(h, bits) +} + // HashSliceOfBits hashes a dynamic slice of (packed) bits. // // Note, a nil slice of bits is serialized as an empty bit list. @@ -418,6 +437,23 @@ func HashArrayOfUint64s[T commonUint64sLengths](h *Hasher, ns *T) { h.ascendLayer(0) } +// HashArrayOfUint64sPointerOnFork hashes a static array of uint64s if present +// in a fork. +func HashArrayOfUint64sPointerOnFork[T commonUint64sLengths](h *Hasher, ns *T, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + if ns == nil { + h.descendLayer() + h.insertBlobChunksEmpty(reflect.TypeFor[T]().Len() * 8) + h.ascendLayer(0) + return + } + HashArrayOfUint64s(h, ns) +} + // HashSliceOfUint64s hashes a dynamic slice of uint64s. 
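The reflect.TypeFor[T]() calls in these nil-pointer paths (in place of reflect.TypeOf on a value) are what keep them nil-safe: the array length is recovered from the type parameter alone, with no value and no allocation needed (Go 1.22+). A standalone illustration of the pattern:

package main

import (
	"fmt"
	"reflect"
)

// arrayLen recovers an array type's length purely from the type parameter,
// so it works even when every value at hand is a nil pointer. Go generics
// cannot express len(T{}) directly (golang/go#69100), hence the reflection.
func arrayLen[T any]() int {
	return reflect.TypeFor[T]().Len()
}

func main() {
	fmt.Println(arrayLen[[32]byte]())  // 32
	fmt.Println(arrayLen[[8]uint64]()) // 8
}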
func HashSliceOfUint64s[T ~uint64](h *Hasher, ns []T, maxItems uint64) { h.descendMixinLayer() @@ -519,6 +555,17 @@ func HashSliceOfDynamicBytes(h *Hasher, blobs [][]byte, maxItems uint64, maxSize h.ascendMixinLayer(uint64(len(blobs)), maxItems) } +// HashSliceOfDynamicBytesOnFork hashes a dynamic slice of dynamic binary blobs +// if present in a fork. +func HashSliceOfDynamicBytesOnFork(h *Hasher, blobs [][]byte, maxItems uint64, maxSize uint64, filter ForkFilter) { + // If the field is not active in the current fork, early return + if h.codec.fork < filter.Added || (filter.Removed > ForkUnknown && h.codec.fork >= filter.Removed) { + return + } + // Otherwise fall back to the standard hasher + HashSliceOfDynamicBytes(h, blobs, maxItems, maxSize) +} + // HashSliceOfStaticObjects hashes a dynamic slice of static ssz objects. func HashSliceOfStaticObjects[T StaticObject](h *Hasher, objects []T, maxItems uint64) { h.descendMixinLayer() diff --git a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go index a2fb232..123143a 100644 --- a/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_beacon_state_monolith_ssz.go @@ -14,7 +14,10 @@ func (obj *BeaconStateMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint if fork := int(sizer.Fork()); fork < len(staticSizeCacheBeaconStateMonolith) { size = staticSizeCacheBeaconStateMonolith[fork] } else { - size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + 8192*8 + size = 8 + 32 + 8 + (*Fork)(nil).SizeSSZ(sizer) + (*BeaconBlockHeader)(nil).SizeSSZ(sizer) + 8192*32 + 8192*32 + 4 + (*Eth1Data)(nil).SizeSSZ(sizer) + 4 + 8 + 4 + 4 + 65536*32 + if sizer.Fork() >= ssz.ForkUnknown { + size += 8192 * 8 + } if sizer.Fork() < ssz.ForkAltair { size += 4 + 4 } @@ -75,7 +78,7 @@ func (obj *BeaconStateMonolith) DefineSSZ(codec *ssz.Codec) { ssz.DefineSliceOfStaticObjectsOffset(codec, &obj.Validators, 1099511627776) // Offset (11) - Validators - 4 bytes ssz.DefineSliceOfUint64sOffset(codec, &obj.Balances, 1099511627776) // Offset (12) - Balances - 4 bytes ssz.DefineUnsafeArrayOfStaticBytes(codec, obj.RandaoMixes[:]) // Field (13) - RandaoMixes - 2097152 bytes - ssz.DefineArrayOfUint64s(codec, &obj.Slashings) // Field (14) - Slashings - 65536 bytes + ssz.DefineArrayOfUint64sPointerOnFork(codec, &obj.Slashings, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (14) - Slashings - 65536 bytes ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.PreviousEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (15) - PreviousEpochAttestations - 4 bytes ssz.DefineSliceOfDynamicObjectsOffsetOnFork(codec, &obj.CurrentEpochAttestations, 4096, ssz.ForkFilter{Removed: ssz.ForkAltair}) // Offset (16) - CurrentEpochAttestations - 4 bytes ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.PreviousEpochParticipation, 1099511627776, ssz.ForkFilter{Added: ssz.ForkAltair}) // Offset (17) - PreviousEpochParticipation - 4 bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go index 56d026c..44e6dfe 100644 --- a/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_bits_struct_monolith_ssz.go @@ -8,9 +8,9 @@ import 
"github.com/karalabe/ssz" // the total size otherwise. func (obj *BitsStructMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) { if sizer.Fork() >= ssz.ForkUnknown { - size += 4 + size += 4 + 1 } - size += 1 + 1 + 4 + 1 + size += 1 + 4 + 1 if fixed { return size } @@ -25,11 +25,11 @@ func (obj *BitsStructMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint3 // DefineSSZ defines how an object is encoded/decoded. func (obj *BitsStructMonolith) DefineSSZ(codec *ssz.Codec) { // Define the static data (fields and dynamic offsets) - ssz.DefineSliceOfBitsOffsetOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (0) - A - 4 bytes - ssz.DefineArrayOfBits(codec, &obj.B, 2) // Field (1) - B - 1 bytes - ssz.DefineArrayOfBits(codec, &obj.C, 1) // Field (2) - C - 1 bytes - ssz.DefineSliceOfBitsOffset(codec, &obj.D, 6) // Offset (3) - D - 4 bytes - ssz.DefineArrayOfBits(codec, &obj.E, 8) // Field (4) - E - 1 bytes + ssz.DefineSliceOfBitsOffsetOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (0) - A - 4 bytes + ssz.DefineArrayOfBitsPointerOnFork(codec, &obj.B, 2, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (1) - B - 1 bytes + ssz.DefineArrayOfBits(codec, &obj.C, 1) // Field (2) - C - 1 bytes + ssz.DefineSliceOfBitsOffset(codec, &obj.D, 6) // Offset (3) - D - 4 bytes + ssz.DefineArrayOfBits(codec, &obj.E, 8) // Field (4) - E - 1 bytes // Define the dynamic data (fields) ssz.DefineSliceOfBitsContentOnFork(codec, &obj.A, 5, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (0) - A - ? bytes diff --git a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go index 569b201..31fde12 100644 --- a/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go +++ b/tests/testtypes/consensus-spec-tests/gen_execution_payload_monolith_ssz.go @@ -14,7 +14,10 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size if sizer.Fork() >= ssz.ForkUnknown { size += 32 } - size += 32 + 4 + size += 32 + if sizer.Fork() >= ssz.ForkUnknown { + size += 4 + } if sizer.Fork() >= ssz.ForkShanghai { size += 4 } @@ -27,7 +30,9 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size if sizer.Fork() >= ssz.ForkFrontier { size += ssz.SizeDynamicBytes(sizer, obj.ExtraData) } - size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + if sizer.Fork() >= ssz.ForkUnknown { + size += ssz.SizeSliceOfDynamicBytes(sizer, obj.Transactions) + } if sizer.Fork() >= ssz.ForkShanghai { size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals) } @@ -37,26 +42,26 @@ func (obj *ExecutionPayloadMonolith) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size // DefineSSZ defines how an object is encoded/decoded. 
 func (obj *ExecutionPayloadMonolith) DefineSSZ(codec *ssz.Codec) {
 	// Define the static data (fields and dynamic offsets)
-	ssz.DefineStaticBytes(codec, &obj.ParentHash)                                                                    // Field ( 0) - ParentHash - 32 bytes
-	ssz.DefineStaticBytes(codec, &obj.FeeRecipient)                                                                  // Field ( 1) - FeeRecipient - 20 bytes
-	ssz.DefineStaticBytes(codec, &obj.StateRoot)                                                                     // Field ( 2) - StateRoot - 32 bytes
-	ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot)                                                                  // Field ( 3) - ReceiptsRoot - 32 bytes
-	ssz.DefineStaticBytes(codec, &obj.LogsBloom)                                                                     // Field ( 4) - LogsBloom - 256 bytes
-	ssz.DefineStaticBytes(codec, &obj.PrevRandao)                                                                    // Field ( 5) - PrevRandao - 32 bytes
-	ssz.DefineUint64(codec, &obj.BlockNumber)                                                                        // Field ( 6) - BlockNumber - 8 bytes
-	ssz.DefineUint64(codec, &obj.GasLimit)                                                                           // Field ( 7) - GasLimit - 8 bytes
-	ssz.DefineUint64(codec, &obj.GasUsed)                                                                            // Field ( 8) - GasUsed - 8 bytes
-	ssz.DefineUint64(codec, &obj.Timestamp)                                                                          // Field ( 9) - Timestamp - 8 bytes
-	ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier})           // Offset (10) - ExtraData - 4 bytes
-	ssz.DefineUint256OnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown})                       // Field (11) - BaseFeePerGas - 32 bytes
-	ssz.DefineStaticBytes(codec, &obj.BlockHash)                                                                     // Field (12) - BlockHash - 32 bytes
-	ssz.DefineSliceOfDynamicBytesOffset(codec, &obj.Transactions, 1048576, 1073741824)                               // Offset (13) - Transactions - 4 bytes
-	ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Offset (14) - Withdrawals - 4 bytes
-	ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun})                    // Field (15) - BlobGasUsed - 8 bytes
-	ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun})                  // Field (16) - ExcessBlobGas - 8 bytes
+	ssz.DefineStaticBytes(codec, &obj.ParentHash)                                                                                    // Field ( 0) - ParentHash - 32 bytes
+	ssz.DefineStaticBytes(codec, &obj.FeeRecipient)                                                                                  // Field ( 1) - FeeRecipient - 20 bytes
+	ssz.DefineStaticBytes(codec, &obj.StateRoot)                                                                                     // Field ( 2) - StateRoot - 32 bytes
+	ssz.DefineStaticBytes(codec, &obj.ReceiptsRoot)                                                                                  // Field ( 3) - ReceiptsRoot - 32 bytes
+	ssz.DefineStaticBytes(codec, &obj.LogsBloom)                                                                                     // Field ( 4) - LogsBloom - 256 bytes
+	ssz.DefineStaticBytes(codec, &obj.PrevRandao)                                                                                    // Field ( 5) - PrevRandao - 32 bytes
+	ssz.DefineUint64(codec, &obj.BlockNumber)                                                                                        // Field ( 6) - BlockNumber - 8 bytes
+	ssz.DefineUint64(codec, &obj.GasLimit)                                                                                           // Field ( 7) - GasLimit - 8 bytes
+	ssz.DefineUint64(codec, &obj.GasUsed)                                                                                            // Field ( 8) - GasUsed - 8 bytes
+	ssz.DefineUint64(codec, &obj.Timestamp)                                                                                          // Field ( 9) - Timestamp - 8 bytes
+	ssz.DefineDynamicBytesOffsetOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier})                           // Offset (10) - ExtraData - 4 bytes
+	ssz.DefineUint256OnFork(codec, &obj.BaseFeePerGas, ssz.ForkFilter{Added: ssz.ForkUnknown})                                       // Field (11) - BaseFeePerGas - 32 bytes
+	ssz.DefineStaticBytes(codec, &obj.BlockHash)                                                                                     // Field (12) - BlockHash - 32 bytes
+	ssz.DefineSliceOfDynamicBytesOffsetOnFork(codec, &obj.Transactions, 1048576, 1073741824, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Offset (13) - Transactions - 4 bytes
+	ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai})                 // Offset (14) - Withdrawals - 4 bytes
+	ssz.DefineUint64PointerOnFork(codec, &obj.BlobGasUsed, ssz.ForkFilter{Added: ssz.ForkCancun})                                    // Field (15) - BlobGasUsed - 8 bytes
+	ssz.DefineUint64PointerOnFork(codec, &obj.ExcessBlobGas, ssz.ForkFilter{Added: ssz.ForkCancun})                                  // Field (16) - ExcessBlobGas - 8 bytes
 
 	// Define the dynamic data (fields)
-	ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier})           // Field (10) - ExtraData - ? bytes
-	ssz.DefineSliceOfDynamicBytesContent(codec, &obj.Transactions, 1048576, 1073741824)                               // Field (13) - Transactions - ? bytes
-	ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai}) // Field (14) - Withdrawals - ? bytes
+	ssz.DefineDynamicBytesContentOnFork(codec, &obj.ExtraData, 32, ssz.ForkFilter{Added: ssz.ForkFrontier})                           // Field (10) - ExtraData - ? bytes
+	ssz.DefineSliceOfDynamicBytesContentOnFork(codec, &obj.Transactions, 1048576, 1073741824, ssz.ForkFilter{Added: ssz.ForkUnknown}) // Field (13) - Transactions - ? bytes
+	ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai})                 // Field (14) - Withdrawals - ? bytes
 }
diff --git a/tests/testtypes/consensus-spec-tests/types_monoliths.go b/tests/testtypes/consensus-spec-tests/types_monoliths.go
index 5388e7d..4b2917b 100644
--- a/tests/testtypes/consensus-spec-tests/types_monoliths.go
+++ b/tests/testtypes/consensus-spec-tests/types_monoliths.go
@@ -40,7 +40,7 @@ type FixedTestStructMonolith struct {
 
 type BitsStructMonolith struct {
 	A bitfield.Bitlist `ssz-max:"5" ssz-fork:"unknown"`
-	B [1]byte          `ssz-size:"2" ssz:"bits"`
+	B *[1]byte         `ssz-size:"2" ssz:"bits" ssz-fork:"unknown"`
 	C [1]byte          `ssz-size:"1" ssz:"bits"`
 	D bitfield.Bitlist `ssz-max:"6"`
 	E [1]byte          `ssz-size:"8" ssz:"bits"`
@@ -76,7 +76,7 @@ type BeaconStateMonolith struct {
 	Validators                 []*Validator          `ssz-max:"1099511627776"`
 	Balances                   []uint64              `ssz-max:"1099511627776"`
 	RandaoMixes                [65536][32]byte
-	Slashings                  [8192]uint64
+	Slashings                  *[8192]uint64         `ssz-fork:"unknown"`
 	PreviousEpochAttestations  []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"`
 	CurrentEpochAttestations   []*PendingAttestation `ssz-max:"4096" ssz-fork:"!altair"`
 	PreviousEpochParticipation []byte                `ssz-max:"1099511627776" ssz-fork:"altair"`
@@ -108,7 +108,7 @@ type ExecutionPayloadMonolith struct {
 	ExtraData     []byte        `ssz-max:"32" ssz-fork:"frontier"`
 	BaseFeePerGas *uint256.Int  `ssz-fork:"unknown"`
 	BlockHash     Hash
-	Transactions  [][]byte      `ssz-max:"1048576,1073741824"`
+	Transactions  [][]byte      `ssz-max:"1048576,1073741824" ssz-fork:"unknown"`
 	Withdrawals   []*Withdrawal `ssz-max:"16" ssz-fork:"shanghai"`
 	BlobGasUsed   *uint64       ` ssz-fork:"cancun"`
 	ExcessBlobGas *uint64       ` ssz-fork:"cancun"`
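
All of the OnFork helpers in this patch inline the same gating predicate: a field is live in the half-open fork window [Added, Removed), and a zero Removed (ForkUnknown is the zero value) means the field is never retired. A minimal standalone sketch of that rule follows, assuming the Fork and ForkFilter types come from the new forks.go; the live helper is illustrative only, the patch deliberately inlines the comparison in each codec path instead of sharing a function:

// live reports whether a field gated by filter exists at the given fork.
// Illustrative sketch only: the encoder, decoder, hasher and sizer each
// inline this exact check rather than calling a shared helper.
func live(fork Fork, filter ForkFilter) bool {
	if fork < filter.Added {
		return false // field not yet introduced at this fork
	}
	if filter.Removed > ForkUnknown && fork >= filter.Removed {
		return false // field already dropped at this fork
	}
	return true
}

Read this way, ssz.ForkFilter{Added: ssz.ForkUnknown} matches every fork (used above for always-present fields that merely became pointers, like Slashings and Transactions), while ssz.ForkFilter{Removed: ssz.ForkAltair} matches only pre-Altair forks, mirroring the ssz-fork:"!altair" struct tag.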
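
Putting the plumbing together, a fork-gated container in this patch, whether hand-written or generated, follows the shape sketched below. DemoPayload is a made-up two-field type, not part of the test suite; every ssz call in it appears verbatim elsewhere in this diff. Note that a dynamic fork-gated field needs the same ForkFilter on both its Offset and its Content define call, and that SizeSSZ consults sizer.Fork() for the static and the dynamic portion alike:

// DemoPayload is a hypothetical container: GasUsed exists in every fork,
// Withdrawals only from Shanghai onwards.
type DemoPayload struct {
	GasUsed     uint64
	Withdrawals []*Withdrawal
}

// SizeSSZ returns either the static size of the object if fixed == true, or
// the total size otherwise.
func (obj *DemoPayload) SizeSSZ(sizer *ssz.Sizer, fixed bool) (size uint32) {
	size = 8 // GasUsed
	if sizer.Fork() >= ssz.ForkShanghai {
		size += 4 // Withdrawals offset is only serialized post-Shanghai
	}
	if fixed {
		return size
	}
	if sizer.Fork() >= ssz.ForkShanghai {
		size += ssz.SizeSliceOfStaticObjects(sizer, obj.Withdrawals)
	}
	return size
}

// DefineSSZ defines how an object is encoded/decoded.
func (obj *DemoPayload) DefineSSZ(codec *ssz.Codec) {
	// Define the static data (fields and dynamic offsets)
	ssz.DefineUint64(codec, &obj.GasUsed)
	ssz.DefineSliceOfStaticObjectsOffsetOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai})
	// Define the dynamic data (fields)
	ssz.DefineSliceOfStaticObjectsContentOnFork(codec, &obj.Withdrawals, 16, ssz.ForkFilter{Added: ssz.ForkShanghai})
}

This mirrors the generated gen_execution_payload_monolith_ssz.go above: applying the filter to only one half of an offset/content pair would desynchronize the serialized offsets from the emitted data on forks where the field is inactive.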