Skip to content

Commit

Permalink
fix pr comments & resolve build issues
Browse files Browse the repository at this point in the history
  • Loading branch information
Keszey Dániel authored and Keszey Dániel committed Jul 18, 2024
1 parent 3abed62 commit 0e9a241
Show file tree
Hide file tree
Showing 11 changed files with 143 additions and 176 deletions.
27 changes: 14 additions & 13 deletions packages/protocol/contracts/L1/ChainProver.sol
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@ import "./verifiers/IVerifier.sol";
contract ChainProver is EssentialContract, TaikoErrors {
using LibAddress for address;


/// @dev Struct representing transition to be proven.
struct ProofData {
IVerifier verifier;
Expand All @@ -27,14 +26,16 @@ contract ChainProver is EssentialContract, TaikoErrors {

/// @dev Struct representing transition to be proven.
struct ProofBatch {
TaikoData.BlockMetadata blockMetadata; //Maybe needed (?)
bytes32 newStateHash; // keccak(new_l1_blockhash, new_root))
// These 2 keccak(new_l1_blockhash, new_root)) will be the new state (hash)
// and the transition hash is the old and the new, hashed together.
uint64 newL1BlockNumber; // Which L1 block is "covered" (proved) with this transaction
bytes32 newL1Root; // The new root hash
ProofData[] proofs;
address prover;
}

// New, and only state var
bytes32 public currentStateHash; //keccak(l1_blockhash, root)
bytes32 public currentStateHash; //equals to: keccak(newL1BlockNumber, newL1Root)

function init(address _owner, address _addressManager) external initializer {
if (_addressManager == address(0)) {
Expand All @@ -43,10 +44,13 @@ contract ChainProver is EssentialContract, TaikoErrors {
__Essential_init(_owner, _addressManager);
}

/// @dev Proposes a Taiko L2 block.
function proveBlock(bytes calldata data) external nonReentrant whenNotPaused {
/// @dev Proves up until a specific L1 block
function prove(bytes calldata data) external nonReentrant whenNotPaused {
// Decode the block data
ProofBatch memory proofBatch = abi.decode(data, (ProofBatch));
// This is how we get the transition hash
bytes32 l1BlockHash = blockhash(proofBatch.newL1BlockNumber);
bytes32 newStateHash = keccak256(abi.encode(l1BlockHash, proofBatch.newL1Root));

VerifierRegistry verifierRegistry = VerifierRegistry(resolve("verifier_registry", false));
// Verify the proofs
Expand All @@ -61,8 +65,7 @@ contract ChainProver is EssentialContract, TaikoErrors {
require(verifierRegistry.isVerifier(address(verifier)), "invalid verifier");
// Verify the proof
verifier.verifyProof(
keccak256(abi.encode(proofBatch.blockMetadata)), //Maybe block metadata (?) also an input ?
keccak256(abi.encode(currentStateHash, proofBatch.newStateHash)),
keccak256(abi.encode(currentStateHash, newStateHash)),
proofBatch.prover,
proofBatch.proofs[i].proof
);
Expand All @@ -73,10 +76,8 @@ contract ChainProver is EssentialContract, TaikoErrors {
// Can use some custom logic here. but let's keep it simple
require(proofBatch.proofs.length >= 3, "insufficient number of proofs");

currentStateHash = proofBatch.newStateHash;

//todo(@Brecht) How do we detect (or poison) verifiers or allow the chain to be corrected if the proof verifiers are buggy ?


currentStateHash = newStateHash;
//todo(@Brecht, @Dani) If somebody still gets an invalid proof through, we have to have
// another safety mechanism! (e.g.: guardians, etc.)
}
}
78 changes: 20 additions & 58 deletions packages/protocol/contracts/L1/TaikoL1.sol
Original file line number Diff line number Diff line change
Expand Up @@ -72,14 +72,14 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
for (uint256 i = 0; i < data.length; i++) {
if (txLists.length != 0) {
// If calldata, then pass forward the calldata
_blocks[i] =_proposeBlock(data[i], txLists[i]);
_blocks[i] = _proposeBlock(data[i], txLists[i]);
} else {
// Blob otherwise
_blocks[i] = _proposeBlock(data[i], bytes(""));
}

// Check if we have whitelisted proposers
if (!_isProposerPermitted(_blocks[i])) {
if (!_isProposerPermitted()) {
revert L1_INVALID_PROPOSER();
}
}
Expand All @@ -94,9 +94,7 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
bytes memory txList
)
private
returns (
TaikoData.BlockMetadata memory _block
)
returns (TaikoData.BlockMetadata memory _block)
{
TaikoData.Config memory config = getConfig();

Expand All @@ -114,7 +112,8 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
require(_block.blobUsed == (txList.length == 0), "INVALID_BLOB_USED");
// Verify DA data
if (_block.blobUsed) {
// Todo: Is blobHash posisble to be checked and pre-calculated in input metadata off-chain ?
// Todo: Is blobHash possible to be checked and pre-calculated in input metadata
// off-chain ?
// or shall we do something with it to cross check ?
// require(_block.blobHash == blobhash(0), "invalid data blob");
require(
Expand All @@ -128,11 +127,13 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
}

// Check that the tx length is non-zero and within the supported range
require(
_block.txListByteSize != 0 || _block.txListByteSize < config.blockMaxTxListBytes,
"invalid txlist size"
);
require(_block.txListByteSize <= config.blockMaxTxListBytes, "invalid txlist size");

// Also since we dont write into storage this check is hard to do here + the
// parentBlock.l1StateBlockNumber too for the preconfs (checking the 4 epoch window)
// I just guess, but also during proving we can see if this condition is
// fulfilled OR not, and then resulting in an empty block (+slashing of the
// proposer/preconfer) ?
TaikoData.Block storage parentBlock = state.blocks[(state.numBlocks - 1)];

require(_block.parentMetaHash == parentBlock.metaHash, "invalid parentMetaHash");
Expand All @@ -142,6 +143,7 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
// We only allow the L1 block to be 4 epochs old.
// The other constraint is that the L1 block number needs to be larger than or equal the one
// in the previous L2 block.

if (
_block.l1StateBlockNumber + 128 < block.number
|| _block.l1StateBlockNumber >= block.number
Expand All @@ -161,52 +163,20 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
revert L1_INVALID_TIMESTAMP();
}

// So basically we do not store these anymore!

// // Create the block that will be stored onchain
// TaikoData.Block memory blk = TaikoData.Block({
// blockHash: _block.blockHash,
// metaHash: keccak256(data),
// blockId: state.numBlocks,
// timestamp: _block.timestamp,
// l1StateBlockNumber: _block.l1StateBlockNumber
// });

// // Store the block
// state.blocks[state.numBlocks] = blk;

// // Store the passed in block hash as is
// state.transitions[blk.blockId][_block.parentBlockHash].blockHash = _block.blockHash;
// // Big enough number so that we are sure we don't hit that deadline in the future.
// state.transitions[blk.blockId][_block.parentBlockHash].verifiableAfter = type(uint64).max;

// // Increment the counter (cursor) by 1.
// state.numBlocks++;

emit BlockProposed({ blockId: _block.l2BlockNumber, meta: _block });
}

/// @notice Gets the details of a block.
/// @param blockId Index of the block.
/// @return blk The block.
function getBlock(uint64 blockId) public view returns (TaikoData.Block memory) {
//Todo (Brecht): we needed for some things like: BlockMetadata, used when parentBlock() was needed etc.
return state.blocks[blockId];
}

function getLastVerifiedBlockId() public view returns (uint256) {
return uint256(state.lastVerifiedBlockId);
}

function getNumOfBlocks() public view returns (uint256) {
return uint256(state.numBlocks);
}
// These will be unknown in the smart contract
// Maybe possible to extract with ChainProver, but not directly from here.
// function getBlock(uint64 blockId) {}
// function getLastVerifiedBlockId() {}
// function getNumOfBlocks() {}

/// @notice Gets the configuration of the TaikoL1 contract.
/// @return Config struct containing configuration parameters.
function getConfig() public view virtual returns (TaikoData.Config memory) {
return TaikoData.Config({
chainId: 167_008,
chainId: 167_008, //Maybe use a range or just throw this away.
// Limited by the PSE zkEVM circuits.
blockMaxGasLimit: 15_000_000,
// Each go-ethereum transaction has a size limit of 128KB,
Expand All @@ -226,16 +196,8 @@ contract TaikoL1 is EssentialContract, TaikoEvents, TaikoErrors {
return true;
}

// Additinal proposer rules
function _isProposerPermitted(TaikoData.BlockMetadata memory _block) private returns (bool) {
if (_block.l2BlockNumber == 1) {
// Only proposer_one can propose the first block after genesis
address proposerOne = resolve("proposer_one", true);
if (proposerOne != address(0) && msg.sender != proposerOne) {
return false;
}
}

// Additional proposer rules
function _isProposerPermitted() private returns (bool) {
// If there's a sequencer registry, check if the block can be proposed by the current
// proposer
ISequencerRegistry sequencerRegistry =
Expand Down
60 changes: 37 additions & 23 deletions packages/protocol/contracts/L1/VerifierBattleRoyale.sol
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,15 @@ contract VerifierBattleRoyale is EssentialContract {
/// @dev Struct representing transition to be proven.
struct ProofData {
IVerifier verifier;
bytes32 newStatHashTransitionHash; // This differs from BasedOperator ! Mainly because of
// transition comparison for the battle!!
bytes32 postRoot; // post root from this hashing: keccak(new_l1_blockhash, new_root)
bytes proof;
}

struct ProofBatch {
TaikoData.BlockMetadata blockMetadata;
bytes32 preTransitionHash; //(l1BlockHash and root) // This has to be same for all
// proofData, and we need to prove that we can achieve different post state -> which
// should not be allowed.
bytes32 postL1BlockHash;
ProofData[] proofs;
address prover;
}
Expand Down Expand Up @@ -67,23 +69,34 @@ contract VerifierBattleRoyale is EssentialContract {
for (uint256 i = 0; i < proofBatch.proofs.length; i++) {
IVerifier verifier = proofBatch.proofs[i].verifier;
require(verifierRegistry.isVerifier(address(verifier)), "invalid verifier");

bytes32 transitionToBeVerified = keccak256(
abi.encode(
proofBatch.preTransitionHash,
keccak256(abi.encode(proofBatch.postL1BlockHash, proofBatch.proofs[i].postRoot))
)
);

verifier.verifyProof(
keccak256(abi.encode(proofBatch.blockMetadata)),
proofBatch.proofs[i].newStatHashTransitionHash,
proofBatch.prover,
proofBatch.proofs[i].proof
transitionToBeVerified, proofBatch.prover, proofBatch.proofs[i].proof
);
}

if (proofBatch.proofs.length == 2) {
/* Same verifier, same block, but different blockhashes/signalroots */

require(
proofBatch.proofs[0].verifier == proofBatch.proofs[1].verifier,
"verifiers not the same"
);
require(
address(proofBatch.proofs[0].verifier) == brokenVerifier,
"incorrect broken verifier address"
);

require(proofBatch.proofs[0].newStatHashTransitionHash != proofBatch.proofs[1].newStatHashTransitionHash, "blockhash the same");
require(
proofBatch.proofs[0].postRoot != proofBatch.proofs[1].postRoot,
"post state is the same"
);
} else if (proofBatch.proofs.length == 3) {
/* Multiple verifiers in a consensus show that another verifier is faulty */

Expand All @@ -100,20 +113,21 @@ contract VerifierBattleRoyale is EssentialContract {

// Reference proofs need to be placed first in the array, the faulty proof is listed
// last
for (uint256 i = 0; i < proofBatch.proofs.length - 1; i++) {
bytes32 transitionA = proofBatch.proofs[i].newStatHashTransitionHash;
bytes32 transitionB = proofBatch.proofs[i + 1].newStatHashTransitionHash;
// Need to figure out this part later
// require(
// transitionA.parentBlockHash == transitionB.parentBlockHash,
// "parentHash not the same"
// );
// if (i < proofBatch.proofs.length - 2) {
// require(transitionA.blockHash == transitionB.blockHash, "blockhash the same");
// } else {
// require(transitionA.blockHash != transitionB.blockHash, "blockhash the same");
// }
}
require(
proofBatch.proofs[0].postRoot == proofBatch.proofs[1].postRoot, "incorrect order"
);
require(
proofBatch.proofs[1].postRoot != proofBatch.proofs[2].postRoot, "incorrect order"
);

//require also that brokenVerifier is the same as the 3rd's verifier address
require(
proofBatch.proofs[1].postRoot != proofBatch.proofs[2].postRoot, "incorrect order"
);
require(
address(proofBatch.proofs[1].verifier) == brokenVerifier,
"incorrect broken verifier address"
);
} else {
revert("unsupported claim");
}
Expand Down
3 changes: 2 additions & 1 deletion packages/protocol/contracts/L1/actors/ProverPayment.sol
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,8 @@ contract ProverPayment {

// // Propose the block
// _blocks =
// taikoL1.proposeBlock{ value: taikoL1.PROVER_BOND() }(data, txLists, assignment.prover);
// taikoL1.proposeBlock{ value: taikoL1.PROVER_BOND() }(data, txLists,
// assignment.prover);

uint64 highestl2BlockNumber = _blocks[_blocks.length - 1].l2BlockNumber;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@ contract GuardianVerifier is EssentialContract, IVerifier {

/// @inheritdoc IVerifier
function verifyProof(
bytes32, /*blockMetaHash*/
bytes32, /*newStateHashTransition*/
bytes32, /*transitionHash*/
address prover,
bytes calldata /*proof*/
)
Expand Down
32 changes: 2 additions & 30 deletions packages/protocol/contracts/L1/verifiers/IVerifier.sol
Original file line number Diff line number Diff line change
Expand Up @@ -11,39 +11,11 @@ import "../TaikoData.sol";
/// @title IVerifier Interface
/// @notice Defines the function that handles proof verification.
interface IVerifier {
// Todo(Brecht/Dani):
// This interface differs from taiko-mono's latest verifyProof(), mainly because we dont have
// contestation for example, so no need to have TierProof structure. But further bundling the
// structs into 1, and incorporating them into TaikoData might be desirable, depending on how we
// use.
// See the taiko-mono used interface below this function signature.
function verifyProof(
bytes32 blockMetaHash, //We dont need to post the full BlockMetadata struct
bytes32 newStateHashTransition, // keccak(keccak(current_l1_blockhash, current_root), keccak(new_l1_blockhash, new_root))
bytes32 transitionHash, // keccak(keccak(current_l1_blockhash, current_root),
// keccak(new_l1_blockhash, new_root))
address prover,
bytes calldata proof
)
external;

// As a reference, used by taiko-mono currently:
// struct Context {
// bytes32 metaHash;
// bytes32 blobHash;
// address prover;
// uint64 blockId;
// bool isContesting;
// bool blobUsed;
// address msgSender;
// }

// /// @notice Verifies a proof.
// /// @param _ctx The context of the proof verification.
// /// @param _tran The transition to verify.
// /// @param _proof The proof to verify.
// function verifyProof(
// Context calldata _ctx,
// TaikoData.Transition calldata _tran,
// TaikoData.TierProof calldata _proof
// )
// external;
}
5 changes: 2 additions & 3 deletions packages/protocol/contracts/L1/verifiers/MockSgxVerifier.sol
Original file line number Diff line number Diff line change
Expand Up @@ -136,10 +136,9 @@ contract MockSgxVerifier is EssentialContract, IVerifier {
}

/// @inheritdoc IVerifier
/* MODIFIED- TO RETURN TRUE WITHOUT REAL VERIFICATION!!! */
/* MODIFIED - TO RETURN TRUE WITHOUT REAL VERIFICATION!!! */
function verifyProof(
bytes32, /*blockMetaHash*/
bytes32, /*newStateHashTransition*/
bytes32, /*transitionHash*/
address, /*prover*/
bytes calldata /*proof*/
)
Expand Down
Loading

0 comments on commit 0e9a241

Please sign in to comment.