Raid poc #104

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
3 changes: 3 additions & 0 deletions .gitmodules
@@ -5,3 +5,6 @@
[submodule "lib/openzeppelin-contracts"]
path = lib/openzeppelin-contracts
url = https://github.com/OpenZeppelin/openzeppelin-contracts
[submodule "lib/urc"]
path = lib/urc
url = https://github.com/eth-fabric/urc
1 change: 1 addition & 0 deletions lib/urc
Submodule urc added at e0c5c8
77 changes: 77 additions & 0 deletions src/protocol/fabric/EIP-4788.sol
@@ -0,0 +1,77 @@
// SPDX-License-Identifier: MIT

// Adapted from https://github.com/NethermindEth/Taiko-Preconf-AVS/blob/master/SmartContracts/src/libraries/EIP4788.sol
// Referenced from: https://ethresear.ch/t/slashing-proofoor-on-chain-slashed-validator-proofs/19421
pragma solidity ^0.8.28;

import {MerkleUtils} from "./MerkleUtils.sol";

library EIP4788 {
struct ValidatorProof {
// Chunks of the SSZ-encoded validator
bytes32[8] validator;
// Index of the validator in the beacon state validator list
uint256 validatorIndex;
// Proof of inclusion of validator in beacon state validator list
bytes32[] validatorProof;
// Root of the validator list in the beacon state
bytes32 validatorsRoot;
// Proof of inclusion of validator list in the beacon state
bytes32[] beaconStateProof;
// Root of the beacon state
bytes32 beaconStateRoot;
// Proof of inclusion of beacon state in the beacon block
bytes32[] beaconBlockProofForState;
// Proof of inclusion of the validator index in the beacon block
bytes32[] beaconBlockProofForProposerIndex;
}

function verifyValidatorProof(bytes32 beaconBlockRoot, ValidatorProof memory validatorProof)
internal
pure
returns (bool)
{
// Validator is verified against the validator list in the beacon state
bytes32 validatorHashTreeRoot = MerkleUtils.merkleize(validatorProof.validator);
if (
!MerkleUtils.verifyProof(
validatorProof.validatorProof,
validatorProof.validatorsRoot,
validatorHashTreeRoot,
validatorProof.validatorIndex
)
) {
return false;
}

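// Validator list is verified against the beacon state
// (validators is field 11 of the BeaconState container)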
if (
!MerkleUtils.verifyProof(
validatorProof.beaconStateProof, validatorProof.beaconStateRoot, validatorProof.validatorsRoot, 11
)
) {
return false;
}

// Beacon state is verified against the beacon block
if (
!MerkleUtils.verifyProof(
validatorProof.beaconBlockProofForState, beaconBlockRoot, validatorProof.beaconStateRoot, 3
)
) {
return false;
}

// Validator index is verified against the beacon block
if (
!MerkleUtils.verifyProof(
validatorProof.beaconBlockProofForProposerIndex,
beaconBlockRoot,
MerkleUtils.toLittleEndian(validatorProof.validatorIndex),
1
)
) {
return false;
}
return true;
}
}
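For reference, a minimal sketch of how a consumer might drive this library. The contract and function names below are hypothetical and not part of this PR; it assumes the ValidatorProof fields are assembled off-chain and reads the same EIP-4788 beacon roots contract that FabricInbox uses.

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.28;

import {EIP4788} from "./EIP-4788.sol";

// Hypothetical consumer, for illustration only
contract ValidatorProofConsumer {
    // EIP-4788 beacon roots ring buffer (same address used by FabricInbox)
    address private constant BEACON_ROOTS = 0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02;

    function isKnownValidator(uint256 timestamp, EIP4788.ValidatorProof calldata proof)
        external
        view
        returns (bool)
    {
        // The beacon roots contract maps a timestamp to the parent beacon
        // block root; it reverts if the timestamp is outside its ~1 day window
        (bool ok, bytes memory data) = BEACON_ROOTS.staticcall(abi.encode(timestamp));
        require(ok, "timestamp outside beacon roots window");
        return EIP4788.verifyValidatorProof(bytes32(data), proof);
    }
}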
262 changes: 262 additions & 0 deletions src/protocol/fabric/FabricInbox.sol
@@ -0,0 +1,262 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.28;

import {IBlobRefRegistry} from "../../blobs/IBlobRefRegistry.sol";

import {IDelayedInclusionStore} from "../IDelayedInclusionStore.sol";
import {IProposerFees} from "../IProposerFees.sol";
import {IPublicationFeed} from "../IPublicationFeed.sol";
import {DelayedInclusionStore} from "../taiko_alethia/DelayedInclusionStore.sol";

import {EIP4788} from "./EIP-4788.sol";
import {IRegistry} from "urc/src/IRegistry.sol";
import {BLS} from "urc/src/lib/BLS.sol";

contract FabricInbox is DelayedInclusionStore {
struct Metadata {
uint256 anchorBlockId;
bytes32 anchorBlockHash;
bool isDelayedInclusion;
}

// Contracts
IPublicationFeed public immutable publicationFeed;
IProposerFees public immutable proposerFees;
IRegistry public immutable registry;

// Publication ID trackers
uint64 public unsafeHead;
uint64 public safeHead;

// URC-related parameters
address public immutable slasher;
uint256 public immutable requiredCollateralWei;

// EIP-4788
address private constant beaconRootsContract = 0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02;
uint256 private immutable GENESIS_TIMESTAMP;

// attributes associated with the publication
uint256 private constant METADATA = 0;
uint256 private constant LAST_PUBLICATION = 1;
uint256 private constant BLOB_REFERENCE = 2;
uint256 private constant VALIDATOR_PUBKEY = 3;

event NewSafeHead(uint64 indexed newSafeHead);

constructor(
address _publicationFeed,
address _blobRefRegistry,
address _registry,
address _slasher,
address _proposerFees,
uint256 _inclusionDelay,
uint256 _requiredCollateralWei,
uint256 _genesisTimestamp
) DelayedInclusionStore(_inclusionDelay, _blobRefRegistry) {
publicationFeed = IPublicationFeed(_publicationFeed);
registry = IRegistry(_registry);
slasher = _slasher;
proposerFees = IProposerFees(_proposerFees);
requiredCollateralWei = _requiredCollateralWei;
GENESIS_TIMESTAMP = _genesisTimestamp;
}

/// @notice Publish a new publication and update the heads
/// @param nBlobs The number of blobs being published
/// @param registrationProof The Merkle proof that msg.sender is registered with the URC
/// @param validatorProof The EIP-4788 proof establishing who the validator was at a given slot
/// @param unsafeHeader The PublicationHeader from the previous publication
/// @param unsafeAttributeHashes The attribute hashes of the previous publication
/// @param replaceUnsafeHead Whether to replace the unsafe head or update the safe head
Collaborator:

So we only have one branch of unsafe head? I was thinking we could have multiple unsafe "branches" and retroactively choose one later, so the "eventually-safe" branch can't be kicked out by a malicious actor. But I guess that adds gas cost and complexity?

Collaborator:

> so the "eventually-safe" branch can't be kicked out by a malicious actor

I guess this can be mitigated by slashing the malicious actor for submitting a "non-eventually-safe" head. However, without EIP-7917 that introduces the risk of slashing an innocent actor when the lookahead changes due to an EB change.

function publish(
uint256 nBlobs,
IRegistry.RegistrationProof calldata registrationProof,
EIP4788.ValidatorProof calldata validatorProof,
IPublicationFeed.PublicationHeader calldata unsafeHeader,
bytes32[] calldata unsafeAttributeHashes,
bool replaceUnsafeHead
) external payable {
// Verify the proposer is allowed to publish
_isAllowedProposer(registrationProof);

// Publish the attributes
(uint64 newPublicationId, Metadata memory metadata) =
_publishAttributes(nBlobs, _hashBLSPubKey(registrationProof.registration.pubkey));

// Update the heads
(unsafeHead, safeHead) =
_updateHeads(replaceUnsafeHead, newPublicationId, unsafeHeader, unsafeAttributeHashes, validatorProof);

// Process delayed inclusions
_forceInclusions(metadata);
}

function _buildBlobIndices(uint256 nBlobs) private pure returns (uint256[] memory blobIndices) {
blobIndices = new uint256[](nBlobs);
for (uint256 i; i < nBlobs; ++i) {
blobIndices[i] = i;
}
}

function _isAllowedProposer(IRegistry.RegistrationProof calldata registrationProof) internal view {
// Get the URC's config
IRegistry.Config memory config = registry.getConfig();

// Get the data of the operator from the URC
// This will revert if the proof is invalid
IRegistry.OperatorData memory operator = registry.getVerifiedOperatorData(registrationProof);

// Perform sanity checks on the operator's data
require(operator.collateralWei >= requiredCollateralWei, "Insufficient collateral");

require(operator.slashedAt == 0, "Operator has been slashed");

// Verify operator has not unregistered
if (operator.unregisteredAt != type(uint48).max) {
require(block.number < operator.unregisteredAt + config.unregistrationDelay, "Operator unregistered");
}

// Get information about the operator's commitment to the rollup's specified slasher
IRegistry.SlasherCommitment memory slasherCommitment =
registry.getSlasherCommitment(registrationProof.registrationRoot, slasher);

// Perform sanity checks on the slasher commitment
require(slasherCommitment.optedOutAt < slasherCommitment.optedInAt, "Not opted into slasher");

require(slasherCommitment.slashed == false, "Operator has been slashed");

require(slasherCommitment.committer == msg.sender, "Wrong blob submitter address");

require(block.number > slasherCommitment.optedInAt + config.optInDelay, "Too early to make commitments");

// TODO: potentially check collateral history

// TODO: other checks?
}

function _publishAttributes(uint256 nBlobs, bytes32 pubkeyHash)
internal
returns (uint64 _publicationId, Metadata memory metadata)
{
// Construct metadata attribute
metadata =
Metadata({anchorBlockId: unsafeHead, anchorBlockHash: blockhash(unsafeHead), isDelayedInclusion: false});
require(metadata.anchorBlockHash != 0, "blockhash not found");

// Construct publication attributes
bytes[] memory attributes = new bytes[](4);
attributes[METADATA] = abi.encode(metadata);
attributes[LAST_PUBLICATION] = abi.encode(unsafeHead);
attributes[BLOB_REFERENCE] = abi.encode(blobRefRegistry.getRef(_buildBlobIndices(nBlobs)));
attributes[VALIDATOR_PUBKEY] = abi.encode(pubkeyHash);

// Pay the publication fee
(uint256 publicationFee,) = proposerFees.getCurrentFees();
proposerFees.payPublicationFee{value: publicationFee}(msg.sender, false);

// Publish the attributes and save the publication id
_publicationId = uint64(publicationFeed.publish(attributes).id);
}

function _updateHeads(
bool replaceUnsafeHead,
uint64 newPublicationId,
IPublicationFeed.PublicationHeader calldata unsafeHeader,
bytes32[] calldata unsafeAttributeHashes,
EIP4788.ValidatorProof calldata validatorProof
) internal returns (uint64 _unsafeHead, uint64 _safeHead) {
// Validate the unsafe header matches what's in the publication feed
// and that the header is for the unsafe head
require(
publicationFeed.validateHeader(unsafeHeader) && unsafeHeader.id == unsafeHead, "unsafeHeader is invalid"
);

// Verify the supplied attribute hashes match what's in the publication feed
require(
keccak256(abi.encode(unsafeAttributeHashes)) == unsafeHeader.attributesHash,
"unsafeAttributeHashes are invalid"
);

// Ensure that publish is not called twice in the same block.
// The unsafeHead is updated on every successful publish, so its header
// timestamp should never equal the current block timestamp unless
// publish was already called in this block
require(unsafeHeader.timestamp != block.timestamp, "publish called twice in the same block");

// Verify the supplied validator proof is valid. Note this doesn't check
// the validator public key during the slot; that's done below
require(
EIP4788.verifyValidatorProof(getBeaconBlockRootFromTimestamp(unsafeHeader.timestamp), validatorProof),
"validator proof is invalid"
);

linoscope (Apr 18, 2025):

How do we handle "early inclusion" of batches? If we are at slot N, and the next preconfer is the proposer of slot N+31 in the lookahead, then we would want to let that preconfer submit batches during slots N~N+31 and not just at N+31. I guess one way would be to:

  • Enable building upon unsafe "branches" without providing validatorProof, since the validatorProof is only available after N+32
  • At N+32, not only provide the proof for N+31 but also prove that there were no preconfers during all slots between N and N+30

but that seems expensive? Also, proving that a validator is not opted in seems hard. Maybe there is a better way, not sure.

Collaborator:

Also, at maximum the next preconfer would be 64 slots ahead, so at most the unsafe branch would have to stay unsafe for 64 slots. But this probably isn't an issue as ZK proving takes time anyway.

linoscope (Apr 18, 2025):

> Also proving that a validator is not opted-in seems hard.

Hmm, this seems like the most difficult part. At first glance some fraud-proof mechanism is needed, but it needs more thinking.

linoscope (Apr 18, 2025):

Well, I guess you could say that if no one proposed for N~N+31, then even if the proposer of N+31 is NOT strictly the next preconfer, it is fine to accept their head as valid?

Collaborator:

Hmm, but if N+31 is indeed the next opted-in proposer and there is a block proposed at N+3, then we need to somehow prove that the proposer of slot N+3 is actually NOT a preconfer.

// Reconstruct the pubkey attribute hash: chunk 0 of the SSZ validator is
// the pubkey's hash tree root, encoded and hashed the same way as the attribute
bytes32 _provenPubkeyHash = keccak256(abi.encode(validatorProof.validator[0]));

if (replaceUnsafeHead) {
// If the committed pubkeyhash doesn't match what was proven by the
// validatorProof, it means the L1 proposer during that slot wasn't
// the unsafeHead proposer, allowing the sender to replace it
require(_provenPubkeyHash != unsafeAttributeHashes[VALIDATOR_PUBKEY], "unsafeHead should not be replaced");

// Replace the unsafe head
_unsafeHead = newPublicationId;

// Keep the safe head the same
_safeHead = safeHead;
} else {
// If the committed pubkeyhash matches what was proven by the
// validatorProof, it means the L1 proposer during that slot was the
// unsafeHead proposer, so we promote the unsafeHead to safeHead
require(_provenPubkeyHash == unsafeAttributeHashes[VALIDATOR_PUBKEY], "unsafeHead should be replaced");

// Update the safe head
_safeHead = unsafeHead;
emit NewSafeHead(_safeHead);

// Replace the unsafe head
_unsafeHead = newPublicationId;
}
}

function _forceInclusions(Metadata memory metadata) internal {
IDelayedInclusionStore.Inclusion[] memory inclusions = processDueInclusions();
// unsafeHead is always the latest publication id
uint64 _lastPublicationId = unsafeHead;
metadata.isDelayedInclusion = true;

(, uint256 delayedPublicationFee) = proposerFees.getCurrentFees();

// Metadata is fixed for all inclusions
bytes[] memory attributes = new bytes[](3);
attributes[METADATA] = abi.encode(metadata);
for (uint256 i; i < inclusions.length; ++i) {
attributes[LAST_PUBLICATION] = abi.encode(_lastPublicationId);
attributes[BLOB_REFERENCE] = abi.encode(inclusions[i]);

// Pay the publication fee
proposerFees.payPublicationFee{value: delayedPublicationFee}(msg.sender, true);

// Publish the inclusion
_lastPublicationId = uint64(publicationFeed.publish(attributes).id);

// Update the safe head
safeHead = _lastPublicationId;
emit NewSafeHead(safeHead);
}
}

// source: https://github.com/nerolation/slashing-proofoor/blob/main/src/SlashingProofoor.sol
function getBeaconBlockRootFromTimestamp(uint256 timestamp) public returns (bytes32) {
(bool ret, bytes memory data) = beaconRootsContract.call(bytes.concat(bytes32(timestamp)));
require(ret, "beacon root not found");
return bytes32(data);
}

/// @notice Compress the BLS public key to 48 bytes, then compute its hash tree root as stored in the beacon state
function _hashBLSPubKey(BLS.G1Point memory validatorBLSPubKey) internal pure returns (bytes32 pubKeyHashTreeRoot) {
// TODO: verify endianness is correct
pubKeyHashTreeRoot = sha256(abi.encode(BLS.compress(validatorBLSPubKey)));
}
}
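To summarize the core mechanism: each publish optimistically appends a new unsafe head, and the previous unsafe head is only promoted to safe once an EIP-4788 proof shows whether its committed validator pubkey matched the actual L1 proposer for that slot. Below is a distilled, hypothetical sketch of the bookkeeping in _updateHeads, with the proof and publication-feed checks collapsed into a single boolean (provenMatchesCommitted is an assumption standing in for the pubkey-hash comparison):

// SPDX-License-Identifier: MIT
pragma solidity ^0.8.28;

// Hypothetical illustration, not part of this PR: the proof machinery is
// stubbed so only the two-step unsafe -> safe promotion is visible
contract HeadPromotionSketch {
    uint64 public unsafeHead;
    uint64 public safeHead;

    event NewSafeHead(uint64 indexed newSafeHead);

    // provenMatchesCommitted stands in for "the EIP-4788-proven proposer
    // pubkey hash equals the pubkey hash committed in the unsafe publication"
    function updateHeads(bool replaceUnsafeHead, uint64 newPublicationId, bool provenMatchesCommitted) external {
        if (replaceUnsafeHead) {
            // Committed proposer was NOT the slot's real L1 proposer:
            // anyone may orphan the unsafe head; safeHead stays put
            require(!provenMatchesCommitted, "unsafeHead should not be replaced");
        } else {
            // Committed proposer WAS the slot's real L1 proposer:
            // the previous unsafe head graduates to safe
            require(provenMatchesCommitted, "unsafeHead should be replaced");
            safeHead = unsafeHead;
            emit NewSafeHead(safeHead);
        }
        // Either way, the new publication becomes the unsafe head
        unsafeHead = newPublicationId;
    }
}

Either branch ends with the new publication as the sole unsafe head, so exactly one publication is pending at a time, which is why the contract tracks a single unsafe head rather than the multiple unsafe "branches" discussed in the review thread above.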