Add Para threads to MMR Root #1288

Merged
merged 8 commits on Oct 2, 2024
Changes from 2 commits
133 changes: 54 additions & 79 deletions relayer/chain/relaychain/connection.go
@@ -6,6 +6,7 @@ package relaychain
import (
"context"
"fmt"
"sort"

gsrpc "github.com/snowfork/go-substrate-rpc-client/v4"
"github.com/snowfork/go-substrate-rpc-client/v4/types"
@@ -130,37 +131,6 @@ type ParaHead struct {
Data types.Bytes
}

// Fetches heads for each parachain Id filtering out para threads.
func (conn *Connection) FetchParachainHeads(relayChainBlockHash types.Hash) ([]ParaHead, error) {
// Fetch para heads
paraHeads, err := conn.fetchParaHeads(relayChainBlockHash)
if err != nil {
log.WithError(err).Error("Cannot fetch para heads.")
return nil, err
}

// fetch ids of parachains (not including parathreads)
var parachainIDs []uint32
parachainsKey, err := types.CreateStorageKey(conn.Metadata(), "Paras", "Parachains", nil, nil)
if err != nil {
return nil, err
}

_, err = conn.API().RPC.State.GetStorage(parachainsKey, &parachainIDs, relayChainBlockHash)
if err != nil {
return nil, err
}

// filter out parathreads
var parachainHeads []ParaHead
for _, v := range parachainIDs {
if head, ok := paraHeads[v]; ok {
parachainHeads = append(parachainHeads, head)
}
}
return parachainHeads, nil
}

func (co *Connection) FetchParachainHead(relayBlockhash types.Hash, paraID uint32, header *types.Header) (bool, error) {
encodedParaID, err := types.EncodeToBytes(paraID)
if err != nil {
@@ -221,58 +191,14 @@ func (co *Connection) FetchMMRLeafCount(relayBlockhash types.Hash) (uint64, erro
return mmrLeafCount, nil
}

func (co *Connection) fetchKeys(keyPrefix []byte, blockHash types.Hash) ([]types.StorageKey, error) {
const pageSize = 200
var startKey *types.StorageKey

if pageSize < 1 {
return nil, fmt.Errorf("page size cannot be zero")
}

var results []types.StorageKey
log.WithFields(log.Fields{
"keyPrefix": keyPrefix,
"blockHash": blockHash.Hex(),
"pageSize": pageSize,
}).Trace("Fetching paged keys.")

pageIndex := 0
for {
response, err := co.API().RPC.State.GetKeysPaged(keyPrefix, pageSize, startKey, blockHash)
if err != nil {
return nil, err
}

log.WithFields(log.Fields{
"keysInPage": len(response),
"pageIndex": pageIndex,
}).Trace("Fetched a page of keys.")

results = append(results, response...)
if uint32(len(response)) < pageSize {
break
} else {
startKey = &response[len(response)-1]
pageIndex++
}
}

log.WithFields(log.Fields{
"totalNumKeys": len(results),
"totalNumPages": pageIndex + 1,
}).Trace("Fetching of paged keys complete.")

return results, nil
}

// Offset of encoded para id in storage key.
// The key is of this format:
//
// ParaId: u32
// Key: hash_twox_128("Paras") + hash_twox_128("Heads") + hash_twox_64(ParaId) + Encode(ParaId)
const ParaIDOffset = 16 + 16 + 8

func (co *Connection) fetchParaHeads(blockHash types.Hash) (map[uint32]ParaHead, error) {
func (co *Connection) FetchParachainHeads(blockHash types.Hash) ([]ParaHead, error) {
keyPrefix := types.CreateStorageKeyPrefix("Paras", "Heads")
keys, err := co.fetchKeys(keyPrefix, blockHash)
if err != nil {
@@ -292,7 +218,8 @@ func (co *Connection) fetchParaHeads(blockHash types.Hash) (map[uint32]ParaHead,
return nil, err
}

heads := make(map[uint32]ParaHead)
const numParas = 16
heads := make([]ParaHead, 0, numParas)
for _, changeSet := range changeSets {
for _, change := range changeSet.Changes {
if change.StorageData.IsNone() {
@@ -313,12 +240,60 @@ func (co *Connection) fetchParaHeads(blockHash types.Hash) (map[uint32]ParaHead,
return nil, err
}

heads[paraID] = ParaHead{
heads = append(heads, ParaHead{
ParaID: paraID,
Data: headData,
}
})
}
}

sort.SliceStable(heads, func(i int, j int) bool {
return heads[i].ParaID < heads[j].ParaID
})

return heads, nil
}

func (co *Connection) fetchKeys(keyPrefix []byte, blockHash types.Hash) ([]types.StorageKey, error) {
const pageSize = 200
var startKey *types.StorageKey

if pageSize < 1 {
return nil, fmt.Errorf("page size cannot be zero")
}

var results []types.StorageKey
log.WithFields(log.Fields{
"keyPrefix": keyPrefix,
"blockHash": blockHash.Hex(),
"pageSize": pageSize,
}).Trace("Fetching paged keys.")

pageIndex := 0
for {
response, err := co.API().RPC.State.GetKeysPaged(keyPrefix, pageSize, startKey, blockHash)
if err != nil {
return nil, err
}

log.WithFields(log.Fields{
"keysInPage": len(response),
"pageIndex": pageIndex,
}).Trace("Fetched a page of keys.")

results = append(results, response...)
if uint32(len(response)) < pageSize {
break
} else {
startKey = &response[len(response)-1]
pageIndex++
}
}

log.WithFields(log.Fields{
"totalNumKeys": len(results),
"totalNumPages": pageIndex + 1,
}).Trace("Fetching of paged keys complete.")

return results, nil
}
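
Note on the key layout documented above: the actual para ID decoding that ParaIDOffset enables is collapsed out of the hunk, but because a SCALE-encoded u32 is just four little-endian bytes, the ID can be read directly off a raw Paras.Heads storage key. A minimal standalone sketch (illustration only, not code from this PR; the example key is hypothetical):

package main

import (
	"encoding/binary"
	"fmt"
)

// Key layout: hash_twox_128("Paras") ++ hash_twox_128("Heads") ++ hash_twox_64(ParaId) ++ Encode(ParaId)
const paraIDOffset = 16 + 16 + 8

// decodeParaID recovers the para ID from a raw Paras.Heads storage key.
func decodeParaID(storageKey []byte) (uint32, error) {
	if len(storageKey) < paraIDOffset+4 {
		return 0, fmt.Errorf("storage key too short: %d bytes", len(storageKey))
	}
	// A u32 is SCALE-encoded as four little-endian bytes.
	return binary.LittleEndian.Uint32(storageKey[paraIDOffset:]), nil
}

func main() {
	// Hypothetical key: 40 bytes of hashes followed by para ID 1000 (0xe8 0x03 0x00 0x00).
	key := append(make([]byte, paraIDOffset), 0xe8, 0x03, 0x00, 0x00)
	id, err := decodeParaID(key)
	fmt.Println(id, err) // 1000 <nil>
}
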
11 changes: 9 additions & 2 deletions relayer/relays/parachain/beefy-listener.go
@@ -217,6 +217,10 @@ func (li *BeefyListener) fetchLatestBeefyBlock(ctx context.Context) (uint64, typ
return number, hash, nil
}

// The maximum number of para heads that will be included in the proof.
// https://github.com/paritytech/polkadot-sdk/blob/d66dee3c3da836bcf41a12ca4e1191faee0b6a5b/polkadot/runtime/parachains/src/paras/mod.rs#L1225-L1232
const MaxParaHeads = 1024

// Generates a proof for an MMR leaf, and then generates a merkle proof for our parachain header, which should be verifiable against the
// parachains root in the mmr leaf.
func (li *BeefyListener) generateProof(ctx context.Context, input *ProofInput, header *types.Header) (*ProofOutput, error) {
@@ -258,8 +262,11 @@ func (li *BeefyListener) generateProof(ctx context.Context, input *ProofInput, h
// Generate a merkle proof for the parachain head with input ParaId
// and verify with merkle root hash of all parachain heads
// Polkadot uses the following code to generate merkle root from parachain headers:
// https://github.com/paritytech/polkadot/blob/2eb7672905d99971fc11ad7ff4d57e68967401d2/runtime/rococo/src/lib.rs#L706-L709
merkleProofData, err := CreateParachainMerkleProof(input.ParaHeads, input.ParaID)
// https://github.com/paritytech/polkadot-sdk/blob/d66dee3c3da836bcf41a12ca4e1191faee0b6a5b/polkadot/runtime/westend/src/lib.rs#L453-L460
// Truncate the ParaHeads to the first 1024
// https://github.com/paritytech/polkadot-sdk/blob/d66dee3c3da836bcf41a12ca4e1191faee0b6a5b/polkadot/runtime/parachains/src/paras/mod.rs#L1305-L1311
numParas := min(MaxParaHeads, len(input.ParaHeads))
merkleProofData, err := CreateParachainMerkleProof(input.ParaHeads[:numParas], input.ParaID)
if err != nil {
return nil, fmt.Errorf("create parachain header proof: %w", err)
}
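
For context on why the heads must be sorted and capped: per the runtime links above, the parachain-heads root is computed over the SCALE-encoded (para ID, head data) pairs, sorted by para ID and truncated to 1024 entries, so CreateParachainMerkleProof has to be fed the same ordered, truncated list. A standalone sketch of that assumed leaf encoding (an illustration only; the relayer's real encoding lives in CreateParachainMerkleProof, not here):

package main

import (
	"encoding/binary"
	"fmt"
)

// compactLen encodes a byte-vector length as a SCALE compact integer.
// Single-, two- and four-byte modes cover any realistic head-data size.
func compactLen(n uint32) []byte {
	switch {
	case n < 1<<6:
		return []byte{byte(n << 2)}
	case n < 1<<14:
		out := make([]byte, 2)
		binary.LittleEndian.PutUint16(out, uint16(n<<2|0b01))
		return out
	case n < 1<<30:
		out := make([]byte, 4)
		binary.LittleEndian.PutUint32(out, n<<2|0b10)
		return out
	default:
		panic("length too large for this sketch")
	}
}

// encodeLeaf SCALE-encodes a (paraID, headData) pair: a little-endian u32
// followed by a compact-length-prefixed byte vector.
func encodeLeaf(paraID uint32, headData []byte) []byte {
	leaf := make([]byte, 4, 4+5+len(headData))
	binary.LittleEndian.PutUint32(leaf, paraID)
	leaf = append(leaf, compactLen(uint32(len(headData)))...)
	return append(leaf, headData...)
}

func main() {
	// Hypothetical head data for para 1000.
	fmt.Printf("%x\n", encodeLeaf(1000, []byte{0xde, 0xad, 0xbe, 0xef}))
	// Prints e803000010deadbeef: LE para ID, compact length (4 -> 0x10), then the head bytes.
}
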