Skip to content

Commit

Permalink
add decoding methods
Browse files Browse the repository at this point in the history
  • Loading branch information
NazariiDenha committed Jun 11, 2024
1 parent b842a0f commit 7665c1e
Show file tree
Hide file tree
Showing 4 changed files with 308 additions and 0 deletions.
24 changes: 24 additions & 0 deletions encoding/bitmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,27 @@ func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePo

return bitmapBytes, nextIndex, nil
}

// DecodeBitmap decodes skipped L1 message bitmap of the batch from bytes to big.Int's.
//
// The bitmap is interpreted as consecutive 32-byte (256-bit) big-endian words,
// each becoming one big.Int. It returns an error when the byte length is not a
// multiple of 32, or when the bitmap is too small to cover totalL1MessagePopped
// message bits.
func DecodeBitmap(skippedL1MessageBitmap []byte, totalL1MessagePopped int) ([]*big.Int, error) {
	length := len(skippedL1MessageBitmap)
	if length%32 != 0 {
		return nil, fmt.Errorf("skippedL1MessageBitmap length doesn't match, skippedL1MessageBitmap length should be equal 0 modulo 32, length of skippedL1MessageBitmap: %v", length)
	}
	if length*8 < totalL1MessagePopped {
		return nil, fmt.Errorf("skippedL1MessageBitmap length is too small, skippedL1MessageBitmap length should be at least %v, length of skippedL1MessageBitmap: %v", (totalL1MessagePopped+7)/8, length)
	}
	// Pre-size the result: exactly one big.Int per 32-byte word.
	skippedBitmap := make([]*big.Int, 0, length/32)
	for index := 0; index < length/32; index++ {
		bitmap := new(big.Int).SetBytes(skippedL1MessageBitmap[index*32 : index*32+32])
		skippedBitmap = append(skippedBitmap, bitmap)
	}
	return skippedBitmap, nil
}

// IsL1MessageSkipped checks if index is skipped in bitmap
func IsL1MessageSkipped(skippedBitmap []*big.Int, index uint64) bool {
quo := index / 256
rem := index % 256
return skippedBitmap[quo].Bit(int(rem)) != 0
}
64 changes: 64 additions & 0 deletions encoding/codecv0/codecv0.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ import (
"github.com/scroll-tech/da-codec/encoding"
)

// BlockContextByteSize is the fixed byte length of one encoded block context.
const BlockContextByteSize = 60

// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Expand All @@ -32,6 +34,12 @@ type DAChunk struct {
Transactions [][]*types.TransactionData
}

// DAChunkRawTx groups consecutive DABlocks with their transactions.
type DAChunkRawTx struct {
	Blocks       []*DABlock           // block contexts decoded from the chunk's calldata
	Transactions []types.Transactions // decoded L2 transactions, one list per block
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
Version uint8
Expand Down Expand Up @@ -171,6 +179,62 @@ func (c *DAChunk) Encode() ([]byte, error) {
return chunkBytes, nil
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []DAChunkRawTx.
//
// Each chunk is laid out as: 1 byte block count, followed by that many block
// contexts of BlockContextByteSize bytes each, followed by the L2
// transactions, each prefixed with a 4-byte big-endian length and encoded in
// the format expected by types.Transaction.UnmarshalBinary.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
	var chunks []*DAChunkRawTx
	for _, chunk := range bytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		blocks := make([]*DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			// Fix: allocate the block before decoding into it. make() left
			// every element nil, so blocks[i].Decode(...) dereferenced a nil
			// *DABlock and panicked.
			blocks[i] = &DABlock{}
			err := blocks[i].Decode(chunk[startIdx:endIdx])
			if err != nil {
				return nil, err
			}
		}

		var transactions []types.Transactions
		currentIndex := 1 + numBlocks*BlockContextByteSize
		for _, block := range blocks {
			var blockTransactions types.Transactions
			// Only L2 transactions are embedded here; L1 messages are not.
			var txNum int = int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				if len(chunk) < currentIndex+4 {
					return nil, fmt.Errorf("chunk size doesn't match, next tx size is less than 4, byte length of chunk: %v, expected length: %v", len(chunk), currentIndex+4)
				}
				txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+4]))
				if len(chunk) < currentIndex+4+txLen {
					return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected length: %v", len(chunk), currentIndex+4+txLen)
				}
				txData := chunk[currentIndex+4 : currentIndex+4+txLen]
				tx := &types.Transaction{}
				err := tx.UnmarshalBinary(txData)
				if err != nil {
					return nil, fmt.Errorf("failed to unmarshal tx, err: %w", err)
				}
				blockTransactions = append(blockTransactions, tx)
				currentIndex += 4 + txLen
			}
			transactions = append(transactions, blockTransactions)
		}

		chunks = append(chunks, &DAChunkRawTx{
			Blocks:       blocks,
			Transactions: transactions,
		})
	}
	return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
Expand Down
139 changes: 139 additions & 0 deletions encoding/codecv1/codecv1.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ var BLSModulus = new(big.Int).SetBytes(common.FromHex("0x73eda753299d7d483339d80
// MaxNumChunks is the maximum number of chunks that a batch can contain.
const MaxNumChunks = 15

// BlockContextByteSize is the fixed byte length of one encoded block context.
const BlockContextByteSize = 60

// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Expand All @@ -42,6 +44,12 @@ type DAChunk struct {
Transactions [][]*types.TransactionData
}

// DAChunkRawTx groups consecutive DABlocks with their transactions.
type DAChunkRawTx struct {
	Blocks       []*DABlock           // block contexts decoded from the chunk's calldata
	Transactions []types.Transactions // L2 transactions per block; populated by DecodeTxsFromBlob
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
// header
Expand Down Expand Up @@ -156,6 +164,39 @@ func (c *DAChunk) Encode() []byte {
return chunkBytes
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []DAChunkRawTx.
//
// Only the block contexts are decoded from calldata here; the Transactions
// field is left empty and is filled in later by DecodeTxsFromBlob.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
	var chunks []*DAChunkRawTx
	for _, chunk := range bytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		blocks := make([]*DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			// Fix: allocate the block before decoding into it. make() left
			// every element nil, so blocks[i].Decode(...) dereferenced a nil
			// *DABlock and panicked.
			blocks[i] = &DABlock{}
			err := blocks[i].Decode(chunk[startIdx:endIdx])
			if err != nil {
				return nil, err
			}
		}

		var transactions []types.Transactions

		chunks = append(chunks, &DAChunkRawTx{
			Blocks:       blocks,
			Transactions: transactions,
		})
	}
	return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
var dataBytes []byte
Expand Down Expand Up @@ -374,6 +415,104 @@ func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
return &blob, nil
}

// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
//
// Canonical blob layout: a 2-byte big-endian chunk count, then a table of
// MaxNumChunks 4-byte big-endian chunk sizes, then the chunk payloads back to
// back. Decoded transactions are written into chunk.Transactions in place.
//
// NOTE(review): the slice expressions below panic (rather than return an
// error) if the size table is inconsistent with the actual payload length —
// confirm the blob is validated upstream.
func DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	blobBytes := BytesFromBlobCanonical(blob)
	// The first two bytes hold the number of chunks; it must match calldata.
	numChunks := int(binary.BigEndian.Uint16(blobBytes[0:2]))
	if numChunks != len(chunks) {
		return fmt.Errorf("blob chunk number is not same as calldata, blob num chunks: %d, calldata num chunks: %d", numChunks, len(chunks))
	}
	// Payloads start right after the 2-byte count and the fixed-size table.
	index := 2 + MaxNumChunks*4
	for chunkID, chunk := range chunks {
		var transactions []types.Transactions
		// Byte size of this chunk's payload, read from the size table.
		chunkSize := int(binary.BigEndian.Uint32(blobBytes[2+4*chunkID : 2+4*chunkID+4]))

		chunkBytes := blobBytes[index : index+chunkSize]
		curIndex := 0
		for _, block := range chunk.Blocks {
			var blockTransactions types.Transactions
			// Only the L2 transactions are stored in the blob.
			var txNum = int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				tx, nextIndex, err := GetNextTx(chunkBytes, curIndex)
				if err != nil {
					return fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, index+curIndex+4)
				}
				curIndex = nextIndex
				blockTransactions = append(blockTransactions, tx)
			}
			transactions = append(transactions, blockTransactions)
		}
		chunk.Transactions = transactions
		index += chunkSize
	}
	return nil
}

// errSmallLength is returned when the remaining bytes cannot contain the
// next transaction's prefix or payload.
var errSmallLength error = fmt.Errorf("length of blob bytes is too small")

// GetNextTx parses blob bytes to find length of payload of next Tx and decode it
//
// It reads one encoded transaction starting at bytes[index]: an optional
// leading type byte (typed transactions, value <= 0x7f) followed by an RLP
// list — short form (0xc0..0xf7, payload length in the tag byte) or long
// form (> 0xf7, payload length encoded in the following bytes). Returns the
// decoded transaction and the index just past its encoding.
//
// NOTE(review): payloadLen is decoded as uint64 and converted to int; an
// adversarially large long-form length could wrap negative on conversion and
// make the slice expression panic instead of returning errSmallLength —
// confirm inputs are trusted or add an explicit upper bound.
func GetNextTx(bytes []byte, index int) (*types.Transaction, int, error) {
	var nextIndex int
	length := len(bytes)
	if length < index+1 {
		return nil, 0, errSmallLength
	}
	var txBytes []byte
	if bytes[index] <= 0x7f {
		// the first byte is transaction type, rlp encoding begins from next byte
		txBytes = append(txBytes, bytes[index])
		index++

	}
	// Re-check: index may have advanced past the end after the type byte.
	if length < index+1 {
		return nil, 0, errSmallLength
	}
	if bytes[index] >= 0xc0 && bytes[index] <= 0xf7 {
		// length of payload is simply bytes[index] - 0xc0
		payloadLen := int(bytes[index] - 0xc0)
		if length < index+1+payloadLen {
			return nil, 0, errSmallLength
		}
		txBytes = append(txBytes, bytes[index:index+1+payloadLen]...)
		nextIndex = index + 1 + payloadLen
	} else if bytes[index] > 0xf7 {
		// the length of payload is encoded in next bytes[index] - 0xf7 bytes
		// length of bytes representation of length of payload
		lenPayloadLen := int(bytes[index] - 0xf7)
		if length < index+1+lenPayloadLen {
			return nil, 0, errSmallLength
		}
		lenBytes := bytes[index+1 : index+1+lenPayloadLen]
		// left-pad to 8 bytes so the big-endian decode below is well-formed
		for len(lenBytes) < 8 {
			lenBytes = append([]byte{0x0}, lenBytes...)
		}
		payloadLen := binary.BigEndian.Uint64(lenBytes)

		if length < index+1+lenPayloadLen+int(payloadLen) {
			return nil, 0, errSmallLength
		}
		txBytes = append(txBytes, bytes[index:index+1+lenPayloadLen+int(payloadLen)]...)
		nextIndex = index + 1 + lenPayloadLen + int(payloadLen)
	} else {
		// tag bytes in (0x7f, 0xc0) would denote an RLP string, not a tx list
		return nil, 0, fmt.Errorf("incorrect format of rlp encoding")
	}
	tx := &types.Transaction{}
	err := tx.UnmarshalBinary(txBytes)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to unmarshal tx, err: %w", err)
	}
	return tx, nextIndex, nil
}

// BytesFromBlobCanonical converts the canonical blob representation into the
// raw blob data: each 32-byte field element contributes its trailing 31
// bytes of payload (the leading byte of every element is dropped).
func BytesFromBlobCanonical(blob *kzg4844.Blob) [126976]byte {
	var blobBytes [126976]byte
	nElems := len(blob) / 32
	for i := 0; i < nElems; i++ {
		copy(blobBytes[i*31:(i+1)*31], blob[i*32+1:(i+1)*32])
	}
	return blobBytes
}

// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
Expand Down
81 changes: 81 additions & 0 deletions encoding/codecv2/codecv2.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,8 @@ import (
// MaxNumChunks is the maximum number of chunks that a batch can contain.
const MaxNumChunks = 45

// BlockContextByteSize is the fixed byte length of one encoded block context.
const BlockContextByteSize = 60

// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Expand All @@ -45,6 +47,12 @@ type DAChunk struct {
Transactions [][]*types.TransactionData
}

// DAChunkRawTx groups consecutive DABlocks with their transactions.
type DAChunkRawTx struct {
	Blocks       []*DABlock           // block contexts decoded from the chunk's calldata
	Transactions []types.Transactions // L2 transactions per block; populated by DecodeTxsFromBlob
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
// header
Expand Down Expand Up @@ -159,6 +167,39 @@ func (c *DAChunk) Encode() []byte {
return chunkBytes
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []DAChunkRawTx.
//
// Only the block contexts are decoded from calldata here; the Transactions
// field is left empty and is filled in later by DecodeTxsFromBlob.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
	var chunks []*DAChunkRawTx
	for _, chunk := range bytes {
		if len(chunk) < 1 {
			return nil, fmt.Errorf("invalid chunk, length is less than 1")
		}

		numBlocks := int(chunk[0])
		if len(chunk) < 1+numBlocks*BlockContextByteSize {
			return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
		}

		blocks := make([]*DABlock, numBlocks)
		for i := 0; i < numBlocks; i++ {
			startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
			endIdx := startIdx + BlockContextByteSize
			// Fix: allocate the block before decoding into it. make() left
			// every element nil, so blocks[i].Decode(...) dereferenced a nil
			// *DABlock and panicked.
			blocks[i] = &DABlock{}
			err := blocks[i].Decode(chunk[startIdx:endIdx])
			if err != nil {
				return nil, err
			}
		}

		var transactions []types.Transactions

		chunks = append(chunks, &DAChunkRawTx{
			Blocks:       blocks,
			Transactions: transactions,
		})
	}
	return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
var dataBytes []byte
Expand Down Expand Up @@ -341,6 +382,46 @@ func constructBlobPayload(chunks []*encoding.Chunk, useMockTxData bool) (*kzg484
return blob, blobVersionedHash, &z, nil
}

// DecodeTxsFromBlob decodes txs from blob bytes and writes to chunks
//
// Canonical blob layout: a 2-byte big-endian chunk count, then a table of
// MaxNumChunks 4-byte big-endian chunk sizes, then the chunk payloads back to
// back. Decoded transactions are written into chunk.Transactions in place.
//
// NOTE(review): the slice expressions below panic (rather than return an
// error) if the size table is inconsistent with the actual payload length —
// confirm the blob is validated upstream.
func DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	blobBytes := codecv1.BytesFromBlobCanonical(blob)

	// todo: decompress
	// blobBytes, err := decompressScrollBatchBytes(blobBytes)
	// if err != nil {
	// return err
	// }

	// The first two bytes hold the number of chunks; it must match calldata.
	numChunks := int(binary.BigEndian.Uint16(blobBytes[0:2]))
	if numChunks != len(chunks) {
		return fmt.Errorf("blob chunk number is not same as calldata, blob num chunks: %d, calldata num chunks: %d", numChunks, len(chunks))
	}
	// Payloads start right after the 2-byte count and the fixed-size table.
	index := 2 + MaxNumChunks*4
	for chunkID, chunk := range chunks {
		var transactions []types.Transactions
		// Byte size of this chunk's payload, read from the size table.
		chunkSize := int(binary.BigEndian.Uint32(blobBytes[2+4*chunkID : 2+4*chunkID+4]))

		chunkBytes := blobBytes[index : index+chunkSize]
		curIndex := 0
		for _, block := range chunk.Blocks {
			var blockTransactions types.Transactions
			// Only the L2 transactions are stored in the blob.
			var txNum = int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				tx, nextIndex, err := codecv1.GetNextTx(chunkBytes, curIndex)
				if err != nil {
					return fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, index+curIndex+4)
				}
				curIndex = nextIndex
				blockTransactions = append(blockTransactions, tx)
			}
			transactions = append(transactions, blockTransactions)
		}
		chunk.Transactions = transactions
		index += chunkSize
	}
	return nil
}

// NewDABatchFromBytes attempts to decode the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
Expand Down

0 comments on commit 7665c1e

Please sign in to comment.