Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: add decoding methods #10

Merged
merged 24 commits into from
Sep 2, 2024
Merged
Show file tree
Hide file tree
Changes from 17 commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
7665c1e
add decoding methods
NazariiDenha Jun 11, 2024
129fde6
add tests for codecv0 and codecv1
NazariiDenha Jun 12, 2024
0ee1e04
decompressing
NazariiDenha Jun 14, 2024
4cb6ab7
add decompressing for codecv2
NazariiDenha Jun 14, 2024
c3e19b3
fix
NazariiDenha Jun 14, 2024
4d86363
change zstd library from c binding to full go port
NazariiDenha Jun 14, 2024
e74a7b9
handle error
NazariiDenha Jun 27, 2024
91c4fce
Merge branch 'main' of github.com:scroll-tech/da-codec into feat/add-…
NazariiDenha Jun 27, 2024
7998e80
sync with main
NazariiDenha Jun 27, 2024
9b420c9
Merge branch 'main' into feat/add-decoding-methods
NazariiDenha Jul 8, 2024
cb02d63
add v3 decoding
NazariiDenha Jul 8, 2024
bbbbdea
Merge branch 'main' of github.com:scroll-tech/da-codec into feat/add-…
NazariiDenha Jul 15, 2024
15d6aa4
refactor: make DAChunkRawTx an alias
jonastheis Jul 25, 2024
13e0b29
Merge branch 'feat/add-decoding-methods' of github.com:scroll-tech/da…
NazariiDenha Jul 29, 2024
46a287b
address comments
NazariiDenha Jul 29, 2024
0ba3d76
Merge branch 'main' of github.com:scroll-tech/da-codec into feat/add-…
NazariiDenha Jul 29, 2024
31de320
comment
NazariiDenha Jul 29, 2024
d7c9669
comment
NazariiDenha Aug 2, 2024
6162bbb
Merge branch 'main' of github.com:scroll-tech/da-codec into feat/add-…
NazariiDenha Aug 2, 2024
76f6081
address comments
NazariiDenha Aug 20, 2024
c41a465
fix test
NazariiDenha Aug 20, 2024
aece065
Merge branch 'main' of github.com:scroll-tech/da-codec into feat/add-…
NazariiDenha Aug 26, 2024
b7214d9
support v4
NazariiDenha Aug 26, 2024
eb98626
address renaming nit-picks
NazariiDenha Sep 2, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions encoding/bitmap.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,3 +63,30 @@ func ConstructSkippedBitmap(batchIndex uint64, chunks []*Chunk, totalL1MessagePo

return bitmapBytes, nextIndex, nil
}

// DecodeBitmap decodes skipped L1 message bitmap of the batch from bytes to big.Int's.
// The bitmap is a sequence of 32-byte big-endian words; totalL1MessagePopped is the
// number of message bits the bitmap must be able to hold.
func DecodeBitmap(skippedL1MessageBitmap []byte, totalL1MessagePopped int) ([]*big.Int, error) {
	length := len(skippedL1MessageBitmap)
	if length%32 != 0 {
		return nil, fmt.Errorf("skippedL1MessageBitmap length doesn't match, skippedL1MessageBitmap length should be equal 0 modulo 32, length of skippedL1MessageBitmap: %v", length)
	}
	if length*8 < totalL1MessagePopped {
		return nil, fmt.Errorf("skippedL1MessageBitmap length is too small, skippedL1MessageBitmap length should be at least %v, length of skippedL1MessageBitmap: %v", (totalL1MessagePopped+7)/8, length)
	}
	// Pre-size the result: exactly one big.Int per 32-byte word.
	skippedBitmap := make([]*big.Int, 0, length/32)
	for index := 0; index < length/32; index++ {
		bitmap := new(big.Int).SetBytes(skippedL1MessageBitmap[index*32 : index*32+32])
		skippedBitmap = append(skippedBitmap, bitmap)
	}
	return skippedBitmap, nil
}

// IsL1MessageSkipped checks if index is skipped in bitmap
func IsL1MessageSkipped(skippedBitmap []*big.Int, index uint64) bool {
if index > uint64(len(skippedBitmap))*256 {
return false
}
quo := index / 256
rem := index % 256
return skippedBitmap[quo].Bit(int(rem)) != 0
}
65 changes: 65 additions & 0 deletions encoding/codecv0/codecv0.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ import (
"github.com/scroll-tech/da-codec/encoding"
)

// BlockContextByteSize is the number of bytes an encoded DABlock context occupies inside a chunk.
const BlockContextByteSize = 60

// DABlock represents a Data Availability Block.
type DABlock struct {
BlockNumber uint64
Expand All @@ -32,6 +34,12 @@ type DAChunk struct {
Transactions [][]*types.TransactionData
}

// DAChunkRawTx groups consecutive DABlocks with their transactions.
type DAChunkRawTx struct {
Blocks []*DABlock
Transactions []types.Transactions
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
}

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
Version uint8
Expand Down Expand Up @@ -179,6 +187,63 @@ func (c *DAChunk) Encode() ([]byte, error) {
return chunkBytes, nil
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
var chunks []*DAChunkRawTx
for _, chunk := range bytes {
if len(chunk) < 1 {
return nil, fmt.Errorf("invalid chunk, length is less than 1")
}

numBlocks := int(chunk[0])
if len(chunk) < 1+numBlocks*BlockContextByteSize {
return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
}

blocks := make([]*DABlock, numBlocks)
for i := 0; i < numBlocks; i++ {
startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
endIdx := startIdx + BlockContextByteSize
blocks[i] = &DABlock{}
err := blocks[i].Decode(chunk[startIdx:endIdx])
if err != nil {
return nil, err
}
}

var transactions []types.Transactions
currentIndex := 1 + numBlocks*BlockContextByteSize
for _, block := range blocks {
var blockTransactions types.Transactions
txNum := int(block.NumTransactions - block.NumL1Messages)
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
for i := 0; i < txNum; i++ {
if len(chunk) < currentIndex+4 {
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
return nil, fmt.Errorf("chunk size doesn't match, next tx size is less then 4, byte length of chunk: %v, expected length: %v", len(chunk), currentIndex+4)
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
}
txLen := int(binary.BigEndian.Uint32(chunk[currentIndex : currentIndex+4]))
if len(chunk) < currentIndex+4+txLen {
return nil, fmt.Errorf("chunk size doesn't match with next tx length, byte length of chunk: %v, expected length: %v", len(chunk), currentIndex+4+txLen)
}
txData := chunk[currentIndex+4 : currentIndex+4+txLen]
tx := &types.Transaction{}
err := tx.UnmarshalBinary(txData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tx, err: %w", err)
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
}
blockTransactions = append(blockTransactions, tx)
currentIndex += 4 + txLen
}
transactions = append(transactions, blockTransactions)
}

chunks = append(chunks, &DAChunkRawTx{
Blocks: blocks,
Transactions: transactions,
})
}
return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
chunkBytes, err := c.Encode()
Expand Down
45 changes: 45 additions & 0 deletions encoding/codecv0/codecv0_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,38 @@ func TestCodecV0(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, 61, len(chunkBytes2))

daChunksRawTx, err := DecodeDAChunksRawTx([][]byte{chunkBytes1, chunkBytes2})
assert.NoError(t, err)
// assert number of chunks
assert.Equal(t, 2, len(daChunksRawTx))

// assert block in first chunk
assert.Equal(t, 3, len(daChunksRawTx[0].Blocks))
assert.Equal(t, daChunk1.Blocks[0], daChunksRawTx[0].Blocks[0])
assert.Equal(t, daChunk1.Blocks[1], daChunksRawTx[0].Blocks[1])
daChunksRawTx[0].Blocks[2].BaseFee = nil
assert.Equal(t, daChunk1.Blocks[2], daChunksRawTx[0].Blocks[2])

// assert block in second chunk
assert.Equal(t, 1, len(daChunksRawTx[1].Blocks))
daChunksRawTx[1].Blocks[0].BaseFee = nil
assert.Equal(t, daChunk2.Blocks[0], daChunksRawTx[1].Blocks[0])

// assert transactions in first chunk
assert.Equal(t, 3, len(daChunksRawTx[0].Transactions))
// here number of transactions in encoded and decoded chunks may be different, because decodec chunks doesn't contain l1msgs
assert.Equal(t, 2, len(daChunksRawTx[0].Transactions[0]))
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[1]))
assert.Equal(t, 1, len(daChunksRawTx[0].Transactions[2]))

assert.EqualValues(t, daChunk1.Transactions[0][0].TxHash, daChunksRawTx[0].Transactions[0][0].Hash().String())
assert.EqualValues(t, daChunk1.Transactions[0][1].TxHash, daChunksRawTx[0].Transactions[0][1].Hash().String())

// assert transactions in second chunk
assert.Equal(t, 1, len(daChunksRawTx[1].Transactions))
// here number of transactions in encoded and decoded chunks may be different, because decodec chunks doesn't contain l1msgs
assert.Equal(t, 0, len(daChunksRawTx[1].Transactions[0]))

batch = &encoding.Batch{
Index: 1,
TotalL1MessagePoppedBefore: 0,
Expand Down Expand Up @@ -297,6 +329,19 @@ func TestCodecV0(t *testing.T) {
decodedBatchHexString = hex.EncodeToString(decodedBatchBytes)
assert.Equal(t, batchHexString, decodedBatchHexString)

decodedBitmap, err := encoding.DecodeBitmap(decodedDABatch.SkippedL1MessageBitmap, int(decodedDABatch.L1MessagePopped))
assert.NoError(t, err)
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 0))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 9))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 10))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 11))
assert.True(t, encoding.IsL1MessageSkipped(decodedBitmap, 36))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 37))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 38))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 39))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 40))
assert.False(t, encoding.IsL1MessageSkipped(decodedBitmap, 41))

// Test case: many consecutive L1 Msgs in 1 bitmap, no leading skipped msgs.
chunk = &encoding.Chunk{
Blocks: []*encoding.Block{block4},
Expand Down
137 changes: 137 additions & 0 deletions encoding/codecv1/codecv1.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,17 @@ var BLSModulus = new(big.Int).SetBytes(common.FromHex("0x73eda753299d7d483339d80
// MaxNumChunks is the maximum number of chunks that a batch can contain.
const MaxNumChunks = 15

// BlockContextByteSize is the number of bytes an encoded DABlock context occupies inside a chunk.
const BlockContextByteSize = 60

// DABlock represents a Data Availability Block.
type DABlock = codecv0.DABlock

// DAChunk groups consecutive DABlocks with their transactions.
type DAChunk codecv0.DAChunk

// DAChunkRawTx groups consecutive DABlocks with their transactions.
type DAChunkRawTx = codecv0.DAChunkRawTx

// DABatch contains metadata about a batch of DAChunks.
type DABatch struct {
// header
Expand Down Expand Up @@ -98,6 +103,40 @@ func (c *DAChunk) Encode() []byte {
return chunkBytes
}

// DecodeDAChunksRawTx takes a byte slice and decodes it into a []*DAChunkRawTx.
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
func DecodeDAChunksRawTx(bytes [][]byte) ([]*DAChunkRawTx, error) {
var chunks []*DAChunkRawTx
for _, chunk := range bytes {
if len(chunk) < 1 {
return nil, fmt.Errorf("invalid chunk, length is less than 1")
}

numBlocks := int(chunk[0])
if len(chunk) < 1+numBlocks*BlockContextByteSize {
return nil, fmt.Errorf("chunk size doesn't match with numBlocks, byte length of chunk: %v, expected length: %v", len(chunk), 1+numBlocks*BlockContextByteSize)
}

blocks := make([]*DABlock, numBlocks)
for i := 0; i < numBlocks; i++ {
startIdx := 1 + i*BlockContextByteSize // add 1 to skip numBlocks byte
endIdx := startIdx + BlockContextByteSize
blocks[i] = &DABlock{}
err := blocks[i].Decode(chunk[startIdx:endIdx])
if err != nil {
return nil, err
}
}

var transactions []types.Transactions

chunks = append(chunks, &DAChunkRawTx{
Blocks: blocks,
Transactions: transactions, // Transactions field is still empty in the phase of DecodeDAChunksRawTx.
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
})
}
return chunks, nil
}

// Hash computes the hash of the DAChunk data.
func (c *DAChunk) Hash() (common.Hash, error) {
var dataBytes []byte
Expand Down Expand Up @@ -316,6 +355,104 @@ func MakeBlobCanonical(blobBytes []byte) (*kzg4844.Blob, error) {
return &blob, nil
}

// DecodeTxsFromBlob decodes txs from blob bytes and writes them into chunks.
// Canonical blob layout: 2-byte big-endian chunk count, MaxNumChunks 4-byte
// big-endian chunk sizes, then the concatenated chunk payloads.
func DecodeTxsFromBlob(blob *kzg4844.Blob, chunks []*DAChunkRawTx) error {
	blobBytes := BytesFromBlobCanonical(blob)
	numChunks := int(binary.BigEndian.Uint16(blobBytes[0:2]))
	if numChunks != len(chunks) {
		return fmt.Errorf("blob chunk number is not same as calldata, blob num chunks: %d, calldata num chunks: %d", numChunks, len(chunks))
	}
	index := 2 + MaxNumChunks*4
	for chunkID, chunk := range chunks {
		var transactions []types.Transactions
		chunkSize := int(binary.BigEndian.Uint32(blobBytes[2+4*chunkID : 2+4*chunkID+4]))

		// Validate the size table before slicing: a corrupt chunk size used to
		// run past the blob payload and panic instead of returning an error.
		if chunkSize < 0 || index+chunkSize > len(blobBytes) {
			return fmt.Errorf("chunk size out of range, chunk id: %d, chunk size: %d, index: %d, blob payload size: %d", chunkID, chunkSize, index, len(blobBytes))
		}

		chunkBytes := blobBytes[index : index+chunkSize]
		curIndex := 0
		for _, block := range chunk.Blocks {
			var blockTransactions types.Transactions
			// Only L2 transactions are stored in the blob; L1 messages are excluded.
			txNum := int(block.NumTransactions - block.NumL1Messages)
			for i := 0; i < txNum; i++ {
				tx, nextIndex, err := GetNextTx(chunkBytes, curIndex)
				if err != nil {
					return fmt.Errorf("couldn't decode next tx from blob bytes: %w, index: %d", err, index+curIndex+4)
				}
				curIndex = nextIndex
				blockTransactions = append(blockTransactions, tx)
			}
			transactions = append(transactions, blockTransactions)
		}
		chunk.Transactions = transactions
		index += chunkSize
	}
	return nil
}

var errSmallLength error = fmt.Errorf("length of blob bytes is too small")

// GetNextTx parses blob bytes to find length of payload of next Tx and decode it
func GetNextTx(bytes []byte, index int) (*types.Transaction, int, error) {
NazariiDenha marked this conversation as resolved.
Show resolved Hide resolved
var nextIndex int
length := len(bytes)
if length < index+1 {
return nil, 0, errSmallLength
}
var txBytes []byte
if bytes[index] <= 0x7f {
// the first byte is transaction type, rlp encoding begins from next byte
txBytes = append(txBytes, bytes[index])
index++

}
if length < index+1 {
return nil, 0, errSmallLength
}
if bytes[index] >= 0xc0 && bytes[index] <= 0xf7 {
// length of payload is simply bytes[index] - 0xc0
payloadLen := int(bytes[index] - 0xc0)
if length < index+1+payloadLen {
return nil, 0, errSmallLength
}
txBytes = append(txBytes, bytes[index:index+1+payloadLen]...)
nextIndex = index + 1 + payloadLen
} else if bytes[index] > 0xf7 {
// the length of payload is encoded in next bytes[index] - 0xf7 bytes
// length of bytes representation of length of payload
lenPayloadLen := int(bytes[index] - 0xf7)
if length < index+1+lenPayloadLen {
return nil, 0, errSmallLength
}
lenBytes := bytes[index+1 : index+1+lenPayloadLen]
for len(lenBytes) < 8 {
lenBytes = append([]byte{0x0}, lenBytes...)
}
payloadLen := binary.BigEndian.Uint64(lenBytes)

if length < index+1+lenPayloadLen+int(payloadLen) {
return nil, 0, errSmallLength
}
txBytes = append(txBytes, bytes[index:index+1+lenPayloadLen+int(payloadLen)]...)
nextIndex = index + 1 + lenPayloadLen + int(payloadLen)
} else {
return nil, 0, fmt.Errorf("incorrect format of rlp encoding")
}
tx := &types.Transaction{}
err := tx.UnmarshalBinary(txBytes)
if err != nil {
return nil, 0, fmt.Errorf("failed to unmarshal tx, err: %w", err)
}
return tx, nextIndex, nil
}

// BytesFromBlobCanonical converts the canonical blob representation into the raw blob data.
// Each 32-byte field element contributes its trailing 31 bytes; the first byte of every
// field element is dropped.
func BytesFromBlobCanonical(blob *kzg4844.Blob) [126976]byte {
	var raw [126976]byte
	nElems := len(blob) / 32
	for elem := 0; elem < nElems; elem++ {
		copy(raw[elem*31:(elem+1)*31], blob[elem*32+1:(elem+1)*32])
	}
	return raw
}

// NewDABatchFromBytes decodes the given byte slice into a DABatch.
// Note: This function only populates the batch header, it leaves the blob-related fields empty.
func NewDABatchFromBytes(data []byte) (*DABatch, error) {
Expand Down
Loading
Loading