Commit 52fdf7c

chore: fix lint
simlecode committed Oct 16, 2024
1 parent 09243e0 commit 52fdf7c
Showing 9 changed files with 71 additions and 73 deletions.
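
The changed files all apply the same rename pattern: identifier segments that are initialisms are re-cased, so Ts becomes TS, Id becomes ID, and Db becomes DB, in line with the stylecheck ST1003 finding reported by CI (see the annotation in pkg/events/filter/index.go below). A minimal Go sketch of the pattern, using hypothetical names rather than code from this commit:

// Hypothetical before/after illustrating the re-casing; none of these
// identifiers are taken from the repository.
package example

type TipSet struct{}

// Before: mixed-case initialisms, the spelling that stylecheck flags
// (compare the CI annotation "var subId should be subID" below).
//
//	const defaultDbFilename = "events.db"
//	func lookupId(msgTs *TipSet) *TipSet { return msgTs }
//
// After: the initialisms DB, ID and TS are written fully capitalized.
const defaultDBFilename = "events.db"

func lookupID(msgTS *TipSet) *TipSet { return msgTS }
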
4 changes: 2 additions & 2 deletions app/submodule/eth/eth_api.go
@@ -486,12 +486,12 @@ func (a *ethAPI) EthGetTransactionReceiptLimited(ctx context.Context, txHash typ
}

// The tx is located in the parent tipset
parentTs, err := a.em.chainModule.ChainReader.GetTipSet(ctx, ts.Parents())
parentTS, err := a.em.chainModule.ChainReader.GetTipSet(ctx, ts.Parents())
if err != nil {
return nil, fmt.Errorf("failed to lookup tipset %s when constructing the eth txn receipt: %w", ts.Parents(), err)
}

baseFee := parentTs.Blocks()[0].ParentBaseFee
baseFee := parentTS.Blocks()[0].ParentBaseFee

receipt, err := newEthTxReceipt(ctx, tx, baseFee, msgLookup.Receipt, a.EthEventHandler)
if err != nil {
2 changes: 1 addition & 1 deletion app/submodule/eth/eth_event_api.go
@@ -817,7 +817,7 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []types.Et
}

// func ethFilterResultFromEvents(evs []*filter.CollectedEvent, ms *chain.MessageStore) (*types.EthFilterResult, error) {
func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, ms *chain.MessageStore) ([]types.EthLog, error) {
func ethFilterLogsFromEvents(_ context.Context, evs []*filter.CollectedEvent, ms *chain.MessageStore) ([]types.EthLog, error) {
var logs []types.EthLog
for _, ev := range evs {
log := types.EthLog{
4 changes: 2 additions & 2 deletions app/submodule/eth/eth_utils.go
@@ -659,7 +659,7 @@ func newEthTxFromMessageLookup(ctx context.Context, msgLookup *types.MsgLookup,
func newEthTx(ctx context.Context,
state tree.Tree,
blockHeight abi.ChainEpoch,
msgTsCid cid.Cid,
msgTSCid cid.Cid,
msgCid cid.Cid,
txIdx int,
ms *chain.MessageStore,
@@ -679,7 +679,7 @@ func newEthTx(ctx context.Context,
ti = types.EthUint64(txIdx)
)

blkHash, err := types.EthHashFromCid(msgTsCid)
blkHash, err := types.EthHashFromCid(msgTSCid)
if err != nil {
return types.EthTx{}, err
}
26 changes: 13 additions & 13 deletions pkg/events/filter/event.go
@@ -104,7 +104,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever
addr, found := addressLookups[ev.Emitter]
if !found {
var ok bool
addr, ok = resolver(ctx, ev.Emitter, te.rctTs)
addr, ok = resolver(ctx, ev.Emitter, te.rctTS)
if !ok {
// not an address we will be able to match against
continue
@@ -125,8 +125,8 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever
EmitterAddr: addr,
EventIdx: eventCount,
Reverted: revert,
Height: te.msgTs.Height(),
TipSetKey: te.msgTs.Key(),
Height: te.msgTS.Height(),
TipSetKey: te.msgTS.Key(),
MsgCid: em.Message().Cid(),
MsgIdx: msgIdx,
}
@@ -254,28 +254,28 @@ func (f *eventFilter) matchKeys(ees []types.EventEntry) bool {
}

type TipSetEvents struct {
rctTs *types.TipSet // rctTs is the tipset containing the receipts of executed messages
msgTs *types.TipSet // msgTs is the tipset containing the messages that have been executed
rctTS *types.TipSet // rctTS is the tipset containing the receipts of executed messages
msgTS *types.TipSet // msgTS is the tipset containing the messages that have been executed

load func(ctx context.Context, msgTs, rctTs *types.TipSet) ([]executedMessage, error)
load func(ctx context.Context, msgTS, rctTS *types.TipSet) ([]executedMessage, error)

once sync.Once // for lazy population of ems
ems []executedMessage
err error
}

func (te *TipSetEvents) Height() abi.ChainEpoch {
return te.msgTs.Height()
return te.msgTS.Height()
}

func (te *TipSetEvents) Cid() (cid.Cid, error) {
return te.msgTs.Key().Cid()
return te.msgTS.Key().Cid()
}

func (te *TipSetEvents) messages(ctx context.Context) ([]executedMessage, error) {
te.once.Do(func() {
// populate executed message list
ems, err := te.load(ctx, te.msgTs, te.rctTs)
ems, err := te.load(ctx, te.msgTS, te.rctTS)
if err != nil {
te.err = err
return
@@ -326,8 +326,8 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet)
}

tse := &TipSetEvents{
msgTs: from,
rctTs: to,
msgTS: from,
rctTS: to,
load: m.loadExecutedMessages,
}

@@ -357,8 +357,8 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet)
}

tse := &TipSetEvents{
msgTs: to,
rctTs: from,
msgTS: to,
rctTS: from,
load: m.loadExecutedMessages,
}

14 changes: 7 additions & 7 deletions pkg/events/filter/event_test.go
@@ -66,7 +66,7 @@ func TestEventFilterCollectEvents(t *testing.T) {
}

events14000 := buildTipSetEvents(t, rng, 14000, em)
cid14000, err := events14000.msgTs.Key().Cid()
cid14000, err := events14000.msgTS.Key().Cid()
require.NoError(t, err, "tipset cid")

noCollectedEvents := []*CollectedEvent{}
@@ -77,7 +77,7 @@ func TestEventFilterCollectEvents(t *testing.T) {
EventIdx: 0,
Reverted: false,
Height: 14000,
TipSetKey: events14000.msgTs.Key(),
TipSetKey: events14000.msgTS.Key(),
MsgIdx: 0,
MsgCid: em.msg.Cid(),
},
@@ -421,13 +421,13 @@ func newStore() adt.Store {
func buildTipSetEvents(tb testing.TB, rng *pseudo.Rand, h abi.ChainEpoch, em executedMessage) *TipSetEvents {
tb.Helper()

msgTs := fakeTipSet(tb, rng, h, []cid.Cid{})
rctTs := fakeTipSet(tb, rng, h+1, msgTs.Cids())
msgTS := fakeTipSet(tb, rng, h, []cid.Cid{})
rctTS := fakeTipSet(tb, rng, h+1, msgTS.Cids())

return &TipSetEvents{
msgTs: msgTs,
rctTs: rctTs,
load: func(ctx context.Context, msgTs, rctTs *types.TipSet) ([]executedMessage, error) {
msgTS: msgTS,
rctTS: rctTS,
load: func(ctx context.Context, msgTS, rctTS *types.TipSet) ([]executedMessage, error) {
return []executedMessage{em}, nil
},
}
34 changes: 17 additions & 17 deletions pkg/events/filter/index.go
@@ -20,7 +20,7 @@ import (
"github.com/filecoin-project/venus/venus-shared/types"
)

const DefaultDbFilename = "events.db"
const DefaultDBFilename = "events.db"

// Any changes to this schema should be matched for the `lotus-shed indexes backfill-events` command

@@ -51,7 +51,7 @@ var ddls = []string{

createTableEventsSeen,

createIndexEventEntryEventId,
createIndexEventEntryEventID,
createIndexEventsSeenHeight,
createIndexEventsSeenTipsetKeyCid,
}
@@ -101,7 +101,7 @@ const (
createIndexEventTipsetKeyCid = `CREATE INDEX IF NOT EXISTS event_tipset_key_cid ON event (tipset_key_cid);`
createIndexEventHeight = `CREATE INDEX IF NOT EXISTS event_height ON event (height);`

createIndexEventEntryEventId = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);`
createIndexEventEntryEventID = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);`

createIndexEventsSeenHeight = `CREATE INDEX IF NOT EXISTS events_seen_height ON events_seen (height);`
createIndexEventsSeenTipsetKeyCid = `CREATE INDEX IF NOT EXISTS events_seen_tipset_key_cid ON events_seen (tipset_key_cid);`
@@ -148,7 +148,7 @@ type EventIndex struct {
stmt *preparedStatements

mu sync.Mutex
subIdCounter uint64
subIDCounter uint64
updateSubs map[uint64]*updateSub
}

@@ -225,8 +225,8 @@ func (ei *EventIndex) SubscribeUpdates() (chan EventIndexUpdated, func()) {
}

ei.mu.Lock()
subId := ei.subIdCounter
ei.subIdCounter++
subId := ei.subIDCounter

Check failure on line 228 in pkg/events/filter/index.go (GitHub Actions / check): ST1003: var subId should be subID (stylecheck)
ei.subIDCounter++
ei.updateSubs[subId] = tSub
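// Note (not part of this commit): the ST1003 annotation above still fires on
// the new file, since the local variable keeps the subId spelling even though
// the counter field is now subIDCounter. A hypothetical spelling that would
// satisfy the check:
//
//	subID := ei.subIDCounter
//	ei.subIDCounter++
//	ei.updateSubs[subID] = tSub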
ei.mu.Unlock()

@@ -277,19 +277,19 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
// rollback the transaction (a no-op if the transaction was already committed)
defer func() { _ = tx.Rollback() }()

tsKeyCid, err := te.msgTs.Key().Cid()
tsKeyCid, err := te.msgTS.Key().Cid()
if err != nil {
return fmt.Errorf("tipset key cid: %w", err)
}

// lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return
if revert {
_, err = tx.Stmt(ei.stmt.revertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
_, err = tx.Stmt(ei.stmt.revertEventsInTipset).Exec(te.msgTS.Height(), te.msgTS.Key().Bytes())
if err != nil {
return fmt.Errorf("revert event: %w", err)
}

_, err = tx.Stmt(ei.stmt.revertEventSeen).Exec(te.msgTs.Height(), tsKeyCid.Bytes())
_, err = tx.Stmt(ei.stmt.revertEventSeen).Exec(te.msgTS.Height(), tsKeyCid.Bytes())
if err != nil {
return fmt.Errorf("revert event seen: %w", err)
}
@@ -336,7 +336,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
addr, found := addressLookups[ev.Emitter]
if !found {
var ok bool
addr, ok = resolver(ctx, ev.Emitter, te.rctTs)
addr, ok = resolver(ctx, ev.Emitter, te.rctTS)
if !ok {
// not an address we will be able to match against
continue
@@ -347,8 +347,8 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
// check if this event already exists in the database
var entryID sql.NullInt64
err = tx.Stmt(ei.stmt.eventExists).QueryRow(
te.msgTs.Height(), // height
te.msgTs.Key().Bytes(), // tipset_key
te.msgTS.Height(), // height
te.msgTS.Key().Bytes(), // tipset_key
tsKeyCid.Bytes(), // tipset_key_cid
addr.Bytes(), // emitter_addr
eventCount, // event_index
@@ -362,8 +362,8 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
if !entryID.Valid {
// event does not exist, lets insert it
res, err := tx.Stmt(ei.stmt.insertEvent).Exec(
te.msgTs.Height(), // height
te.msgTs.Key().Bytes(), // tipset_key
te.msgTS.Height(), // height
te.msgTS.Key().Bytes(), // tipset_key
tsKeyCid.Bytes(), // tipset_key_cid
addr.Bytes(), // emitter_addr
eventCount, // event_index
@@ -397,8 +397,8 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
} else {
// event already exists, lets mark it as not reverted
res, err := tx.Stmt(ei.stmt.restoreEvent).Exec(
te.msgTs.Height(), // height
te.msgTs.Key().Bytes(), // tipset_key
te.msgTS.Height(), // height
te.msgTS.Key().Bytes(), // tipset_key
tsKeyCid.Bytes(), // tipset_key_cid
addr.Bytes(), // emitter_addr
eventCount, // event_index
@@ -426,7 +426,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
// this statement will mark the tipset as processed and will insert a new row if it doesn't exist
// or update the reverted field to false if it does
_, err = tx.Stmt(ei.stmt.upsertEventsSeen).Exec(
te.msgTs.Height(),
te.msgTS.Height(),
tsKeyCid.Bytes(),
)
if err != nil {
32 changes: 16 additions & 16 deletions pkg/events/filter/index_migrations.go
@@ -49,34 +49,34 @@ func migrationVersion2(db *sql.DB, chainStore *chain.Store) sqlite.MigrationFunc
}
log.Infof("Migrating events from head to %d", minHeight.Int64)

currTs := chainStore.GetHead()
currTS := chainStore.GetHead()

for int64(currTs.Height()) >= minHeight.Int64 {
if currTs.Height()%1000 == 0 {
log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64)
for int64(currTS.Height()) >= minHeight.Int64 {
if currTS.Height()%1000 == 0 {
log.Infof("Migrating height %d (remaining %d)", currTS.Height(), int64(currTS.Height())-minHeight.Int64)
}

tsKey := currTs.Parents()
currTs, err = chainStore.GetTipSet(ctx, tsKey)
tsKey := currTS.Parents()
currTS, err = chainStore.GetTipSet(ctx, tsKey)
if err != nil {
return fmt.Errorf("get tipset from key: %w", err)
}
log.Debugf("Migrating height %d", currTs.Height())
log.Debugf("Migrating height %d", currTS.Height())

tsKeyCid, err := currTs.Key().Cid()
tsKeyCid, err := currTS.Key().Cid()
if err != nil {
return fmt.Errorf("tipset key cid: %w", err)
}

// delete all events that are not in the canonical chain
_, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height())
_, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTS.Height())
if err != nil {
return fmt.Errorf("delete off chain event: %w", err)
}

// find the first eventId from the last time the tipset was applied
var eventId sql.NullInt64
err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId)
// find the first eventID from the last time the tipset was applied
var eventID sql.NullInt64
err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventID)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
continue
@@ -85,12 +85,12 @@ func migrationVersion2(db *sql.DB, chainStore *chain.Store) sqlite.MigrationFunc
}

// this tipset might not have any events which is ok
if !eventId.Valid {
if !eventID.Valid {
continue
}
log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height())
log.Debugf("Deleting all events with id < %d at height %d", eventID.Int64, currTS.Height())


res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64)
res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventID.Int64)
if err != nil {
return fmt.Errorf("delete event: %w", err)
}
@@ -168,7 +168,7 @@ func migrationVersion4(ctx context.Context, tx *sql.Tx) error {
{"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"},
{"create index event_tipset_key_cid", createIndexEventTipsetKeyCid},
{"create index event_height", createIndexEventHeight},
{"create index event_entry_event_id", createIndexEventEntryEventId},
{"create index event_entry_event_id", createIndexEventEntryEventID},
} {
if _, err := tx.ExecContext(ctx, create.query); err != nil {
return fmt.Errorf("%s: %w", create.desc, err)
(2 more changed files not shown)
