diff --git a/common/client/multi_node.go b/common/client/multi_node.go index dfd6585b642..7d55784e68f 100644 --- a/common/client/multi_node.go +++ b/common/client/multi_node.go @@ -86,7 +86,7 @@ type multiNode[ sendonlys []SendOnlyNode[CHAIN_ID, RPC_CLIENT] chainID CHAIN_ID chainType config.ChainType - lggr logger.Logger + lggr logger.SugaredLogger selectionMode string noNewHeadsThreshold time.Duration nodeSelector NodeSelector[CHAIN_ID, HEAD, RPC_CLIENT] @@ -118,7 +118,7 @@ func NewMultiNode[ HEAD types.Head[BLOCK_HASH], RPC_CLIENT RPC[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD], ]( - l logger.Logger, + lggr logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, @@ -131,9 +131,6 @@ func NewMultiNode[ ) MultiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OPS, TX_RECEIPT, FEE, HEAD, RPC_CLIENT] { nodeSelector := newNodeSelector(selectionMode, nodes) - lggr := logger.Named(l, "MultiNode") - lggr = logger.With(lggr, "chainID", chainID.String()) - // Prometheus' default interval is 15s, set this to under 7.5s to avoid // aliasing (see: https://en.wikipedia.org/wiki/Nyquist_frequency) const reportInterval = 6500 * time.Millisecond @@ -142,7 +139,7 @@ func NewMultiNode[ sendonlys: sendonlys, chainID: chainID, chainType: chainType, - lggr: lggr, + lggr: logger.Sugared(lggr).Named("MultiNode").With("chainID", chainID.String()), selectionMode: selectionMode, noNewHeadsThreshold: noNewHeadsThreshold, nodeSelector: nodeSelector, @@ -249,7 +246,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP c.activeNode = c.nodeSelector.Select() if c.activeNode == nil { - logger.Criticalw(c.lggr, "No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) + c.lggr.Criticalw("No live RPC nodes available", "NodeSelectionMode", c.nodeSelector.Name()) errmsg := fmt.Errorf("no live nodes available for chain %s", c.chainID.String()) c.SvcErrBuffer.Append(errmsg) err = ErroringNodeError @@ -351,10 +348,10 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP } live := total - dead - logger.Tracew(c.lggr, fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + c.lggr.Tracew(fmt.Sprintf("MultiNode state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) if total == dead { rerr := fmt.Errorf("no primary nodes available: 0/%d nodes are alive", total) - logger.Criticalw(c.lggr, rerr.Error(), "nodeStates", nodeStates) + c.lggr.Criticalw(rerr.Error(), "nodeStates", nodeStates) c.SvcErrBuffer.Append(rerr) } else if dead > 0 { c.lggr.Errorw(fmt.Sprintf("At least one primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) @@ -405,7 +402,7 @@ func (c *multiNode[CHAIN_ID, SEQ, ADDR, BLOCK_HASH, TX, TX_HASH, EVENT, EVENT_OP if err != nil { c.lggr.Debugw("Secondary node BatchCallContext failed", "err", err) } else { - logger.Trace(c.lggr, "Secondary node BatchCallContext success") + c.lggr.Trace("Secondary node BatchCallContext success") } }(n) } diff --git a/common/client/node_lifecycle.go b/common/client/node_lifecycle.go index eda137d5100..af8f27d498d 100644 --- a/common/client/node_lifecycle.go +++ b/common/client/node_lifecycle.go @@ -93,9 +93,8 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() pollInterval := n.nodePoolCfg.PollInterval() - lggr := logger.Named(n.lfcLog, "Alive") - lggr = 
logger.With(lggr, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) - logger.Tracew(lggr, "Alive loop starting", "nodeState", n.State()) + lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) + lggr.Tracew("Alive loop starting", "nodeState", n.State()) headsC := make(chan HEAD) sub, err := n.rpc.Subscribe(n.nodeCtx, headsC, rpcSubscriptionMethodNewHeads) @@ -146,7 +145,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { case <-pollCh: var version string promPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - logger.Tracew(lggr, "Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) + lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) version, err := n.RPC().ClientVersion(ctx) cancel() @@ -166,7 +165,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { - logger.Criticalf(lggr, "RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) continue } } @@ -178,7 +177,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { // note: there must be another live node for us to be out of sync lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) if liveNodes < 2 { - logger.Criticalf(lggr, "RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue } n.declareOutOfSync(n.isOutOfSync) @@ -191,13 +190,13 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { return } promPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() - logger.Tracew(lggr, "Got head", "head", bh) + lggr.Tracew("Got head", "head", bh) if bh.BlockNumber() > highestReceivedBlockNumber { promPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.BlockNumber())) - logger.Tracew(lggr, "Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) highestReceivedBlockNumber = bh.BlockNumber() } else { - logger.Tracew(lggr, "Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.BlockNumber(), "nodeState", n.State()) } if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) @@ -213,7 +212,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) aliveLoop() { lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), 
"nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { - logger.Criticalf(lggr, "RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) // We don't necessarily want to wait the full timeout to check again, we should // check regularly and log noisily in this state outOfSyncT.Reset(zombieNodeCheckInterval(n.noNewHeadsThreshold)) @@ -279,7 +278,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td outOfSyncAt := time.Now() - lggr := logger.Named(n.lfcLog, "OutOfSync") + lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) // Need to redial since out-of-sync nodes are automatically disconnected @@ -296,7 +295,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td return } - logger.Tracew(lggr, "Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) ch := make(chan HEAD) sub, err := n.rpc.Subscribe(n.nodeCtx, ch, rpcSubscriptionMethodNewHeads) @@ -328,7 +327,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) outOfSyncLoop(isOutOfSync func(num int64, td case <-time.After(zombieNodeCheckInterval(n.noNewHeadsThreshold)): if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 1 { - logger.Critical(lggr, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. 
This RPC node will be forcibly moved back into the live pool in a degraded state") n.declareInSync() return } @@ -358,7 +357,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { unreachableAt := time.Now() - lggr := logger.Named(n.lfcLog, "Unreachable") + lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) dialRetryBackoff := iutils.NewRedialBackoff() @@ -368,7 +367,7 @@ func (n *node[CHAIN_ID, HEAD, RPC]) unreachableLoop() { case <-n.nodeCtx.Done(): return case <-time.After(dialRetryBackoff.Duration()): - logger.Tracew(lggr, "Trying to re-dial RPC node", "nodeState", n.State()) + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) err := n.rpc.Dial(n.nodeCtx) if err != nil { diff --git a/common/headtracker/head_tracker.go b/common/headtracker/head_tracker.go index 373aa5a958f..4cc152fb9fe 100644 --- a/common/headtracker/head_tracker.go +++ b/common/headtracker/head_tracker.go @@ -40,7 +40,7 @@ type HeadTracker[ BLOCK_HASH types.Hashable, ] struct { services.StateMachine - log logger.Logger + log logger.SugaredLogger headBroadcaster types.HeadBroadcaster[HTH, BLOCK_HASH] headSaver types.HeadSaver[HTH, BLOCK_HASH] mailMon *mailbox.Monitor @@ -81,7 +81,7 @@ func NewHeadTracker[ chainID: client.ConfiguredChainID(), config: config, htConfig: htConfig, - log: lggr, + log: logger.Sugared(lggr), backfillMB: mailbox.NewSingle[HTH](), broadcastMB: mailbox.New[HTH](HeadsBufferSize), chStop: chStop, @@ -227,7 +227,7 @@ func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) handleNewHead(ctx context.Context prevUnFinalizedHead := prevHead.BlockNumber() - int64(ht.config.FinalityDepth()) if head.BlockNumber() < prevUnFinalizedHead { promOldHead.WithLabelValues(ht.chainID.String()).Inc() - logger.Criticalf(ht.log, "Got very old block with number %d (highest seen was %d). This is a problem and either means a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", head.BlockNumber(), prevHead.BlockNumber()) + ht.log.Criticalf("Got very old block with number %d (highest seen was %d). This is a problem and either means a very deep re-org occurred, one of the RPC nodes has gotten far out of sync, or the chain went backwards in block numbers. This node may not function correctly without manual intervention.", head.BlockNumber(), prevHead.BlockNumber()) ht.SvcErrBuffer.Append(errors.New("got very old block")) } } @@ -310,7 +310,7 @@ func (ht *HeadTracker[HTH, S, ID, BLOCK_HASH]) backfill(ctx context.Context, hea } mark := time.Now() fetched := 0 - l := logger.With(ht.log, "blockNumber", headBlockNumber, + l := ht.log.With("blockNumber", headBlockNumber, "n", headBlockNumber-baseHeight, "fromBlockHeight", baseHeight, "toBlockHeight", headBlockNumber-1) diff --git a/common/txmgr/broadcaster.go b/common/txmgr/broadcaster.go index dba2b976c33..a62b3df8699 100644 --- a/common/txmgr/broadcaster.go +++ b/common/txmgr/broadcaster.go @@ -82,7 +82,7 @@ type TransmitChecker[ // is returned. Errors should only be returned if the checker can confirm that a transaction // should not be sent, other errors (for example connection or other unexpected errors) should // be logged and swallowed. 
- Check(ctx context.Context, l logger.Logger, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], a txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error + Check(ctx context.Context, l logger.SugaredLogger, tx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], a txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) error } // Broadcaster monitors txes for transactions that need to @@ -108,7 +108,7 @@ type Broadcaster[ FEE feetypes.Fee, ] struct { services.StateMachine - lggr logger.Logger + lggr logger.SugaredLogger txStore txmgrtypes.TransactionStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, SEQ, FEE] client txmgrtypes.TransactionClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] @@ -172,7 +172,7 @@ func NewBroadcaster[ ) *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] { lggr = logger.Named(lggr, "Broadcaster") b := &Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]{ - lggr: lggr, + lggr: logger.Sugared(lggr), txStore: txStore, client: client, TxAttemptBuilder: txAttemptBuilder, @@ -311,7 +311,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) getS if err == nil { return seq, nil } - logger.Criticalw(eb.lggr, "failed to retrieve next sequence from on-chain for address: ", "address", address.String()) + eb.lggr.Criticalw("failed to retrieve next sequence from on-chain for address: ", "address", address.String()) return seq, err } @@ -399,7 +399,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Sync localSequence, err := eb.GetNextSequence(ctx, addr) // Address not found in map so skip sync if err != nil { - logger.Criticalw(eb.lggr, "Failed to retrieve local next sequence for address", "address", addr.String(), "err", err) + eb.lggr.Criticalw("Failed to retrieve local next sequence for address", "address", addr.String(), "err", err) return } @@ -414,7 +414,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) Sync newNextSequence, err := eb.sequenceSyncer.Sync(ctx, addr, localSequence) if err != nil { if attempt > 5 { - logger.Criticalw(eb.lggr, "Failed to sync with on-chain sequence", "address", addr.String(), "attempt", attempt, "err", err) + eb.lggr.Criticalw("Failed to sync with on-chain sequence", "address", addr.String(), "attempt", attempt, "err", err) eb.SvcErrBuffer.Append(err) } else { eb.lggr.Warnw("Failed to sync with on-chain sequence", "address", addr.String(), "attempt", attempt, "err", err) @@ -537,7 +537,7 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand return fmt.Errorf("building transmit checker: %w", err), false } - lgr := etx.GetLogger(logger.With(eb.lggr, "fee", attempt.TxFee)) + lgr := etx.GetLogger(eb.lggr.With("fee", attempt.TxFee)) // If the transmit check does not complete within the timeout, the transaction will be sent // anyway. @@ -647,14 +647,14 @@ func (eb *Broadcaster[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) hand // If there is only one RPC node, or all RPC nodes have the same // configured cap, this transaction will get stuck and keep repeating // forever until the issue is resolved. - logger.Criticalw(lgr, `RPC node rejected this tx as outside Fee Cap`) + lgr.Criticalw(`RPC node rejected this tx as outside Fee Cap`) fallthrough default: // Every error that doesn't fall under one of the above categories will be treated as Unknown. 
fallthrough case client.Unknown: eb.SvcErrBuffer.Append(err) - logger.Criticalw(lgr, `Unknown error occurred while handling tx queue in ProcessUnstartedTxs. This chain/RPC client may not be supported. `+ + lgr.Criticalw(`Unknown error occurred while handling tx queue in ProcessUnstartedTxs. This chain/RPC client may not be supported. `+ `Urgent resolution required, Chainlink is currently operating in a degraded state and may miss transactions`, "err", err, "etx", etx, "attempt", attempt) nextSequence, e := eb.client.PendingSequenceAt(ctx, etx.FromAddress) if e != nil { diff --git a/common/txmgr/confirmer.go b/common/txmgr/confirmer.go index aabdf45ae32..fbc6ea8a108 100644 --- a/common/txmgr/confirmer.go +++ b/common/txmgr/confirmer.go @@ -118,7 +118,7 @@ type Confirmer[ ] struct { services.StateMachine txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] - lggr logger.Logger + lggr logger.SugaredLogger client txmgrtypes.TxmClient[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] txmgrtypes.TxAttemptBuilder[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE] resumeCallback ResumeCallback @@ -166,7 +166,7 @@ func NewConfirmer[ lggr = logger.Named(lggr, "Confirmer") return &Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ txStore: txStore, - lggr: lggr, + lggr: logger.Sugared(lggr), client: client, TxAttemptBuilder: txAttemptBuilder, resumeCallback: nil, @@ -520,8 +520,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bat } } - lggr := logger.Named(ec.lggr, "BatchFetchReceipts") - lggr = logger.With(lggr, "blockNum", blockNum) + lggr := ec.lggr.Named("BatchFetchReceipts").With("blockNum", blockNum) txReceipts, txErrs, err := ec.client.BatchGetReceipts(ctx, attempts) if err != nil { @@ -533,9 +532,9 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bat receipt := txReceipts[i] err := txErrs[i] - l := logger.Sugared(logger.With(attempt.Tx.GetLogger(lggr), "txHash", attempt.Hash.String(), "txAttemptID", attempt.ID, + l := attempt.Tx.GetLogger(lggr).With("txHash", attempt.Hash.String(), "txAttemptID", attempt.ID, "txID", attempt.TxID, "err", err, "sequence", attempt.Tx.Sequence, - )) + ) if err != nil { l.Error("FetchReceipt failed") @@ -554,7 +553,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bat continue } - l = logger.Sugared(logger.With(l, "blockHash", receipt.GetBlockHash().String(), "status", receipt.GetStatus(), "transactionIndex", receipt.GetTransactionIndex())) + l = l.With("blockHash", receipt.GetBlockHash().String(), "status", receipt.GetStatus(), "transactionIndex", receipt.GetTransactionIndex()) if receipt.IsUnmined() { l.Debug("Got receipt for transaction but it's still in the mempool and not included in a block yet") @@ -811,7 +810,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) bum return bumpedAttempt, fmt.Errorf("error bumping gas: %w", err) } -func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleInProgressAttempt(ctx context.Context, lggr logger.Logger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], blockHeight int64) error { +func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) handleInProgressAttempt(ctx context.Context, lggr logger.SugaredLogger, etx txmgrtypes.Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt txmgrtypes.TxAttempt[CHAIN_ID, ADDR, 
TX_HASH, BLOCK_HASH, SEQ, FEE], blockHeight int64) error { if attempt.State != txmgrtypes.TxAttemptInProgress { return fmt.Errorf("invariant violation: expected tx_attempt %v to be in_progress, it was %s", attempt.ID, attempt.State) @@ -833,12 +832,12 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han } if len(etx.TxAttempts) == 0 { err := errors.New("expected to find at least 1 attempt") - logger.Sugared(ec.lggr).AssumptionViolationw(err.Error(), "err", err, "attempt", attempt) + ec.lggr.AssumptionViolationw(err.Error(), "err", err, "attempt", attempt) return err } if attempt.ID != etx.TxAttempts[0].ID { err := errors.New("expected highest priced attempt to be the current in_progress attempt") - logger.Sugared(ec.lggr).AssumptionViolationw(err.Error(), "err", err, "attempt", attempt, "txAttempts", etx.TxAttempts) + ec.lggr.AssumptionViolationw(err.Error(), "err", err, "attempt", attempt, "txAttempts", etx.TxAttempts) return err } replacementAttempt, err := ec.bumpGas(ctx, etx, etx.TxAttempts) @@ -846,7 +845,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han return fmt.Errorf("could not bump gas for terminally underpriced transaction: %w", err) } promNumGasBumps.WithLabelValues(ec.chainID.String()).Inc() - logger.With(lggr, + lggr.With( "sendError", sendError, "maxGasPriceConfig", ec.feeConfig.MaxFeePrice(), "previousAttempt", attempt, @@ -867,7 +866,7 @@ func (ec *Confirmer[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) han // Should NEVER be fatal this is an invariant violation. The // Broadcaster can never create a TxAttempt that will // fatally error. - logger.Criticalw(lggr, "Invariant violation: fatal error while re-attempting transaction", + lggr.Criticalw("Invariant violation: fatal error while re-attempting transaction", "err", sendError, "fee", attempt.TxFee, "feeLimit", etx.FeeLimit, diff --git a/common/txmgr/resender.go b/common/txmgr/resender.go index 74cf3d1389c..dded59f55d4 100644 --- a/common/txmgr/resender.go +++ b/common/txmgr/resender.go @@ -53,7 +53,7 @@ type Resender[ interval time.Duration config txmgrtypes.ResenderChainConfig txConfig txmgrtypes.ResenderTransactionsConfig - logger logger.Logger + logger logger.SugaredLogger lastAlertTimestamps map[string]time.Time ctx context.Context @@ -93,7 +93,7 @@ func NewResender[ pollInterval, config, txConfig, - logger.Named(lggr, "Resender"), + logger.Sugared(logger.Named(lggr, "Resender")), make(map[string]time.Time), ctx, cancel, diff --git a/common/txmgr/txmgr.go b/common/txmgr/txmgr.go index e43a16b29ef..36a6a1304ae 100644 --- a/common/txmgr/txmgr.go +++ b/common/txmgr/txmgr.go @@ -82,7 +82,7 @@ type Txm[ FEE feetypes.Fee, ] struct { services.StateMachine - logger logger.Logger + logger logger.SugaredLogger txStore txmgrtypes.TxStore[ADDR, CHAIN_ID, TX_HASH, BLOCK_HASH, R, SEQ, FEE] config txmgrtypes.TransactionManagerChainConfig txConfig txmgrtypes.TransactionManagerTransactionsConfig @@ -142,7 +142,7 @@ func NewTxm[ tracker *Tracker[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE], ) *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE] { b := Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]{ - logger: lggr, + logger: logger.Sugared(lggr), txStore: txStore, config: cfg, txConfig: txCfg, @@ -349,7 +349,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() select { case <-time.After(backoff.Duration()): if err := b.broadcaster.startInternal(ctx); err != nil { - logger.Criticalw(b.logger, "Failed 
to start Broadcaster", "err", err) + b.logger.Criticalw("Failed to start Broadcaster", "err", err) b.SvcErrBuffer.Append(err) continue } @@ -368,7 +368,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() select { case <-time.After(backoff.Duration()): if err := b.confirmer.startInternal(); err != nil { - logger.Criticalw(b.logger, "Failed to start Confirmer", "err", err) + b.logger.Criticalw("Failed to start Confirmer", "err", err) b.SvcErrBuffer.Append(err) continue } @@ -433,7 +433,7 @@ func (b *Txm[CHAIN_ID, HEAD, ADDR, TX_HASH, BLOCK_HASH, R, SEQ, FEE]) runLoop() } enabledAddresses, err := b.keyStore.EnabledAddressesForChain(b.chainID) if err != nil { - logger.Criticalf(b.logger, "Failed to reload key states after key change") + b.logger.Critical("Failed to reload key states after key change") b.SvcErrBuffer.Append(err) continue } diff --git a/common/txmgr/types/client.go b/common/txmgr/types/client.go index b44c41e4176..0db50e97ad3 100644 --- a/common/txmgr/types/client.go +++ b/common/txmgr/types/client.go @@ -47,7 +47,7 @@ type TransactionClient[ ctx context.Context, attempts []TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], bathSize int, - lggr logger.Logger, + lggr logger.SugaredLogger, ) ( txCodes []client.SendTxReturnCode, txErrs []error, @@ -58,7 +58,7 @@ type TransactionClient[ ctx context.Context, tx Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], attempt TxAttempt[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE], - lggr logger.Logger, + lggr logger.SugaredLogger, ) (client.SendTxReturnCode, error) SendEmptyTransaction( ctx context.Context, diff --git a/common/txmgr/types/tx.go b/common/txmgr/types/tx.go index caac763fc0f..0f5d651ae29 100644 --- a/common/txmgr/types/tx.go +++ b/common/txmgr/types/tx.go @@ -254,7 +254,7 @@ func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetMeta() (*TxMeta[A } // GetLogger returns a new logger with metadata fields. 
-func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetLogger(lgr logger.Logger) logger.Logger { +func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetLogger(lgr logger.Logger) logger.SugaredLogger { lgr = logger.With(lgr, "txID", e.ID, "sequence", e.Sequence, @@ -265,7 +265,7 @@ func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetLogger(lgr logger meta, err := e.GetMeta() if err != nil { lgr.Errorw("failed to get meta of the transaction", "err", err) - return lgr + return logger.Sugared(lgr) } if meta != nil { @@ -315,7 +315,7 @@ func (e *Tx[CHAIN_ID, ADDR, TX_HASH, BLOCK_HASH, SEQ, FEE]) GetLogger(lgr logger } } - return lgr + return logger.Sugared(lgr) } // GetChecker returns an Tx's transmit checker spec in struct form, unmarshalling it from JSON diff --git a/core/chains/evm/client/chain_client.go b/core/chains/evm/client/chain_client.go index 3efc5645e22..255d802720a 100644 --- a/core/chains/evm/client/chain_client.go +++ b/core/chains/evm/client/chain_client.go @@ -37,11 +37,11 @@ type chainClient struct { *evmtypes.Head, RPCCLient, ] - logger logger.Logger + logger logger.SugaredLogger } func NewChainClient( - logger logger.Logger, + lggr logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, @@ -64,7 +64,7 @@ func NewChainClient( *evmtypes.Head, RPCCLient, ]( - logger, + lggr, selectionMode, leaseDuration, noNewHeadsThreshold, @@ -77,7 +77,7 @@ func NewChainClient( ) return &chainClient{ multiNode: multiNode, - logger: logger, + logger: logger.Sugared(lggr), } } diff --git a/core/chains/evm/client/client.go b/core/chains/evm/client/client.go index b85331a62a1..f32ec011445 100644 --- a/core/chains/evm/client/client.go +++ b/core/chains/evm/client/client.go @@ -102,7 +102,7 @@ func ContextWithDefaultTimeout() (ctx context.Context, cancel context.CancelFunc // client represents an abstract client that manages connections to // multiple nodes for a single chain id type client struct { - logger logger.Logger + logger logger.SugaredLogger pool *Pool } @@ -113,10 +113,10 @@ var _ htrktypes.Client[*evmtypes.Head, ethereum.Subscription, *big.Int, common.H // Currently only supports one primary // // Deprecated: use [NewChainClient] -func NewClientWithNodes(logger logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, primaryNodes []Node, sendOnlyNodes []SendOnlyNode, chainID *big.Int, chainType config.ChainType) (*client, error) { - pool := NewPool(logger, selectionMode, leaseDuration, noNewHeadsThreshold, primaryNodes, sendOnlyNodes, chainID, chainType) +func NewClientWithNodes(lggr logger.Logger, selectionMode string, leaseDuration time.Duration, noNewHeadsThreshold time.Duration, primaryNodes []Node, sendOnlyNodes []SendOnlyNode, chainID *big.Int, chainType config.ChainType) (*client, error) { + pool := NewPool(lggr, selectionMode, leaseDuration, noNewHeadsThreshold, primaryNodes, sendOnlyNodes, chainID, chainType) return &client{ - logger: logger, + logger: logger.Sugared(lggr), pool: pool, }, nil } diff --git a/core/chains/evm/client/errors.go b/core/chains/evm/client/errors.go index 143a5f8806f..66cc30f74b4 100644 --- a/core/chains/evm/client/errors.go +++ b/core/chains/evm/client/errors.go @@ -413,13 +413,13 @@ func ExtractRPCError(baseErr error) (*JsonError, error) { return &jErr, nil } -func ClassifySendError(err error, lggr logger.Logger, tx *types.Transaction, fromAddress common.Address, isL2 bool) (commonclient.SendTxReturnCode, error) { +func 
ClassifySendError(err error, lggr logger.SugaredLogger, tx *types.Transaction, fromAddress common.Address, isL2 bool) (commonclient.SendTxReturnCode, error) { sendError := NewSendError(err) if sendError == nil { return commonclient.Successful, err } if sendError.Fatal() { - logger.Criticalw(lggr, "Fatal error sending transaction", "err", sendError, "etx", tx) + lggr.Criticalw("Fatal error sending transaction", "err", sendError, "etx", tx) // Attempt is thrown away in this case; we don't need it since it never got accepted by a node return commonclient.Fatal, err } @@ -462,7 +462,7 @@ func ClassifySendError(err error, lggr logger.Logger, tx *types.Transaction, fro return commonclient.Retryable, err } if sendError.IsInsufficientEth() { - logger.Criticalw(lggr, fmt.Sprintf("Tx %x with type 0x%d was rejected due to insufficient eth: %s\n"+ + lggr.Criticalw(fmt.Sprintf("Tx %x with type 0x%d was rejected due to insufficient eth: %s\n"+ "ACTION REQUIRED: Chainlink wallet with address 0x%x is OUT OF FUNDS", tx.Hash(), tx.Type(), sendError.Error(), fromAddress, ), "err", sendError) @@ -472,7 +472,7 @@ func ClassifySendError(err error, lggr logger.Logger, tx *types.Transaction, fro return commonclient.Retryable, errors.Wrapf(sendError, "timeout while sending transaction %s", tx.Hash().Hex()) } if sendError.IsTxFeeExceedsCap() { - logger.Criticalw(lggr, fmt.Sprintf("Sending transaction failed: %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning), + lggr.Criticalw(fmt.Sprintf("Sending transaction failed: %s", label.RPCTxFeeCapConfiguredIncorrectlyWarning), "etx", tx, "err", sendError, "id", "RPCTxFeeCapExceeded", diff --git a/core/chains/evm/client/helpers_test.go b/core/chains/evm/client/helpers_test.go index 27b335534da..c2f60e13f55 100644 --- a/core/chains/evm/client/helpers_test.go +++ b/core/chains/evm/client/helpers_test.go @@ -42,7 +42,7 @@ func NewClientWithTestNode(t *testing.T, nodePoolCfg config.NodePool, noNewHeads return nil, errors.Errorf("ethereum url scheme must be websocket: %s", parsed.String()) } - lggr := logger.Test(t) + lggr := logger.Sugared(logger.Test(t)) n := NewNode(nodePoolCfg, noNewHeadsThreshold, lggr, *parsed, rpcHTTPURL, "eth-primary-0", id, chainID, 1) n.(*node).setLatestReceived(0, big.NewInt(0)) primaries := []Node{n} diff --git a/core/chains/evm/client/node.go b/core/chains/evm/client/node.go index a2c8b807ba2..a27321535ed 100644 --- a/core/chains/evm/client/node.go +++ b/core/chains/evm/client/node.go @@ -137,7 +137,7 @@ type rawclient struct { type node struct { services.StateMachine lfcLog logger.Logger - rpcLog logger.Logger + rpcLog logger.SugaredLogger name string id int32 chainID *big.Int @@ -206,7 +206,7 @@ func NewNode(nodeCfg config.NodePool, noNewHeadsThreshold time.Duration, lggr lo "mode", n.getNodeMode(), ) n.lfcLog = logger.Named(lggr, "Lifecycle") - n.rpcLog = logger.Named(lggr, "RPC") + n.rpcLog = logger.Sugared(lggr).Named("RPC") n.stateLatestBlockNumber = -1 return n @@ -453,7 +453,7 @@ func (n *node) CallContext(ctx context.Context, result interface{}, method strin return err } defer cancel() - lggr := logger.With(n.newRqLggr(), + lggr := n.newRqLggr().With( "method", method, "args", args, ) @@ -478,9 +478,9 @@ func (n *node) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error { return err } defer cancel() - lggr := logger.With(n.newRqLggr(), "nBatchElems", len(b), "batchElems", b) + lggr := n.newRqLggr().With("nBatchElems", len(b), "batchElems", b) - logger.Trace(lggr, "RPC call: evmclient.Client#BatchCallContext") + lggr.Trace("RPC 
call: evmclient.Client#BatchCallContext") start := time.Now() if http != nil { err = n.wrapHTTP(http.rpc.BatchCallContext(ctx, b)) @@ -500,7 +500,7 @@ func (n *node) EthSubscribe(ctx context.Context, channel chan<- *evmtypes.Head, return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "args", args) + lggr := n.newRqLggr().With("args", args) lggr.Debug("RPC call: evmclient.Client#EthSubscribe") start := time.Now() @@ -523,7 +523,7 @@ func (n *node) TransactionReceipt(ctx context.Context, txHash common.Hash) (rece return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "txHash", txHash) + lggr := n.newRqLggr().With("txHash", txHash) lggr.Debug("RPC call: evmclient.Client#TransactionReceipt") @@ -550,7 +550,7 @@ func (n *node) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *t return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "txHash", txHash) + lggr := n.newRqLggr().With("txHash", txHash) lggr.Debug("RPC call: evmclient.Client#TransactionByHash") @@ -577,7 +577,7 @@ func (n *node) HeaderByNumber(ctx context.Context, number *big.Int) (header *typ return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "number", number) + lggr := n.newRqLggr().With("number", number) lggr.Debug("RPC call: evmclient.Client#HeaderByNumber") start := time.Now() @@ -601,7 +601,7 @@ func (n *node) HeaderByHash(ctx context.Context, hash common.Hash) (header *type return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "hash", hash) + lggr := n.newRqLggr().With("hash", hash) lggr.Debug("RPC call: evmclient.Client#HeaderByHash") start := time.Now() @@ -627,7 +627,7 @@ func (n *node) SendTransaction(ctx context.Context, tx *types.Transaction) error return err } defer cancel() - lggr := logger.With(n.newRqLggr(), "tx", tx) + lggr := n.newRqLggr().With("tx", tx) lggr.Debug("RPC call: evmclient.Client#SendTransaction") start := time.Now() @@ -650,7 +650,7 @@ func (n *node) PendingNonceAt(ctx context.Context, account common.Address) (nonc return 0, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "account", account) + lggr := n.newRqLggr().With("account", account) lggr.Debug("RPC call: evmclient.Client#PendingNonceAt") start := time.Now() @@ -679,7 +679,7 @@ func (n *node) NonceAt(ctx context.Context, account common.Address, blockNumber return 0, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "account", account, "blockNumber", blockNumber) + lggr := n.newRqLggr().With("account", account, "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#NonceAt") start := time.Now() @@ -705,7 +705,7 @@ func (n *node) PendingCodeAt(ctx context.Context, account common.Address) (code return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "account", account) + lggr := n.newRqLggr().With("account", account) lggr.Debug("RPC call: evmclient.Client#PendingCodeAt") start := time.Now() @@ -731,7 +731,7 @@ func (n *node) CodeAt(ctx context.Context, account common.Address, blockNumber * return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "account", account, "blockNumber", blockNumber) + lggr := n.newRqLggr().With("account", account, "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#CodeAt") start := time.Now() @@ -757,7 +757,7 @@ func (n *node) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint return 0, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "call", call) + lggr := n.newRqLggr().With("call", call) lggr.Debug("RPC call: 
evmclient.Client#EstimateGas") start := time.Now() @@ -809,7 +809,7 @@ func (n *node) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumb return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "callMsg", msg, "blockNumber", blockNumber) + lggr := n.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#CallContract") start := time.Now() @@ -836,7 +836,7 @@ func (n *node) BlockByNumber(ctx context.Context, number *big.Int) (b *types.Blo return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "number", number) + lggr := n.newRqLggr().With("number", number) lggr.Debug("RPC call: evmclient.Client#BlockByNumber") start := time.Now() @@ -862,7 +862,7 @@ func (n *node) BlockByHash(ctx context.Context, hash common.Hash) (b *types.Bloc return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "hash", hash) + lggr := n.newRqLggr().With("hash", hash) lggr.Debug("RPC call: evmclient.Client#BlockByHash") start := time.Now() @@ -914,7 +914,7 @@ func (n *node) BalanceAt(ctx context.Context, account common.Address, blockNumbe return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "account", account.Hex(), "blockNumber", blockNumber) + lggr := n.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#BalanceAt") start := time.Now() @@ -940,7 +940,7 @@ func (n *node) FilterLogs(ctx context.Context, q ethereum.FilterQuery) (l []type return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "q", q) + lggr := n.newRqLggr().With("q", q) lggr.Debug("RPC call: evmclient.Client#FilterLogs") start := time.Now() @@ -966,7 +966,7 @@ func (n *node) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, return nil, err } defer cancel() - lggr := logger.With(n.newRqLggr(), "q", q) + lggr := n.newRqLggr().With("q", q) lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs") start := time.Now() @@ -1011,10 +1011,8 @@ func (n *node) SuggestGasTipCap(ctx context.Context) (tipCap *big.Int, err error func (n *node) ChainID() (chainID *big.Int) { return n.chainID } // newRqLggr generates a new logger with a unique request ID -func (n *node) newRqLggr() logger.Logger { - return logger.With(n.rpcLog, - "requestID", uuid.New(), - ) +func (n *node) newRqLggr() logger.SugaredLogger { + return n.rpcLog.With("requestID", uuid.New()) } func (n *node) logResult( @@ -1025,17 +1023,14 @@ func (n *node) logResult( callName string, results ...interface{}, ) { - lggr = logger.With(lggr, "duration", callDuration, "rpcDomain", rpcDomain, "callName", callName) + slggr := logger.Sugared(lggr).With("duration", callDuration, "rpcDomain", rpcDomain, "callName", callName) promEVMPoolRPCNodeCalls.WithLabelValues(n.chainID.String(), n.name).Inc() if err == nil { promEVMPoolRPCNodeCallsSuccess.WithLabelValues(n.chainID.String(), n.name).Inc() - logger.Tracew(lggr, - fmt.Sprintf("evmclient.Client#%s RPC call success", callName), - results..., - ) + slggr.Tracew(fmt.Sprintf("evmclient.Client#%s RPC call success", callName), results...) 
} else { promEVMPoolRPCNodeCallsFailed.WithLabelValues(n.chainID.String(), n.name).Inc() - lggr.Debugw( + slggr.Debugw( fmt.Sprintf("evmclient.Client#%s RPC call failure", callName), append(results, "err", err)..., ) @@ -1062,7 +1057,7 @@ func (n *node) wrapHTTP(err error) error { if err != nil { n.rpcLog.Debugw("Call failed", "err", err) } else { - logger.Trace(n.rpcLog, "Call succeeded") + n.rpcLog.Trace("Call succeeded") } return err } diff --git a/core/chains/evm/client/node_lifecycle.go b/core/chains/evm/client/node_lifecycle.go index 4e984de00f0..f2232a14935 100644 --- a/core/chains/evm/client/node_lifecycle.go +++ b/core/chains/evm/client/node_lifecycle.go @@ -92,9 +92,8 @@ func (n *node) aliveLoop() { pollFailureThreshold := n.nodePoolCfg.PollFailureThreshold() pollInterval := n.nodePoolCfg.PollInterval() - lggr := logger.Named(n.lfcLog, "Alive") - lggr = logger.With(lggr, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) - logger.Tracew(lggr, "Alive loop starting", "nodeState", n.State()) + lggr := logger.Sugared(n.lfcLog).Named("Alive").With("noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold, "pollInterval", pollInterval, "pollFailureThreshold", pollFailureThreshold) + lggr.Tracew("Alive loop starting", "nodeState", n.State()) headsC := make(chan *evmtypes.Head) sub, err := n.EthSubscribe(n.nodeCtx, headsC, "newHeads") @@ -143,7 +142,7 @@ func (n *node) aliveLoop() { case <-pollCh: var version string promEVMPoolRPCNodePolls.WithLabelValues(n.chainID.String(), n.name).Inc() - logger.Tracew(lggr, "Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) + lggr.Tracew("Polling for version", "nodeState", n.State(), "pollFailures", pollFailures) ctx, cancel := context.WithTimeout(n.nodeCtx, pollInterval) ctx, cancel2 := n.makeQueryCtx(ctx) err := n.CallContext(ctx, &version, "web3_clientVersion") @@ -165,7 +164,7 @@ func (n *node) aliveLoop() { lggr.Errorw(fmt.Sprintf("RPC endpoint failed to respond to %d consecutive polls", pollFailures), "pollFailures", pollFailures, "nodeState", n.State()) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { - logger.Criticalf(lggr, "RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint failed to respond to polls; %s %s", msgCannotDisable, msgDegradedState) continue } } @@ -177,7 +176,7 @@ func (n *node) aliveLoop() { // note: there must be another live node for us to be out of sync lggr.Errorw("RPC endpoint has fallen behind", "blockNumber", num, "totalDifficulty", td, "nodeState", n.State()) if liveNodes < 2 { - logger.Criticalf(lggr, "RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint has fallen behind; %s %s", msgCannotDisable, msgDegradedState) continue } n.declareOutOfSync(n.isOutOfSync) @@ -190,13 +189,13 @@ func (n *node) aliveLoop() { return } promEVMPoolRPCNodeNumSeenBlocks.WithLabelValues(n.chainID.String(), n.name).Inc() - logger.Tracew(lggr, "Got head", "head", bh) + lggr.Tracew("Got head", "head", bh) if bh.Number > highestReceivedBlockNumber { promEVMPoolRPCNodeHighestSeenBlock.WithLabelValues(n.chainID.String(), n.name).Set(float64(bh.Number)) - logger.Tracew(lggr, "Got higher block number, resetting timer", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) + lggr.Tracew("Got higher block number, resetting timer", "latestReceivedBlockNumber", 
highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) highestReceivedBlockNumber = bh.Number } else { - logger.Tracew(lggr, "Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) + lggr.Tracew("Ignoring previously seen block number", "latestReceivedBlockNumber", highestReceivedBlockNumber, "blockNumber", bh.Number, "nodeState", n.State()) } if outOfSyncT != nil { outOfSyncT.Reset(noNewHeadsTimeoutThreshold) @@ -212,7 +211,7 @@ func (n *node) aliveLoop() { lggr.Errorw(fmt.Sprintf("RPC endpoint detected out of sync; no new heads received for %s (last head received was %v)", noNewHeadsTimeoutThreshold, highestReceivedBlockNumber), "nodeState", n.State(), "latestReceivedBlockNumber", highestReceivedBlockNumber, "noNewHeadsTimeoutThreshold", noNewHeadsTimeoutThreshold) if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 2 { - logger.Criticalf(lggr, "RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) + lggr.Criticalf("RPC endpoint detected out of sync; %s %s", msgCannotDisable, msgDegradedState) // We don't necessarily want to wait the full timeout to check again, we should // check regularly and log noisily in this state outOfSyncT.Reset(zombieNodeCheckInterval(n.noNewHeadsThreshold)) @@ -278,7 +277,7 @@ func (n *node) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { outOfSyncAt := time.Now() - lggr := logger.Named(n.lfcLog, "OutOfSync") + lggr := logger.Sugared(logger.Named(n.lfcLog, "OutOfSync")) lggr.Debugw("Trying to revive out-of-sync RPC node", "nodeState", n.State()) // Need to redial since out-of-sync nodes are automatically disconnected @@ -295,7 +294,7 @@ func (n *node) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { return } - logger.Tracew(lggr, "Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) + lggr.Tracew("Successfully subscribed to heads feed on out-of-sync RPC node", "nodeState", n.State()) ch := make(chan *evmtypes.Head) subCtx, cancel := n.makeQueryCtx(n.nodeCtx) @@ -330,7 +329,7 @@ func (n *node) outOfSyncLoop(isOutOfSync func(num int64, td *big.Int) bool) { case <-time.After(zombieNodeCheckInterval(n.noNewHeadsThreshold)): if n.nLiveNodes != nil { if l, _, _ := n.nLiveNodes(); l < 1 { - logger.Critical(lggr, "RPC endpoint is still out of sync, but there are no other available nodes. This RPC node will be forcibly moved back into the live pool in a degraded state") + lggr.Critical("RPC endpoint is still out of sync, but there are no other available nodes. 
This RPC node will be forcibly moved back into the live pool in a degraded state") n.declareInSync() return } @@ -360,7 +359,7 @@ func (n *node) unreachableLoop() { unreachableAt := time.Now() - lggr := logger.Named(n.lfcLog, "Unreachable") + lggr := logger.Sugared(logger.Named(n.lfcLog, "Unreachable")) lggr.Debugw("Trying to revive unreachable RPC node", "nodeState", n.State()) dialRetryBackoff := utils.NewRedialBackoff() @@ -370,7 +369,7 @@ func (n *node) unreachableLoop() { case <-n.nodeCtx.Done(): return case <-time.After(dialRetryBackoff.Duration()): - logger.Tracew(lggr, "Trying to re-dial RPC node", "nodeState", n.State()) + lggr.Tracew("Trying to re-dial RPC node", "nodeState", n.State()) err := n.dial(n.nodeCtx) if err != nil { diff --git a/core/chains/evm/client/pool.go b/core/chains/evm/client/pool.go index afe592533c3..b2d5a4847a5 100644 --- a/core/chains/evm/client/pool.go +++ b/core/chains/evm/client/pool.go @@ -68,7 +68,7 @@ type Pool struct { sendonlys []SendOnlyNode chainID *big.Int chainType config.ChainType - logger logger.Logger + logger logger.SugaredLogger selectionMode string noNewHeadsThreshold time.Duration nodeSelector NodeSelector @@ -113,7 +113,7 @@ func NewPool(lggr logger.Logger, selectionMode string, leaseDuration time.Durati sendonlys: sendonlys, chainID: chainID, chainType: chainType, - logger: lggr, + logger: logger.Sugared(lggr), selectionMode: selectionMode, noNewHeadsThreshold: noNewHeadsTreshold, nodeSelector: nodeSelector, @@ -272,10 +272,10 @@ func (p *Pool) report() { } live := total - dead - logger.Tracew(p.logger, fmt.Sprintf("Pool state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) + p.logger.Tracew(fmt.Sprintf("Pool state: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) if total == dead { rerr := fmt.Errorf("no EVM primary nodes available: 0/%d nodes are alive", total) - logger.Criticalw(p.logger, rerr.Error(), "nodeStates", nodeStates) + p.logger.Criticalw(rerr.Error(), "nodeStates", nodeStates) p.SvcErrBuffer.Append(rerr) } else if dead > 0 { p.logger.Errorw(fmt.Sprintf("At least one EVM primary node is dead: %d/%d nodes are alive", live, total), "nodeStates", nodeStates) @@ -320,7 +320,7 @@ func (p *Pool) selectNode() (node Node) { p.activeNode = p.nodeSelector.Select() if p.activeNode == nil { - logger.Criticalw(p.logger, "No live RPC nodes available", "NodeSelectionMode", p.nodeSelector.Name()) + p.logger.Criticalw("No live RPC nodes available", "NodeSelectionMode", p.nodeSelector.Name()) errmsg := fmt.Errorf("no live nodes available for chain %s", p.chainID.String()) p.SvcErrBuffer.Append(errmsg) return &erroringNode{errMsg: errmsg.Error()} @@ -367,7 +367,7 @@ func (p *Pool) BatchCallContextAll(ctx context.Context, b []rpc.BatchElem) error if err != nil { p.logger.Debugw("Secondary node BatchCallContext failed", "err", err) } else { - logger.Trace(p.logger, "Secondary node BatchCallContext success") + p.logger.Trace("Secondary node BatchCallContext success") } }(n) } diff --git a/core/chains/evm/client/rpc_client.go b/core/chains/evm/client/rpc_client.go index 3cc90c4d8d9..627a2833109 100644 --- a/core/chains/evm/client/rpc_client.go +++ b/core/chains/evm/client/rpc_client.go @@ -55,7 +55,7 @@ type RPCCLient interface { } type rpcClient struct { - rpcLog logger.Logger + rpcLog logger.SugaredLogger name string id int32 chainID *big.Int @@ -106,7 +106,7 @@ func NewRPCClient( "client", r.String(), "evmChainID", chainID, ) - r.rpcLog = logger.Named(lggr, "RPC") + r.rpcLog = logger.Sugared(lggr).Named("RPC") 
return r } @@ -117,9 +117,9 @@ func (r *rpcClient) Dial(callerCtx context.Context) error { defer cancel() promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc() - lggr := logger.With(r.rpcLog, "wsuri", r.ws.uri.Redacted()) + lggr := r.rpcLog.With("wsuri", r.ws.uri.Redacted()) if r.http != nil { - lggr = logger.With(lggr, "httpuri", r.http.uri.Redacted()) + lggr = lggr.With("httpuri", r.http.uri.Redacted()) } lggr.Debugw("RPC dial: evmclient.Client#dial") @@ -148,7 +148,7 @@ func (r *rpcClient) Dial(callerCtx context.Context) error { // It can only return error if the URL is malformed. func (r *rpcClient) DialHTTP() error { promEVMPoolRPCNodeDials.WithLabelValues(r.chainID.String(), r.name).Inc() - lggr := logger.With(r.rpcLog, "httpuri", r.ws.uri.Redacted()) + lggr := r.rpcLog.With("httpuri", r.ws.uri.Redacted()) lggr.Debugw("RPC dial: evmclient.Client#dial") var httprpc *rpc.Client @@ -206,10 +206,7 @@ func (r *rpcClient) logResult( promEVMPoolRPCNodeCalls.WithLabelValues(r.chainID.String(), r.name).Inc() if err == nil { promEVMPoolRPCNodeCallsSuccess.WithLabelValues(r.chainID.String(), r.name).Inc() - logger.Tracew(lggr, - fmt.Sprintf("evmclient.Client#%s RPC call success", callName), - results..., - ) + logger.Sugared(lggr).Tracew(fmt.Sprintf("evmclient.Client#%s RPC call success", callName), results...) } else { promEVMPoolRPCNodeCallsFailed.WithLabelValues(r.chainID.String(), r.name).Inc() lggr.Debugw( @@ -299,7 +296,7 @@ func (r *rpcClient) CallContext(ctx context.Context, result interface{}, method return err } defer cancel() - lggr := logger.With(r.newRqLggr(), + lggr := r.newRqLggr().With( "method", method, "args", args, ) @@ -328,9 +325,9 @@ func (r *rpcClient) BatchCallContext(ctx context.Context, b []any) error { batch[i] = arg.(rpc.BatchElem) } defer cancel() - lggr := logger.With(r.newRqLggr(), "nBatchElems", len(b), "batchElems", b) + lggr := r.newRqLggr().With("nBatchElems", len(b), "batchElems", b) - logger.Trace(lggr, "RPC call: evmclient.Client#BatchCallContext") + lggr.Trace("RPC call: evmclient.Client#BatchCallContext") start := time.Now() if http != nil { err = r.wrapHTTP(http.rpc.BatchCallContext(ctx, batch)) @@ -350,7 +347,7 @@ func (r *rpcClient) Subscribe(ctx context.Context, channel chan<- *evmtypes.Head return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "args", args) + lggr := r.newRqLggr().With("args", args) lggr.Debug("RPC call: evmclient.Client#EthSubscribe") start := time.Now() @@ -385,7 +382,7 @@ func (r *rpcClient) TransactionReceiptGeth(ctx context.Context, txHash common.Ha return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "txHash", txHash) + lggr := r.newRqLggr().With("txHash", txHash) lggr.Debug("RPC call: evmclient.Client#TransactionReceipt") @@ -411,7 +408,7 @@ func (r *rpcClient) TransactionByHash(ctx context.Context, txHash common.Hash) ( return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "txHash", txHash) + lggr := r.newRqLggr().With("txHash", txHash) lggr.Debug("RPC call: evmclient.Client#TransactionByHash") @@ -438,7 +435,7 @@ func (r *rpcClient) HeaderByNumber(ctx context.Context, number *big.Int) (header return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "number", number) + lggr := r.newRqLggr().With("number", number) lggr.Debug("RPC call: evmclient.Client#HeaderByNumber") start := time.Now() @@ -462,7 +459,7 @@ func (r *rpcClient) HeaderByHash(ctx context.Context, hash common.Hash) (header return nil, err } defer cancel() - lggr := 
logger.With(r.newRqLggr(), "hash", hash) + lggr := r.newRqLggr().With("hash", hash) lggr.Debug("RPC call: evmclient.Client#HeaderByHash") start := time.Now() @@ -515,7 +512,7 @@ func (r *rpcClient) BlockByHashGeth(ctx context.Context, hash common.Hash) (bloc return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "hash", hash) + lggr := r.newRqLggr().With("hash", hash) lggr.Debug("RPC call: evmclient.Client#BlockByHash") start := time.Now() @@ -541,7 +538,7 @@ func (r *rpcClient) BlockByNumberGeth(ctx context.Context, number *big.Int) (blo return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "number", number) + lggr := r.newRqLggr().With("number", number) lggr.Debug("RPC call: evmclient.Client#BlockByNumber") start := time.Now() @@ -567,7 +564,7 @@ func (r *rpcClient) SendTransaction(ctx context.Context, tx *types.Transaction) return err } defer cancel() - lggr := logger.With(r.newRqLggr(), "tx", tx) + lggr := r.newRqLggr().With("tx", tx) lggr.Debug("RPC call: evmclient.Client#SendTransaction") start := time.Now() @@ -607,7 +604,7 @@ func (r *rpcClient) PendingSequenceAt(ctx context.Context, account common.Addres return 0, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "account", account) + lggr := r.newRqLggr().With("account", account) lggr.Debug("RPC call: evmclient.Client#PendingNonceAt") start := time.Now() @@ -639,7 +636,7 @@ func (r *rpcClient) SequenceAt(ctx context.Context, account common.Address, bloc return 0, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "account", account, "blockNumber", blockNumber) + lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#NonceAt") start := time.Now() @@ -668,7 +665,7 @@ func (r *rpcClient) PendingCodeAt(ctx context.Context, account common.Address) ( return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "account", account) + lggr := r.newRqLggr().With("account", account) lggr.Debug("RPC call: evmclient.Client#PendingCodeAt") start := time.Now() @@ -694,7 +691,7 @@ func (r *rpcClient) CodeAt(ctx context.Context, account common.Address, blockNum return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "account", account, "blockNumber", blockNumber) + lggr := r.newRqLggr().With("account", account, "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#CodeAt") start := time.Now() @@ -721,7 +718,7 @@ func (r *rpcClient) EstimateGas(ctx context.Context, c interface{}) (gas uint64, } defer cancel() call := c.(ethereum.CallMsg) - lggr := logger.With(r.newRqLggr(), "call", call) + lggr := r.newRqLggr().With("call", call) lggr.Debug("RPC call: evmclient.Client#EstimateGas") start := time.Now() @@ -773,7 +770,7 @@ func (r *rpcClient) CallContract(ctx context.Context, msg interface{}, blockNumb return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "callMsg", msg, "blockNumber", blockNumber) + lggr := r.newRqLggr().With("callMsg", msg, "blockNumber", blockNumber) message := msg.(ethereum.CallMsg) lggr.Debug("RPC call: evmclient.Client#CallContract") @@ -833,7 +830,7 @@ func (r *rpcClient) BalanceAt(ctx context.Context, account common.Address, block return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "account", account.Hex(), "blockNumber", blockNumber) + lggr := r.newRqLggr().With("account", account.Hex(), "blockNumber", blockNumber) lggr.Debug("RPC call: evmclient.Client#BalanceAt") start := time.Now() @@ -892,7 +889,7 @@ func (r *rpcClient) FilterLogs(ctx 
context.Context, q ethereum.FilterQuery) (l [ return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "q", q) + lggr := r.newRqLggr().With("q", q) lggr.Debug("RPC call: evmclient.Client#FilterLogs") start := time.Now() @@ -923,7 +920,7 @@ func (r *rpcClient) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQu return nil, err } defer cancel() - lggr := logger.With(r.newRqLggr(), "q", q) + lggr := r.newRqLggr().With("q", q) lggr.Debug("RPC call: evmclient.Client#SubscribeFilterLogs") start := time.Now() @@ -983,10 +980,8 @@ func (r *rpcClient) ChainID(ctx context.Context) (chainID *big.Int, err error) { } // newRqLggr generates a new logger with a unique request ID -func (r *rpcClient) newRqLggr() logger.Logger { - return logger.With(r.rpcLog, - "requestID", uuid.New(), - ) +func (r *rpcClient) newRqLggr() logger.SugaredLogger { + return r.rpcLog.With("requestID", uuid.New()) } func wrapCallError(err error, tp string) error { @@ -1009,7 +1004,7 @@ func (r *rpcClient) wrapHTTP(err error) error { if err != nil { r.rpcLog.Debugw("Call failed", "err", err) } else { - logger.Trace(r.rpcLog, "Call succeeded") + r.rpcLog.Trace("Call succeeded") } return err } diff --git a/core/chains/evm/gas/block_history_estimator.go b/core/chains/evm/gas/block_history_estimator.go index 844b9e547f2..dc95240fd42 100644 --- a/core/chains/evm/gas/block_history_estimator.go +++ b/core/chains/evm/gas/block_history_estimator.go @@ -198,7 +198,7 @@ func (b *BlockHistoryEstimator) getBlocks() []evmtypes.Block { // The provided context can be used to terminate Start sequence. func (b *BlockHistoryEstimator) Start(ctx context.Context) error { return b.StartOnce("BlockHistoryEstimator", func() error { - logger.Trace(b.logger, "Starting") + b.logger.Trace("Starting") if b.bhConfig.CheckInclusionBlocks() > 0 { b.logger.Infof("Inclusion checking enabled, bumping will be prevented on transactions that have been priced above the %d percentile for %d blocks", b.bhConfig.CheckInclusionPercentile(), b.bhConfig.CheckInclusionBlocks()) @@ -228,7 +228,7 @@ func (b *BlockHistoryEstimator) Start(ctx context.Context) error { b.wg.Add(1) go b.runLoop() - logger.Trace(b.logger, "Started") + b.logger.Trace("Started") return nil }) } @@ -291,7 +291,7 @@ func (b *BlockHistoryEstimator) BumpLegacyGas(_ context.Context, originalGasPric if b.bhConfig.CheckInclusionBlocks() > 0 { if err = b.checkConnectivity(attempts); err != nil { if errors.Is(err, commonfee.ErrConnectivity) { - logger.Criticalw(b.logger, BumpingHaltedLabel, "err", err) + b.logger.Criticalw(BumpingHaltedLabel, "err", err) b.SvcErrBuffer.Append(err) promBlockHistoryEstimatorConnectivityFailureCount.WithLabelValues(b.chainID.String(), "legacy").Inc() } @@ -467,7 +467,7 @@ func (b *BlockHistoryEstimator) BumpDynamicFee(_ context.Context, originalFee Dy if b.bhConfig.CheckInclusionBlocks() > 0 { if err = b.checkConnectivity(attempts); err != nil { if errors.Is(err, commonfee.ErrConnectivity) { - logger.Criticalw(b.logger, BumpingHaltedLabel, "err", err) + b.logger.Criticalw(BumpingHaltedLabel, "err", err) b.SvcErrBuffer.Append(err) promBlockHistoryEstimatorConnectivityFailureCount.WithLabelValues(b.chainID.String(), "eip1559").Inc() } @@ -508,7 +508,7 @@ func (b *BlockHistoryEstimator) FetchBlocksAndRecalculate(ctx context.Context, h func (b *BlockHistoryEstimator) Recalculate(head *evmtypes.Head) { percentile := int(b.bhConfig.TransactionPercentile()) - lggr := logger.With(b.logger, "head", head) + lggr := b.logger.With("head", head) blockHistory := 
@@ -508,7 +508,7 @@ func (b *BlockHistoryEstimator) FetchBlocksAndRecalculate(ctx context.Context, h
 func (b *BlockHistoryEstimator) Recalculate(head *evmtypes.Head) {
 	percentile := int(b.bhConfig.TransactionPercentile())
 
-	lggr := logger.With(b.logger, "head", head)
+	lggr := b.logger.With("head", head)
 
 	blockHistory := b.getBlocks()
 	if len(blockHistory) == 0 {
@@ -630,9 +630,9 @@ func (b *BlockHistoryEstimator) FetchBlocks(ctx context.Context, head *evmtypes.
 		reqs = append(reqs, req)
 	}
 
-	lggr := logger.With(b.logger, "head", head)
+	lggr := b.logger.With("head", head)
 
-	logger.Tracew(lggr, fmt.Sprintf("Fetching %v blocks (%v in local history)", len(reqs), len(blocks)), "n", len(reqs), "inHistory", len(blocks), "blockNum", head.Number)
+	lggr.Tracew(fmt.Sprintf("Fetching %v blocks (%v in local history)", len(reqs), len(blocks)), "n", len(reqs), "inHistory", len(blocks), "blockNum", head.Number)
 	if err := b.batchFetch(ctx, reqs); err != nil {
 		return err
 	}
@@ -713,7 +713,7 @@ func (b *BlockHistoryEstimator) batchFetch(ctx context.Context, reqs []rpc.Batch
 			j = len(reqs)
 		}
 
-		logger.Tracew(b.logger, fmt.Sprintf("Batch fetching blocks %v thru %v", HexToInt64(reqs[i].Args[0]), HexToInt64(reqs[j-1].Args[0])))
+		b.logger.Tracew(fmt.Sprintf("Batch fetching blocks %v thru %v", HexToInt64(reqs[i].Args[0]), HexToInt64(reqs[j-1].Args[0])))
 
 		err := b.ethClient.BatchCallContext(ctx, reqs[i:j])
 		if errors.Is(err, context.DeadlineExceeded) {
diff --git a/core/chains/evm/gas/rollups/l1_gas_price_oracle.go b/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
index 6a384fa9c54..ce1a50aa320 100644
--- a/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
+++ b/core/chains/evm/gas/rollups/l1_gas_price_oracle.go
@@ -31,7 +31,7 @@ type l1GasPriceOracle struct {
 	services.StateMachine
 	client     ethClient
 	pollPeriod time.Duration
-	logger     logger.Logger
+	logger     logger.SugaredLogger
 
 	address  string
 	callArgs string
@@ -94,7 +94,7 @@ func NewL1GasPriceOracle(lggr logger.Logger, ethClient ethClient, chainType conf
 	return &l1GasPriceOracle{
 		client:        ethClient,
 		pollPeriod:    PollPeriod,
-		logger:        logger.Named(lggr, fmt.Sprintf("L1GasPriceOracle(%s)", chainType)),
+		logger:        logger.Sugared(logger.Named(lggr, fmt.Sprintf("L1GasPriceOracle(%s)", chainType))),
 		address:       address,
 		callArgs:      callArgs,
 		chInitialised: make(chan struct{}),
@@ -159,7 +159,7 @@ func (o *l1GasPriceOracle) refresh() (t *time.Timer) {
 	}
 
 	if len(b) != 32 { // returns uint256;
-		logger.Criticalf(o.logger, "return data length (%d) different than expected (%d)", len(b), 32)
+		o.logger.Criticalf("return data length (%d) different than expected (%d)", len(b), 32)
 		return
 	}
 	price := new(big.Int).SetBytes(b)
diff --git a/core/chains/evm/log/helpers_test.go b/core/chains/evm/log/helpers_test.go
index 13dfe1ffab6..cea2e361184 100644
--- a/core/chains/evm/log/helpers_test.go
+++ b/core/chains/evm/log/helpers_test.go
@@ -241,7 +241,7 @@ func (rec *received) logsOnBlocks() []logOnBlock {
 type simpleLogListener struct {
 	name string
-	lggr logger.Logger
+	lggr logger.SugaredLogger
 	cfg  pg.QConfig
 
 	received *received
 	t        *testing.T
@@ -266,7 +266,7 @@ func (helper *broadcasterHelper) newLogListenerWithJob(name string) *simpleLogLi
 	var rec received
 	return &simpleLogListener{
 		db:       db,
-		lggr:     logger.Test(t),
+		lggr:     logger.Sugared(logger.Test(t)),
 		cfg:      helper.config.Database(),
 		name:     name,
 		received: &rec,
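Where a component owns its logger, the conversion happens once in the constructor, as in NewL1GasPriceOracle above and the sugared logger.Test(t) in the test helper: wrap with logger.Named, then logger.Sugared, and store the SugaredLogger on the struct. A minimal sketch under those assumptions; oracleSketch is illustrative:

package loggingsketch

import (
	"fmt"

	"github.com/smartcontractkit/chainlink-common/pkg/logger"
)

type oracleSketch struct {
	lggr logger.SugaredLogger
}

// newOracleSketch wraps the incoming Logger exactly once; methods can then
// call lggr.Criticalf and friends directly, with no per-call-site wrapping.
func newOracleSketch(lggr logger.Logger, chainType string) *oracleSketch {
	return &oracleSketch{
		lggr: logger.Sugared(logger.Named(lggr, fmt.Sprintf("L1GasPriceOracle(%s)", chainType))),
	}
}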
@@ -282,7 +282,7 @@ func (listener *simpleLogListener) SkipMarkingConsumed(skip bool) {
 func (listener *simpleLogListener) HandleLog(lb log.Broadcast) {
 	listener.received.Lock()
 	defer listener.received.Unlock()
-	logger.Tracef(listener.lggr, "Listener %v HandleLog for block %v %v received at %v %v", listener.name, lb.RawLog().BlockNumber, lb.RawLog().BlockHash, lb.LatestBlockNumber(), lb.LatestBlockHash())
+	listener.lggr.Tracef("Listener %v HandleLog for block %v %v received at %v %v", listener.name, lb.RawLog().BlockNumber, lb.RawLog().BlockHash, lb.LatestBlockNumber(), lb.LatestBlockHash())
 	listener.received.logs = append(listener.received.logs, lb.RawLog())
 	listener.received.broadcasts = append(listener.received.broadcasts, lb)
diff --git a/core/chains/evm/log/registrations.go b/core/chains/evm/log/registrations.go
index 73f197a6ab6..346a6776e86 100644
--- a/core/chains/evm/log/registrations.go
+++ b/core/chains/evm/log/registrations.go
@@ -46,7 +46,7 @@ type (
 		// handlersByConfs maps numConfirmations => *handler
 		handlersByConfs map[uint32]*handler
-		logger          logger.Logger
+		logger          logger.SugaredLogger
 		evmChainID      big.Int
 
 		// highest 'NumConfirmations' per all listeners, used to decide about deleting older logs if it's higher than EvmFinalityDepth
@@ -57,7 +57,7 @@ type (
 	handler struct {
 		lookupSubs map[common.Address]map[common.Hash]subscribers // contractAddress => logTopic => *subscriber => topicValueFilters
 		evmChainID big.Int
-		logger     logger.Logger
+		logger     logger.SugaredLogger
 	}
 
 	// The Listener responds to log events through HandleLog.
@@ -76,7 +76,7 @@ func newRegistrations(lggr logger.Logger, evmChainID big.Int) *registrations {
 		jobIDAddrs:      make(map[int32]map[common.Address]struct{}),
 		handlersByConfs: make(map[uint32]*handler),
 		evmChainID:      evmChainID,
-		logger:          logger.Named(lggr, "Registrations"),
+		logger:          logger.Sugared(logger.Named(lggr, "Registrations")),
 	}
 }
 
@@ -85,7 +85,7 @@ func (r *registrations) addSubscriber(sub *subscriber) (needsResubscribe bool) {
 		r.logger.Panicw(err.Error(), "err", err, "addr", sub.opts.Contract.Hex(), "jobID", sub.listener.JobID())
 	}
 
-	logger.Tracef(r.logger, "Added subscription %p with job ID %v", sub, sub.listener.JobID())
+	r.logger.Tracef("Added subscription %p with job ID %v", sub, sub.listener.JobID())
 
 	handler, exists := r.handlersByConfs[sub.opts.MinIncomingConfirmations]
 	if !exists {
@@ -142,7 +142,7 @@ func (r *registrations) removeSubscriber(sub *subscriber) (needsResubscribe bool
 	if err := r.checkRemoveSubscriber(sub); err != nil {
 		r.logger.Panicw(err.Error(), "err", err, "addr", sub.opts.Contract.Hex(), "jobID", sub.listener.JobID())
 	}
-	logger.Tracef(r.logger, "Removed subscription %p with job ID %v", sub, sub.listener.JobID())
+	r.logger.Tracef("Removed subscription %p with job ID %v", sub, sub.listener.JobID())
 
 	handlers, exists := r.handlersByConfs[sub.opts.MinIncomingConfirmations]
 	if !exists {
@@ -263,7 +263,7 @@ func filtersContainValues(topicValues []common.Hash, filters [][]Topic) bool {
 	return true
 }
 
-func newHandler(lggr logger.Logger, evmChainID big.Int) *handler {
+func newHandler(lggr logger.SugaredLogger, evmChainID big.Int) *handler {
 	return &handler{
 		lookupSubs: make(map[common.Address]map[common.Hash]subscribers),
 		evmChainID: evmChainID,
@@ -284,7 +284,7 @@ func (r *handler) addSubscriber(sub *subscriber, handlersWithGreaterConfs []*han
 	for topic, topicValueFilters := range sub.opts.LogsWithTopics {
 		if _, exists := r.lookupSubs[addr][topic]; !exists {
-			logger.Tracef(r.logger, "No existing sub for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
+			r.logger.Tracef("No existing sub for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
 			r.lookupSubs[addr][topic] = make(subscribers)
 
 			func() {
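registrations.go also changes an internal constructor signature: newHandler now takes logger.SugaredLogger directly, so the owner sugars once and children simply store the result. A sketch of that plumbing plus the Tracef method form; handlerSketch is hypothetical:

package loggingsketch

import "github.com/smartcontractkit/chainlink-common/pkg/logger"

type handlerSketch struct {
	lggr logger.SugaredLogger
}

// Inner constructors accept the already-sugared logger, avoiding repeated
// logger.Sugared calls further down the object graph.
func newHandlerSketch(lggr logger.SugaredLogger) *handlerSketch {
	return &handlerSketch{lggr: lggr}
}

func (h *handlerSketch) subscribed(addr string, minConfs uint32) {
	// before: logger.Tracef(h.lggr, "No existing sub for addr %s ...", ...)
	h.lggr.Tracef("No existing sub for addr %s at this MinIncomingConfirmations of %v", addr, minConfs)
}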
@@ -295,11 +295,11 @@ func (r *handler) addSubscriber(sub *subscriber, handlersWithGreaterConfs []*han
 				// again since even the worst case lookback is already covered
 				for _, existingHandler := range handlersWithGreaterConfs {
 					if _, exists := existingHandler.lookupSubs[addr][topic]; exists {
-						logger.Tracef(r.logger, "Sub already exists for addr %s and topic %s at greater than this MinIncomingConfirmations of %v. Resubscribe is not required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
+						r.logger.Tracef("Sub already exists for addr %s and topic %s at greater than this MinIncomingConfirmations of %v. Resubscribe is not required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
 						return
 					}
 				}
-				logger.Tracef(r.logger, "No sub exists for addr %s and topic %s at this or greater MinIncomingConfirmations of %v. Resubscribe is required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
+				r.logger.Tracef("No sub exists for addr %s and topic %s at this or greater MinIncomingConfirmations of %v. Resubscribe is required", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
 				needsResubscribe = true
 			}
 		}()
@@ -332,7 +332,7 @@ func (r *handler) removeSubscriber(sub *subscriber, allHandlers map[uint32]*hand
 
 		// cleanup and resubscribe if necessary
 		if len(topicMap) == 0 {
-			logger.Tracef(r.logger, "No subs left for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
+			r.logger.Tracef("No subs left for addr %s and topic %s at this MinIncomingConfirmations of %v", addr.Hex(), topic.Hex(), sub.opts.MinIncomingConfirmations)
 
 			func() {
 				if !needsResubscribe {
@@ -344,12 +344,12 @@ func (r *handler) removeSubscriber(sub *subscriber, allHandlers map[uint32]*hand
 						continue
 					}
 					if _, exists := otherHandler.lookupSubs[addr][topic]; exists {
-						logger.Tracef(r.logger, "Sub still exists for addr %s and topic %s. Resubscribe will not be performed", addr.Hex(), topic.Hex())
+						r.logger.Tracef("Sub still exists for addr %s and topic %s. Resubscribe will not be performed", addr.Hex(), topic.Hex())
 						return
 					}
 				}
 
-				logger.Tracef(r.logger, "No sub exists for addr %s and topic %s. Resubscribe will be performed", addr.Hex(), topic.Hex())
+				r.logger.Tracef("No sub exists for addr %s and topic %s. Resubscribe will be performed", addr.Hex(), topic.Hex())
 				needsResubscribe = true
 			}
 		}()
diff --git a/core/chains/evm/logpoller/log_poller.go b/core/chains/evm/logpoller/log_poller.go
index 991cc8d4309..7bc131afef4 100644
--- a/core/chains/evm/logpoller/log_poller.go
+++ b/core/chains/evm/logpoller/log_poller.go
@@ -98,7 +98,7 @@ type logPoller struct {
 	services.StateMachine
 	ec             Client
 	orm            ORM
-	lggr           logger.Logger
+	lggr           logger.SugaredLogger
 	pollPeriod     time.Duration // poll period set by block production rate
 	useFinalityTag bool          // indicates whether logPoller should use chain's finality or pick a fixed depth for finality
 	finalityDepth  int64         // finality depth is taken to mean that block (head - finality) is finalized. If `useFinalityTag` is set to true, this value is ignored, because finalityDepth is fetched from chain
@@ -138,7 +138,7 @@ func NewLogPoller(orm ORM, ec Client, lggr logger.Logger, pollPeriod time.Durati
 		cancel:         cancel,
 		ec:             ec,
 		orm:            orm,
-		lggr:           logger.Named(lggr, "LogPoller"),
+		lggr:           logger.Sugared(logger.Named(lggr, "LogPoller")),
 		replayStart:    make(chan int64),
 		replayComplete: make(chan error),
 		pollPeriod:     pollPeriod,
@@ -666,7 +666,7 @@ func (lp *logPoller) backfill(ctx context.Context, start, end int64) error {
 			}
 		}
 		if batchSize == 1 {
-			logger.Criticalw(lp.lggr, "Too many log results in a single block, failed to retrieve logs! Node may be running in a degraded state.", "err", err, "from", from, "to", to, "LogBackfillBatchSize", lp.backfillBatchSize)
+			lp.lggr.Criticalw("Too many log results in a single block, failed to retrieve logs! Node may be running in a degraded state.", "err", err, "from", from, "to", to, "LogBackfillBatchSize", lp.backfillBatchSize)
 			return err
 		}
 		batchSize /= 2
@@ -921,7 +921,7 @@ func (lp *logPoller) findBlockAfterLCA(ctx context.Context, current *evmtypes.He
 			return nil, err
 		}
 	}
-	logger.Criticalw(lp.lggr, "Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber)
+	lp.lggr.Criticalw("Reorg greater than finality depth detected", "finalityTag", lp.useFinalityTag, "current", current.Number, "latestFinalized", latestFinalizedBlockNumber)
 	rerr := errors.New("Reorg greater than finality depth")
 	lp.SvcErrBuffer.Append(rerr)
 	return nil, rerr
diff --git a/core/chains/evm/txmgr/broadcaster_test.go b/core/chains/evm/txmgr/broadcaster_test.go
index f676d3d18e2..68a81299bf9 100644
--- a/core/chains/evm/txmgr/broadcaster_test.go
+++ b/core/chains/evm/txmgr/broadcaster_test.go
@@ -2033,7 +2033,7 @@ type testChecker struct {
 
 func (t *testChecker) Check(
 	_ context.Context,
-	_ logger.Logger,
+	_ logger.SugaredLogger,
 	_ txmgr.Tx,
 	_ txmgr.TxAttempt,
 ) error {
diff --git a/core/chains/evm/txmgr/client.go b/core/chains/evm/txmgr/client.go
index d08274f74b6..dc7b62647c0 100644
--- a/core/chains/evm/txmgr/client.go
+++ b/core/chains/evm/txmgr/client.go
@@ -43,7 +43,7 @@ func (c *evmTxmClient) BatchSendTransactions(
 	ctx context.Context,
 	attempts []TxAttempt,
 	batchSize int,
-	lggr logger.Logger,
+	lggr logger.SugaredLogger,
 ) (
 	codes []commonclient.SendTxReturnCode,
 	txErrs []error,
@@ -62,7 +62,7 @@ func (c *evmTxmClient) BatchSendTransactions(
 	if len(reqs) != len(attempts) {
 		lenErr := fmt.Errorf("Returned request data length (%d) != number of tx attempts (%d)", len(reqs), len(attempts))
 		err = errors.Join(err, lenErr)
-		logger.Criticalw(lggr, "Mismatched length", "err", err)
+		lggr.Criticalw("Mismatched length", "err", err)
 		return
 	}
 
@@ -88,10 +88,10 @@ func (c *evmTxmClient) BatchSendTransactions(
 	return
 }
 
-func (c *evmTxmClient) SendTransactionReturnCode(ctx context.Context, etx Tx, attempt TxAttempt, lggr logger.Logger) (commonclient.SendTxReturnCode, error) {
+func (c *evmTxmClient) SendTransactionReturnCode(ctx context.Context, etx Tx, attempt TxAttempt, lggr logger.SugaredLogger) (commonclient.SendTxReturnCode, error) {
 	signedTx, err := GetGethSignedTx(attempt.SignedRawTx)
 	if err != nil {
-		logger.Criticalw(lggr, "Fatal error signing transaction", "err", err, "etx", etx)
+		lggr.Criticalw("Fatal error signing transaction", "err", err, "etx", etx)
 		return commonclient.Fatal, err
 	}
 	return c.client.SendTransactionReturnCode(ctx, signedTx, etx.FromAddress)
diff --git a/core/chains/evm/txmgr/evm_tx_store.go b/core/chains/evm/txmgr/evm_tx_store.go
index bb2a30e51dd..f9014163675 100644
--- a/core/chains/evm/txmgr/evm_tx_store.go
+++ b/core/chains/evm/txmgr/evm_tx_store.go
@@ -75,7 +75,7 @@ type TestEvmTxStore interface {
 
 type evmTxStore struct {
 	q         pg.Q
-	logger    logger.Logger
+	logger    logger.SugaredLogger
 	ctx       context.Context
 	ctxCancel context.CancelFunc
 }
@@ -340,7 +340,7 @@ func NewTxStore(
 	q := pg.NewQ(db, namedLogger, cfg, pg.WithParentCtx(ctx))
 	return &evmTxStore{
 		q:         q,
-		logger:    namedLogger,
+		logger:    logger.Sugared(namedLogger),
 		ctx:       ctx,
 		ctxCancel: cancel,
 	}
 }
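The backfill hunk above couples the new Criticalw method with an adaptive batch size: on a failed fetch the range is retried with half the batch, and only when a single-block batch still fails is the error escalated at critical level and returned. A standalone sketch of that control flow under stated assumptions; fetchRange and backfillSketch are illustrative stand-ins, not the real logPoller API:

package loggingsketch

import "github.com/smartcontractkit/chainlink-common/pkg/logger"

// fetchRange stands in for the real batched log fetch.
type fetchRange func(from, to int64) error

// backfillSketch halves batchSize on failure; a failing batch of one block
// cannot shrink further, so it is logged via Criticalw and returned.
func backfillSketch(lggr logger.SugaredLogger, start, end, batchSize int64, fetch fetchRange) error {
	for from := start; from <= end; {
		to := from + batchSize - 1
		if to > end {
			to = end
		}
		if err := fetch(from, to); err != nil {
			if batchSize == 1 {
				lggr.Criticalw("Too many log results in a single block", "err", err, "from", from, "to", to)
				return err
			}
			batchSize /= 2 // retry the same range with a smaller batch
			continue
		}
		from = to + 1
	}
	return nil
}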
@@ -1499,7 +1499,7 @@ GROUP BY e.id
 		txHashesHex[i] = common.BytesToAddress(r.TxHashes[i])
 	}
 
-	logger.Criticalw(o.logger, fmt.Sprintf("eth_tx with ID %v expired without ever getting a receipt for any of our attempts. "+
+	o.logger.Criticalw(fmt.Sprintf("eth_tx with ID %v expired without ever getting a receipt for any of our attempts. "+
 		"Current block height is %v, transaction was broadcast before block height %v. This transaction may not have not been sent and will be marked as fatally errored. "+
 		"This can happen if there is another instance of chainlink running that is using the same private key, or if "+
 		"an external wallet has been used to send a transaction from account %s with nonce %v."+
diff --git a/core/chains/evm/txmgr/transmitchecker.go b/core/chains/evm/txmgr/transmitchecker.go
index 76dfcb9d51c..919fb509fee 100644
--- a/core/chains/evm/txmgr/transmitchecker.go
+++ b/core/chains/evm/txmgr/transmitchecker.go
@@ -110,7 +110,7 @@ type noChecker struct{}
 // Check satisfies the TransmitChecker interface.
 func (noChecker) Check(
 	_ context.Context,
-	_ logger.Logger,
+	_ logger.SugaredLogger,
 	_ Tx,
 	_ TxAttempt,
 ) error {
@@ -125,7 +125,7 @@ type SimulateChecker struct {
 // Check satisfies the TransmitChecker interface.
 func (s *SimulateChecker) Check(
 	ctx context.Context,
-	l logger.Logger,
+	l logger.SugaredLogger,
 	tx Tx,
 	a TxAttempt,
 ) error {
@@ -148,7 +148,7 @@ func (s *SimulateChecker) Check(
 	err := s.Client.CallContext(ctx, &b, "eth_call", callArg, evmclient.ToBlockNumArg(nil))
 	if err != nil {
 		if jErr := evmclient.ExtractRPCErrorOrNil(err); jErr != nil {
-			logger.Criticalw(l, "Transaction reverted during simulation",
+			l.Criticalw("Transaction reverted during simulation",
 				"ethTxAttemptID", a.ID, "txHash", a.Hash, "err", err, "rpcErr", jErr.String(), "returnValue", b.String())
 			return errors.Errorf("transaction reverted during simulation: %s", jErr.String())
 		}
@@ -175,7 +175,7 @@ type VRFV1Checker struct {
 // Check satisfies the TransmitChecker interface.
 func (v *VRFV1Checker) Check(
 	ctx context.Context,
-	l logger.Logger,
+	l logger.SugaredLogger,
 	tx Tx,
 	_ TxAttempt,
 ) error {
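Every TransmitChecker implementation changes its Check parameter from logger.Logger to logger.SugaredLogger, which is what lets the simulate path call l.Criticalw directly. A cut-down sketch of a conforming checker; checkerSketch and revertChecker are simplified stand-ins, and the real interface also takes Tx and TxAttempt:

package loggingsketch

import (
	"context"

	"github.com/smartcontractkit/chainlink-common/pkg/logger"
)

// checkerSketch is a reduced TransmitChecker-like interface.
type checkerSketch interface {
	Check(ctx context.Context, l logger.SugaredLogger) error
}

type revertChecker struct {
	simulate func(context.Context) error
}

// Check logs reverts at critical level straight off the sugared parameter.
func (c revertChecker) Check(ctx context.Context, l logger.SugaredLogger) error {
	if err := c.simulate(ctx); err != nil {
		l.Criticalw("Transaction reverted during simulation", "err", err)
		return err
	}
	return nil
}

var _ checkerSketch = revertChecker{}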
@@ -284,7 +284,7 @@ type VRFV2Checker struct {
 // Check satisfies the TransmitChecker interface.
 func (v *VRFV2Checker) Check(
 	ctx context.Context,
-	l logger.Logger,
+	l logger.SugaredLogger,
 	tx Tx,
 	_ TxAttempt,
 ) error {
diff --git a/core/chains/evm/txmgr/transmitchecker_test.go b/core/chains/evm/txmgr/transmitchecker_test.go
index 6dd4edd91c6..d2f668da11b 100644
--- a/core/chains/evm/txmgr/transmitchecker_test.go
+++ b/core/chains/evm/txmgr/transmitchecker_test.go
@@ -106,7 +106,7 @@ func TestFactory(t *testing.T) {
 
 func TestTransmitCheckers(t *testing.T) {
 	client := evmtest.NewEthClientMockWithDefaultChain(t)
-	log := logger.Test(t)
+	log := logger.Sugared(logger.Test(t))
 	ctx := testutils.Context(t)
 
 	t.Run("no checker", func(t *testing.T) {
diff --git a/core/scripts/go.mod b/core/scripts/go.mod
index c5327596272..db71535723e 100644
--- a/core/scripts/go.mod
+++ b/core/scripts/go.mod
@@ -237,7 +237,7 @@ require (
 	github.com/shirou/gopsutil/v3 v3.23.11 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 // indirect
-	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad // indirect
+	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490 // indirect
 	github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e // indirect
 	github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1 // indirect
 	github.com/smartcontractkit/chainlink-feeds v0.0.0-20231127231053-2232d3a6766d // indirect
diff --git a/core/scripts/go.sum b/core/scripts/go.sum
index 28750a076d5..1c48074ea3a 100644
--- a/core/scripts/go.sum
+++ b/core/scripts/go.sum
@@ -1148,8 +1148,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv
 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
 github.com/smartcontractkit/chainlink-automation v1.0.1 h1:vVjBFq2Zsz21kPy1Pb0wpjF9zrbJX+zjXphDeeR4XZk=
 github.com/smartcontractkit/chainlink-automation v1.0.1/go.mod h1:INSchkV3ntyDdlZKGWA030MPDpp6pbeuiRkRKYFCm2k=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad h1:ysPjfbCPJuVxxFZa1Ifv8OPE20pzvnEHjJrPDUo4gT0=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490 h1:lSYiaiIfAA+5ac45/UD8ciytlNw/S6fnhK7bxFHYI88=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e h1:xvqffqFec2HkEcUKrCkm4FDJRnn/+gHmvrE/dz3Zlw8=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e/go.mod h1:soVgcl4CbfR6hC9UptjuCQhz19HJaFEjwnOpiySkxg0=
 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1 h1:xYqRgZO0nMSO8CBCMR0r3WA+LZ4kNL8a6bnbyk/oBtQ=
diff --git a/core/services/pg/q.go b/core/services/pg/q.go
index 050606c7937..e69e16ec48f 100644
--- a/core/services/pg/q.go
+++ b/core/services/pg/q.go
@@ -118,7 +118,7 @@ type Q struct {
 	Queryer
 	ParentCtx    context.Context
 	db           *sqlx.DB
-	logger       logger.Logger
+	logger       logger.SugaredLogger
 	config       QConfig
 	QueryTimeout time.Duration
 }
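Q now stores a SugaredLogger, and the NewQ hunk just below wraps the incoming logger with logger.Helper(lggr, 2) before sugaring, so SLOW SQL QUERY lines attribute to the caller of the Q method rather than to pg's internal helpers. A hedged sketch of that construction; newQueryLoggerSketch is illustrative:

package loggingsketch

import "github.com/smartcontractkit/chainlink-common/pkg/logger"

// newQueryLoggerSketch skips two wrapper stack frames so log call sites point
// at the code that issued the query, then sugars the result so Criticalw and
// Errorw are available as methods.
func newQueryLoggerSketch(lggr logger.Logger) logger.SugaredLogger {
	return logger.Sugared(logger.Helper(lggr, 2))
}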
@@ -130,7 +130,7 @@ func NewQ(db *sqlx.DB, lggr logger.Logger, config QConfig, qopts ...QOpt) (q Q)
 
 	q.db = db
 	// skip two levels since we use internal helpers and also want to point up the stack to the caller of the Q method.
-	q.logger = logger.Helper(lggr, 2)
+	q.logger = logger.Sugared(logger.Helper(lggr, 2))
 	q.config = config
 
 	if q.Queryer == nil {
@@ -356,7 +356,7 @@ func (q *queryLogger) postSqlLog(ctx context.Context, begin time.Time) {
 	kvs := []any{"ms", elapsed.Milliseconds(), "timeout", timeout.Milliseconds(), "percent", strconv.FormatFloat(pct, 'f', 1, 64), "sql", q}
 
 	if elapsed >= timeout {
-		logger.Criticalw(q.logger, "SLOW SQL QUERY", kvs...)
+		q.logger.Criticalw("SLOW SQL QUERY", kvs...)
 	} else if errThreshold := timeout / 5; errThreshold > 0 && elapsed > errThreshold {
 		q.logger.Errorw("SLOW SQL QUERY", kvs...)
 	} else if warnThreshold := timeout / 10; warnThreshold > 0 && elapsed > warnThreshold {
diff --git a/go.mod b/go.mod
index e7b93c40bdb..c71705d9c92 100644
--- a/go.mod
+++ b/go.mod
@@ -65,7 +65,7 @@ require (
 	github.com/shopspring/decimal v1.3.1
 	github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704
 	github.com/smartcontractkit/chainlink-automation v1.0.1
-	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad
+	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490
 	github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e
 	github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1
 	github.com/smartcontractkit/chainlink-feeds v0.0.0-20231127231053-2232d3a6766d
diff --git a/go.sum b/go.sum
index e9a7f0267ad..b38b7ac2632 100644
--- a/go.sum
+++ b/go.sum
@@ -1134,8 +1134,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv
 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
 github.com/smartcontractkit/chainlink-automation v1.0.1 h1:vVjBFq2Zsz21kPy1Pb0wpjF9zrbJX+zjXphDeeR4XZk=
 github.com/smartcontractkit/chainlink-automation v1.0.1/go.mod h1:INSchkV3ntyDdlZKGWA030MPDpp6pbeuiRkRKYFCm2k=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad h1:ysPjfbCPJuVxxFZa1Ifv8OPE20pzvnEHjJrPDUo4gT0=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490 h1:lSYiaiIfAA+5ac45/UD8ciytlNw/S6fnhK7bxFHYI88=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e h1:xvqffqFec2HkEcUKrCkm4FDJRnn/+gHmvrE/dz3Zlw8=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e/go.mod h1:soVgcl4CbfR6hC9UptjuCQhz19HJaFEjwnOpiySkxg0=
 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1 h1:xYqRgZO0nMSO8CBCMR0r3WA+LZ4kNL8a6bnbyk/oBtQ=
diff --git a/integration-tests/go.mod b/integration-tests/go.mod
index 84da65e9a5d..9f0ff1b384c 100644
--- a/integration-tests/go.mod
+++ b/integration-tests/go.mod
@@ -24,7 +24,7 @@ require (
 	github.com/segmentio/ksuid v1.0.4
 	github.com/slack-go/slack v0.12.2
 	github.com/smartcontractkit/chainlink-automation v1.0.1
-	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad
+	github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490
 	github.com/smartcontractkit/chainlink-testing-framework v1.22.0
 	github.com/smartcontractkit/chainlink-vrf v0.0.0-20231120191722-fef03814f868
 	github.com/smartcontractkit/chainlink/v2 v2.0.0-00010101000000-000000000000
diff --git a/integration-tests/go.sum b/integration-tests/go.sum
index 854978e3353..f8a9529a4cc 100644
--- a/integration-tests/go.sum
+++ b/integration-tests/go.sum
@@ -1411,7 +1411,6 @@ github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT
 github.com/shirou/gopsutil/v3 v3.23.11 h1:i3jP9NjCPUz7FiZKxlMnODZkdSIp2gnzfrvsu9CuWEQ=
 github.com/shirou/gopsutil/v3 v3.23.11/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
 github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
-github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
 github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
 github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
 github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
@@ -1433,8 +1432,8 @@ github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704 h1:T3lFWumv
 github.com/smartcontractkit/caigo v0.0.0-20230621050857-b29a4ca8c704/go.mod h1:2QuJdEouTWjh5BDy5o/vgGXQtR4Gz8yH1IYB5eT7u4M=
 github.com/smartcontractkit/chainlink-automation v1.0.1 h1:vVjBFq2Zsz21kPy1Pb0wpjF9zrbJX+zjXphDeeR4XZk=
 github.com/smartcontractkit/chainlink-automation v1.0.1/go.mod h1:INSchkV3ntyDdlZKGWA030MPDpp6pbeuiRkRKYFCm2k=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad h1:ysPjfbCPJuVxxFZa1Ifv8OPE20pzvnEHjJrPDUo4gT0=
-github.com/smartcontractkit/chainlink-common v0.1.7-0.20231206181640-faad3f11cfad/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490 h1:lSYiaiIfAA+5ac45/UD8ciytlNw/S6fnhK7bxFHYI88=
+github.com/smartcontractkit/chainlink-common v0.1.7-0.20231213134506-b6c433e6c490/go.mod h1:IdlfCN9rUs8Q/hrOYe8McNBIwEOHEsi0jilb3Cw77xs=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e h1:xvqffqFec2HkEcUKrCkm4FDJRnn/+gHmvrE/dz3Zlw8=
 github.com/smartcontractkit/chainlink-cosmos v0.4.1-0.20231206164210-03f8b219402e/go.mod h1:soVgcl4CbfR6hC9UptjuCQhz19HJaFEjwnOpiySkxg0=
 github.com/smartcontractkit/chainlink-data-streams v0.0.0-20231204152908-a6e3fe8ff2a1 h1:xYqRgZO0nMSO8CBCMR0r3WA+LZ4kNL8a6bnbyk/oBtQ=