Skip to content

Commit

Permalink
Chore: make a few spelling corrections (#10952)
Browse files Browse the repository at this point in the history
  • Loading branch information
somnathb1 authored Jun 30, 2024
1 parent 5c30a0f commit f7dd226
Show file tree
Hide file tree
Showing 15 changed files with 23 additions and 23 deletions.
2 changes: 1 addition & 1 deletion cl/antiquary/antiquary.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func (a *Antiquary) Loop() error {
if a.downloader == nil || !a.blocks {
return nil // Just skip if we don't have a downloader
}
// Skip if we dont support backfilling for the current network
// Skip if we don't support backfilling for the current network
if !clparams.SupportBackfilling(a.cfg.DepositNetworkID) {
return nil
}
Expand Down
2 changes: 1 addition & 1 deletion cl/beacon/handler/attestation_rewards.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ func (a *ApiHandler) computeAttestationsRewardsForAltair(validatorSet *solid.Val
idealReward.Target = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelyTargetFlagIndex] / rewardDenominator)
idealReward.Source = int64(baseReward * rewardMultipliers[a.beaconChainCfg.TimelySourceFlagIndex] / rewardDenominator)
}
// Note: for altair, we dont have the inclusion delay, always 0.
// Note: for altair, we don't have the inclusion delay, always 0.
for flagIdx := range weights {
if flagsUnslashedIndiciesSet[flagIdx][index] {
if flagIdx == int(a.beaconChainCfg.TimelyHeadFlagIndex) {
Expand Down
2 changes: 1 addition & 1 deletion cl/merkle_tree/merkle_root.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ func HashByteSlice(out, in []byte) error {
}

func convertHeader(xs []byte) [][32]byte {
// i wont pretend to understand, but my solution for the problem is as so
// i won't pretend to understand, but my solution for the problem is as so

// first i grab the slice header of the input
header := (*reflect.SliceHeader)(unsafe.Pointer(&xs))
Expand Down
4 changes: 2 additions & 2 deletions core/state/database_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1152,7 +1152,7 @@ func TestWrongIncarnation2(t *testing.T) {
}

if knownContractAddress != contractAddress {
t.Errorf("Expexted contractAddress: %x, got %x", knownContractAddress, contractAddress)
t.Errorf("Expected contractAddress: %x, got %x", knownContractAddress, contractAddress)
}

// Create a longer chain, with 4 blocks (with higher total difficulty) that reverts the change of storage self-destruction of the contract
Expand Down Expand Up @@ -1331,7 +1331,7 @@ func TestCacheCodeSizeSeparately(t *testing.T) {
assert.Equal(t, code, code2, "new code should be received")
}

// TestCacheCodeSizeInTrie makes sure that we dont just read from the DB all the time
// TestCacheCodeSizeInTrie makes sure that we don't just read from the DB all the time
func TestCacheCodeSizeInTrie(t *testing.T) {
t.Parallel()
//t.Skip("switch to TG state readers/writers")
Expand Down
2 changes: 1 addition & 1 deletion core/types/transaction_signing.go
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ func (sg Signer) SenderWithContext(context *secp256k1.Context, txn Transaction)
var V uint256.Int
var R, S *uint256.Int
signChainID := sg.chainID.ToBig() // This is reset to nil if txn is unprotected
// recoverPlain below will subract 27 from V
// recoverPlain below will subtract 27 from V
switch t := txn.(type) {
case *LegacyTx:
if !t.Protected() {
Expand Down
2 changes: 1 addition & 1 deletion erigon-lib/common/background/progress.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func (s *ProgressSet) String() string {
return sb.String()
}

func (s *ProgressSet) DiagnossticsData() map[string]int {
func (s *ProgressSet) DiagnosticsData() map[string]int {
s.lock.RLock()
defer s.lock.RUnlock()
var arr = make(map[string]int, s.list.Len())
Expand Down
6 changes: 3 additions & 3 deletions erigon-lib/downloader/downloader.go
Original file line number Diff line number Diff line change
Expand Up @@ -239,8 +239,8 @@ func (r *requestHandler) RoundTrip(req *http.Request) (resp *http.Response, err

// the first two statuses here have been observed from cloudflare
// during testing. The remainder are generally understood to be
// retriable http responses, calcBackoff will use the Retry-After
// header if its availible
// retry-able http responses, calcBackoff will use the Retry-After
// header if it's available
case http.StatusInternalServerError, http.StatusBadGateway,
http.StatusRequestTimeout, http.StatusTooEarly,
http.StatusTooManyRequests, http.StatusServiceUnavailable,
Expand Down Expand Up @@ -2440,7 +2440,7 @@ func (d *Downloader) VerifyData(ctx context.Context, whiteList []string, failFas
return nil
}

// AddNewSeedableFile decides what we do depending on wether we have the .seg file or the .torrent file
// AddNewSeedableFile decides what we do depending on whether we have the .seg file or the .torrent file
// have .torrent no .seg => get .seg file from .torrent
// have .seg no .torrent => get .torrent from .seg
func (d *Downloader) AddNewSeedableFile(ctx context.Context, name string) error {
Expand Down
4 changes: 2 additions & 2 deletions erigon-lib/seg/decompress.go
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ const (

// Tables with bitlen greater than threshold will be condensed.
// Condensing reduces size of decompression table but leads to slower reads.
// To disable condesning at all set to 9 (we dont use tables larger than 2^9)
// To disable condensing at all set to 9 (we don't use tables larger than 2^9)
// To enable condensing for tables of size larger 64 = 6
// for all tables = 0
// There is no sense to condense tables of size [1 - 64] in terms of performance
Expand Down Expand Up @@ -535,7 +535,7 @@ func (d *Decompressor) EnableMadvWillNeed() *Decompressor {
return d
}

// Getter represent "reader" or "interator" that can move accross the data of the decompressor
// Getter represents a "reader" or "iterator" that can move across the data of the decompressor
// The full state of the getter can be captured by saving dataP, and dataBit
type Getter struct {
patternDict *patternTable
Expand Down
2 changes: 1 addition & 1 deletion erigon-lib/state/aggregator.go
Original file line number Diff line number Diff line change
Expand Up @@ -467,7 +467,7 @@ func (a *Aggregator) BuildMissedIndices(ctx context.Context, workers int) error
case <-logEvery.C:
var m runtime.MemStats
dbg.ReadMemStats(&m)
sendDiagnostics(startIndexingTime, ps.DiagnossticsData(), m.Alloc, m.Sys)
sendDiagnostics(startIndexingTime, ps.DiagnosticsData(), m.Alloc, m.Sys)
a.logger.Info("[snapshots] Indexing", "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
}
}
Expand Down
2 changes: 1 addition & 1 deletion polygon/bor/valset/validator.go
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@ func ParseValidators(validatorsBytes []byte) ([]*Validator, error) {
// Used to send validator information to bor validator contract
type MinimalVal struct {
ID uint64 `json:"ID"`
VotingPower uint64 `json:"power"` // TODO add 10^-18 here so that we dont overflow easily
VotingPower uint64 `json:"power"` // TODO add 10^-18 here so that we don't overflow easily
Signer libcommon.Address `json:"signer"`
}

Expand Down
2 changes: 1 addition & 1 deletion turbo/adapter/ethapi/api.go
Original file line number Diff line number Diff line change
Expand Up @@ -770,7 +770,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha
return fields, nil
}
// SendTxArgs represents the arguments to sumbit a new transaction into the transaction pool.
// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
type SendTxArgs struct {
From libcommon.Address `json:"from"`
To *libcommon.Address `json:"to"`
Expand Down
4 changes: 2 additions & 2 deletions turbo/snapshotsync/freezeblocks/block_snapshots.go
Original file line number Diff line number Diff line change
Expand Up @@ -789,7 +789,7 @@ func (s *RoSnapshots) buildMissedIndices(logPrefix string, ctx context.Context,
case <-logEvery.C:
var m runtime.MemStats
dbg.ReadMemStats(&m)
sendDiagnostics(startIndexingTime, ps.DiagnossticsData(), m.Alloc, m.Sys)
sendDiagnostics(startIndexingTime, ps.DiagnosticsData(), m.Alloc, m.Sys)
logger.Info(fmt.Sprintf("[%s] Indexing", logPrefix), "progress", ps.String(), "total-indexing-time", time.Since(startIndexingTime).Round(time.Second).String(), "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys))
case <-finish:
return
Expand Down Expand Up @@ -1757,7 +1757,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, chainConfig *chain.Config, blockFr
collections.Wait()
}

// first tx byte => sender adress => tx rlp
// first tx byte => sender address => tx rlp
if err := collect(valueBuf); err != nil {
return err
}
Expand Down
2 changes: 1 addition & 1 deletion turbo/snapshotsync/snapshotsync.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ func buildBlackListForPruning(pruneMode bool, stepPrune, minBlockToDownload, blo
snapshotKindToNames := make(map[string][]snapshotFileData)
for _, p := range preverified {
name := p.Name
// Dont prune unprunable files
// Don't prune unprunable files
if !canSnapshotBePruned(name) {
continue
}
Expand Down
2 changes: 1 addition & 1 deletion turbo/stages/blockchain_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -303,7 +303,7 @@ func testReorgShort(t *testing.T) {
t.Parallel()
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
// we need a fairly long chain of blocks with different difficulties for a short
// one to become heavyer than a long one. The 96 is an empirical value.
// one to become heavier than a long one. The 96 is an empirical value.
easy := make([]int64, 96)
for i := 0; i < len(easy); i++ {
easy[i] = 60
Expand Down
8 changes: 4 additions & 4 deletions turbo/stages/headerdownload/header_algos.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ func (hd *HeaderDownload) ReportBadHeader(headerHash libcommon.Hash) {
func (hd *HeaderDownload) UnlinkHeader(headerHash libcommon.Hash) {
hd.lock.Lock()
defer hd.lock.Unlock()
// Find the link, remove it and all its descendands from all the queues
// Find the link, remove it and all its descendants from all the queues
if link, ok := hd.links[headerHash]; ok {
hd.removeUpwards(link)
}
Expand Down Expand Up @@ -427,7 +427,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo
return
}

hd.logger.Debug("[downloader] Request header", "numer", anchor.blockHeight-1, "length", 192)
hd.logger.Debug("[downloader] Request header", "number", anchor.blockHeight-1, "length", 192)

// Request ancestors
request = &HeaderRequest{
Expand Down Expand Up @@ -686,7 +686,7 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k
headerHash := sh.Hash

if headerHash != hd.posAnchor.parentHash {
// Code below prevented syncing from Nethermind nodes who discregarded Reverse parameter to GetBlockHeaders messages
// Code below prevented syncing from Nethermind nodes who disregarded Reverse parameter to GetBlockHeaders messages
// With this code commented out, the sync proceeds but very slowly (getting 1 header from the response of 192 headers)
/*
if hd.posAnchor.blockHeight != 1 && sh.Number != hd.posAnchor.blockHeight-1 {
Expand Down Expand Up @@ -912,7 +912,7 @@ func (hi *HeaderInserter) FeedHeaderPoW(db kv.StatelessRwTx, headerReader servic
// Calculate total difficulty of this header using parent's total difficulty
td = new(big.Int).Add(parentTd, header.Difficulty)

// Now we can decide wether this header will create a change in the canonical head
// Now we can decide whether this header will create a change in the canonical head
if td.Cmp(hi.localTd) >= 0 {
reorg := true

Expand Down

0 comments on commit f7dd226

Please sign in to comment.