@@ -65,9 +65,9 @@ bool BaseIndex::Init()
     LOCK(cs_main);
     CChain& active_chain = m_chainstate->m_chain;
     if (locator.IsNull()) {
-        m_best_block_index = nullptr;
+        SetBestBlockIndex(nullptr);
     } else {
-        m_best_block_index = m_chainstate->FindForkInGlobalIndex(locator);
+        SetBestBlockIndex(m_chainstate->FindForkInGlobalIndex(locator));
     }
     m_synced = m_best_block_index.load() == active_chain.Tip();
     if (!m_synced) {
@@ -134,7 +134,7 @@ void BaseIndex::ThreadSync()
         int64_t last_locator_write_time = 0;
         while (true) {
             if (m_interrupt) {
-                m_best_block_index = pindex;
+                SetBestBlockIndex(pindex);
                 // No need to handle errors in Commit. If it fails, the error will be already be
                 // logged. The best way to recover is to continue, as index cannot be corrupted by
                 // a missed commit to disk for an advanced index state.
@@ -146,7 +146,7 @@ void BaseIndex::ThreadSync()
                 LOCK(cs_main);
                 const CBlockIndex* pindex_next = NextSyncBlock(pindex, m_chainstate->m_chain);
                 if (!pindex_next) {
-                    m_best_block_index = pindex;
+                    SetBestBlockIndex(pindex);
                     m_synced = true;
                     // No need to handle errors in Commit. See rationale above.
                     Commit();
@@ -168,7 +168,7 @@ void BaseIndex::ThreadSync()
             }
 
             if (last_locator_write_time + SYNC_LOCATOR_WRITE_INTERVAL < current_time) {
-                m_best_block_index = pindex;
+                SetBestBlockIndex(pindex);
                 last_locator_write_time = current_time;
                 // No need to handle errors in Commit. See rationale above.
                 Commit();
@@ -226,10 +226,10 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti
     // out of sync may be possible but a users fault.
     // In case we reorg beyond the pruned depth, ReadBlockFromDisk would
     // throw and lead to a graceful shutdown
-    m_best_block_index = new_tip;
+    SetBestBlockIndex(new_tip);
     if (!Commit()) {
         // If commit fails, revert the best block index to avoid corruption.
-        m_best_block_index = current_tip;
+        SetBestBlockIndex(current_tip);
         return false;
     }
 
@@ -270,7 +270,7 @@ void BaseIndex::BlockConnected(const std::shared_ptr<const CBlock>& block, const
     }
 
     if (WriteBlock(*block, pindex)) {
-        m_best_block_index = pindex;
+        SetBestBlockIndex(pindex);
     } else {
         FatalError("%s: Failed to write block %s to index",
                    __func__, pindex->GetBlockHash().ToString());
@@ -377,3 +377,14 @@ IndexSummary BaseIndex::GetSummary() const
     summary.best_block_height = m_best_block_index ? m_best_block_index.load()->nHeight : 0;
     return summary;
 }
+
+void BaseIndex::SetBestBlockIndex(const CBlockIndex* block) {
+    assert(!node::fPruneMode || AllowPrune());
+
+    m_best_block_index = block;
+    if (AllowPrune() && block) {
+        node::PruneLockInfo prune_lock;
+        prune_lock.height_first = block->nHeight;
+        WITH_LOCK(::cs_main, m_chainstate->m_blockman.UpdatePruneLock(GetName(), prune_lock));
+    }
+}
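
For context, below is a simplified, standalone sketch of the pattern the new SetBestBlockIndex() helper centralizes. It is not part of the commit: BlockRef, PruneLocks, and ToyIndex are made-up stand-ins for CBlockIndex, the block manager's prune locks, and BaseIndex. The point it illustrates is that every update of the index's best block also refreshes a named prune lock at that height, so the node never prunes blocks the index has not processed yet.

// Illustrative sketch only, not part of the commit. BlockRef, PruneLocks and
// ToyIndex are simplified stand-ins for CBlockIndex, BlockManager's prune
// locks and BaseIndex respectively.
#include <atomic>
#include <map>
#include <string>

struct BlockRef { int height; };

struct PruneLocks {
    // index name -> first block height that must stay on disk
    std::map<std::string, int> locks;
    void Update(const std::string& name, int height_first) { locks[name] = height_first; }
};

class ToyIndex {
    std::atomic<const BlockRef*> m_best_block{nullptr};
    PruneLocks& m_prune_locks;
    bool m_allow_prune;

public:
    ToyIndex(PruneLocks& locks, bool allow_prune)
        : m_prune_locks(locks), m_allow_prune(allow_prune) {}

    // Mirrors SetBestBlockIndex(): record the new best block and, when
    // pruning is allowed, move the prune lock up to its height so earlier
    // blocks may be pruned while this one and later ones are kept.
    void SetBestBlock(const BlockRef* block)
    {
        m_best_block = block;
        if (m_allow_prune && block) {
            m_prune_locks.Update("toyindex", block->height);
        }
    }
};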