
Commit eae83fd

Adjust testWaitForPendingSeqNo (#39404)
Since #39006, we should either remove `testWaitForPendingSeqNo` or adjust it not to wait for the pending operations. This change picks the latter. Relates #39006
1 parent c7769fd · commit eae83fd

File tree

1 file changed (+2, -18 lines)


server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java

Lines changed: 2 additions & 18 deletions
@@ -75,15 +75,13 @@
 import java.util.stream.Collectors;
 
 import static org.hamcrest.Matchers.anyOf;
-import static org.hamcrest.Matchers.both;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.isIn;
 import static org.hamcrest.Matchers.lessThan;
-import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;
 
 public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase {
@@ -440,17 +438,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception {
         }
     }
 
-    @TestLogging(
-        "_root:DEBUG,"
-            + "org.elasticsearch.action.bulk:TRACE,"
-            + "org.elasticsearch.action.get:TRACE,"
-            + "org.elasticsearch.cluster.service:TRACE,"
-            + "org.elasticsearch.discovery:TRACE,"
-            + "org.elasticsearch.indices.cluster:TRACE,"
-            + "org.elasticsearch.indices.recovery:TRACE,"
-            + "org.elasticsearch.index.seqno:TRACE,"
-            + "org.elasticsearch.index.shard:TRACE")
-    public void testWaitForPendingSeqNo() throws Exception {
+    public void testDoNotWaitForPendingSeqNo() throws Exception {
         IndexMetaData metaData = buildIndexMetaData(1);
 
         final int pendingDocs = randomIntBetween(1, 5);
@@ -524,7 +512,7 @@ public void indexTranslogOperations(
             final int indexedDuringRecovery = shards.indexDocs(randomInt(5));
             docs += indexedDuringRecovery;
 
-            assertFalse("recovery should wait on pending docs", opsSent.get());
+            assertBusy(() -> assertFalse("recovery should not wait for on pending docs", opsSent.get()));
 
             primaryEngineFactory.releaseLatchedIndexers();
             pendingDocsDone.await();
@@ -533,10 +521,6 @@ public void indexTranslogOperations(
             recoveryFuture.get();
 
             assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
-            assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(),
-                // we don't know which of the inflight operations made it into the translog range we re-play
-                both(greaterThanOrEqualTo(docs-indexedDuringRecovery)).and(lessThanOrEqualTo(docs)));
-
             shards.assertAllEqual(docs);
         } finally {
             primaryEngineFactory.close();
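
For context, the replacement assertion relies on ESTestCase.assertBusy, which re-runs a code block until it stops throwing an AssertionError or a timeout elapses. Below is a minimal, hypothetical sketch of that pattern, not taken from this commit; the test class name and the opsSent flag are stand-ins for illustration only.

    // Hypothetical sketch of the assertBusy pattern (not part of this commit).
    // assertBusy is inherited from org.elasticsearch.test.ESTestCase and retries the
    // lambda, backing off between attempts, until it no longer throws or the timeout expires.
    import java.util.concurrent.atomic.AtomicBoolean;

    import org.elasticsearch.test.ESTestCase;

    public class AssertBusyPatternTests extends ESTestCase {

        public void testFlagStaysUnset() throws Exception {
            AtomicBoolean opsSent = new AtomicBoolean(false); // stand-in for the flag in the real test

            // Retries the assertion instead of failing on the first check, so a briefly
            // inconsistent state does not produce a spurious failure.
            assertBusy(() -> assertFalse("operations should not have been sent", opsSent.get()));
        }
    }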
