Commit a99e996

Adjust testWaitForPendingSeqNo (#39404)
Since #39006, we should either remove `testWaitForPendingSeqNo` or adjust it not to wait for the pending operations. This change picks the latter. Relates #39006
1 parent 11fa219 · commit a99e996

File tree

1 file changed: +2 additions, −18 deletions

server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java

Lines changed: 2 additions & 18 deletions
@@ -75,15 +75,13 @@
 import java.util.stream.Collectors;

 import static org.hamcrest.Matchers.anyOf;
-import static org.hamcrest.Matchers.both;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.everyItem;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.isIn;
 import static org.hamcrest.Matchers.lessThan;
-import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.not;

 public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase {
@@ -441,17 +439,7 @@ public void testResyncAfterPrimaryPromotion() throws Exception {
         }
     }

-    @TestLogging(
-        "_root:DEBUG,"
-            + "org.elasticsearch.action.bulk:TRACE,"
-            + "org.elasticsearch.action.get:TRACE,"
-            + "org.elasticsearch.cluster.service:TRACE,"
-            + "org.elasticsearch.discovery:TRACE,"
-            + "org.elasticsearch.indices.cluster:TRACE,"
-            + "org.elasticsearch.indices.recovery:TRACE,"
-            + "org.elasticsearch.index.seqno:TRACE,"
-            + "org.elasticsearch.index.shard:TRACE")
-    public void testWaitForPendingSeqNo() throws Exception {
+    public void testDoNotWaitForPendingSeqNo() throws Exception {
         IndexMetaData metaData = buildIndexMetaData(1);

         final int pendingDocs = randomIntBetween(1, 5);
@@ -525,7 +513,7 @@ public void indexTranslogOperations(
             final int indexedDuringRecovery = shards.indexDocs(randomInt(5));
             docs += indexedDuringRecovery;

-            assertFalse("recovery should wait on pending docs", opsSent.get());
+            assertBusy(() -> assertFalse("recovery should not wait for on pending docs", opsSent.get()));

             primaryEngineFactory.releaseLatchedIndexers();
             pendingDocsDone.await();
@@ -534,10 +522,6 @@ public void indexTranslogOperations(
             recoveryFuture.get();

             assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty());
-            assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(),
-                // we don't know which of the inflight operations made it into the translog range we re-play
-                both(greaterThanOrEqualTo(docs-indexedDuringRecovery)).and(lessThanOrEqualTo(docs)));
-
             shards.assertAllEqual(docs);
         } finally {
             primaryEngineFactory.close();
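
For context on the new assertion style: the diff replaces a one-shot assertion with the test framework's assertBusy helper (defined on ESTestCase), which re-runs the supplied check until it stops throwing or a timeout elapses. Below is a minimal standalone sketch of that retry idea, not the framework's actual implementation; the class name, timeout, and back-off interval are illustrative.

import java.util.concurrent.TimeUnit;

// Illustrative stand-in for the retry behaviour of an assertBusy-style helper:
// re-run the assertion until it no longer throws, or give up after a timeout.
final class BusyAssertSketch {

    static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        final long deadline = System.nanoTime() + unit.toNanos(timeout);
        AssertionError lastFailure;
        do {
            try {
                assertion.run();
                return; // the assertion held, nothing more to do
            } catch (AssertionError e) {
                lastFailure = e; // remember the most recent failure
            }
            Thread.sleep(50); // brief back-off before the next attempt
        } while (System.nanoTime() < deadline);
        throw lastFailure; // timed out: surface the last failure
    }

    public static void main(String[] args) throws InterruptedException {
        // Usage example: wait for a flag that another thread flips asynchronously.
        java.util.concurrent.atomic.AtomicBoolean opsSent = new java.util.concurrent.atomic.AtomicBoolean(false);
        new Thread(() -> opsSent.set(true)).start();
        assertBusy(() -> {
            if (!opsSent.get()) {
                throw new AssertionError("ops not sent yet");
            }
        }, 10, TimeUnit.SECONDS);
    }
}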
