@@ -258,10 +258,21 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
                 assert assertDocSoftDeleted(leaf.reader(), segmentDocID) : "Delete op but soft_deletes field is not set [" + op + "]";
             } else {
                 final BytesReference source = fields.source();
+                if (source == null) {
+                    // TODO: Callers should ask for the range in which the source must be retained. Thus we should always
+                    // check for the existence of the source once peer-recovery sends ops after the local checkpoint.
+                    if (requiredFullRange) {
+                        throw new IllegalStateException("source not found for seqno=" + seqNo +
+                            " from_seqno=" + fromSeqNo + " to_seqno=" + toSeqNo);
+                    } else {
+                        skippedOperations++;
+                        return null;
+                    }
+                }
                 // TODO: pass the latest timestamp from engine.
                 final long autoGeneratedIdTimestamp = -1;
                 op = new Translog.Index(type, id, seqNo, primaryTerm, version,
-                    source == null ? null : source.toBytesRef().bytes, fields.routing(), autoGeneratedIdTimestamp);
+                    source.toBytesRef().bytes, fields.routing(), autoGeneratedIdTimestamp);
             }
         }
         assert fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo && lastSeenSeqNo < op.seqNo() : "Unexpected operation; " +
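For context, a minimal sketch of how the two modes of the snapshot now behave from a caller's point of view (the snapshot API and the skippedOperations counter are taken from this diff; process is a hypothetical placeholder, and "test" is an arbitrary source label):

    // Sketch only, assuming the API shown in this diff.
    // requiredFullRange == true: a pruned source inside [fromSeqNo, toSeqNo]
    //   fails fast inside next() with the IllegalStateException above.
    // requiredFullRange == false: the op is dropped, counted in
    //   skippedOperations, and next() moves on to the following doc.
    try (Translog.Snapshot snapshot =
             indexShard.newLuceneChangesSnapshot("test", fromSeqNo, toSeqNo, /* requiredFullRange */ false)) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            process(op); // hypothetical consumer; any op returned here carries a non-null source
        }
    }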
@@ -1061,7 +1061,7 @@ public Index(String type, String id, long seqNo, long primaryTerm, long version,
                      byte[] source, String routing, long autoGeneratedIdTimestamp) {
             this.type = type;
             this.id = id;
-            this.source = source == null ? null : new BytesArray(source);
+            this.source = new BytesArray(source);
             this.seqNo = seqNo;
             this.primaryTerm = primaryTerm;
             this.version = version;

@@ -1111,7 +1111,7 @@ public long version() {

         @Override
         public Source getSource() {
-            return source == null ? null : new Source(source, routing);
+            return new Source(source, routing);
         }

         private void write(final StreamOutput out) throws IOException {
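With the null handling gone, Translog.Index and getSource() now assume a non-null source: a sourceless doc must be skipped or rejected before the op is built, which is exactly what readDocAsOp above now does. A hedged sketch of the resulting contract (the Objects.requireNonNull guard is illustrative, not part of this diff; the variables are the constructor arguments):

    // Sketch: new BytesArray(source) would NPE on a null source, so callers
    // must guarantee source != null before constructing the operation.
    Translog.Index op = new Translog.Index(type, id, seqNo, primaryTerm, version,
        Objects.requireNonNull(source, "index ops must carry source"), routing, autoGeneratedIdTimestamp);
    assert op.getSource() != null : "getSource() no longer returns null for index ops";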
@@ -1418,26 +1418,36 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception {
         final MapperService mapperService = createMapperService("test");
         final boolean omitSourceAllTheTime = randomBoolean();
         final Set<String> liveDocs = new HashSet<>();
+        final Set<String> liveDocsWithSource = new HashSet<>();
         try (Store store = createStore();
              InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null,
                  globalCheckpoint::get))) {
             int numDocs = scaledRandomIntBetween(10, 100);
             for (int i = 0; i < numDocs; i++) {
-                ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, randomBoolean()
-                    || omitSourceAllTheTime);
+                boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
+                ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
                 engine.index(indexForDoc(doc));
                 liveDocs.add(doc.id());
+                if (useRecoverySource == false) {
+                    liveDocsWithSource.add(Integer.toString(i));
+                }
             }
             for (int i = 0; i < numDocs; i++) {
-                ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, randomBoolean()
-                    || omitSourceAllTheTime);
+                boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime;
+                ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource);
                 if (randomBoolean()) {
                     engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get()));
                     liveDocs.remove(doc.id());
+                    liveDocsWithSource.remove(doc.id());
                 }
                 if (randomBoolean()) {
                     engine.index(indexForDoc(doc));
                     liveDocs.add(doc.id());
+                    if (useRecoverySource == false) {
+                        liveDocsWithSource.add(doc.id());
+                    } else {
+                        liveDocsWithSource.remove(doc.id());
+                    }
                 }
                 if (randomBoolean()) {
                     engine.flush(randomBoolean(), true);

@@ -1453,12 +1463,7 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception {
                 minSeqNoToRetain = Math.min(globalCheckpoint.get() + 1 - retainedExtraOps, safeCommitLocalCheckpoint + 1);
             }
             engine.forceMerge(true, 1, false, false, false);
-            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService, (luceneOp, translogOp) -> {
-                if (luceneOp.seqNo() >= minSeqNoToRetain) {
-                    assertNotNull(luceneOp.getSource());
-                    assertThat(luceneOp.getSource().source, equalTo(translogOp.getSource().source));
-                }
-            });
+            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
             Map<Long, Translog.Operation> ops = readAllOperationsInLucene(engine, mapperService)
                 .stream().collect(Collectors.toMap(Translog.Operation::seqNo, Function.identity()));
             for (long seqno = 0; seqno <= engine.getLocalCheckpoint(); seqno++) {

@@ -1483,10 +1488,8 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exception {
             globalCheckpoint.set(engine.getLocalCheckpoint());
             engine.syncTranslog();
             engine.forceMerge(true, 1, false, false, false);
-            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService, (luceneOp, translogOp) -> {
-                assertEquals(translogOp.getSource().source, B_1);
-            });
-            assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocs.size()));
+            assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapperService);
+            assertThat(readAllOperationsInLucene(engine, mapperService), hasSize(liveDocsWithSource.size()));
         }
     }
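The switch from liveDocs.size() to liveDocsWithSource.size() in the last assertion reflects that docs indexed with useRecoverySource keep only a temporary recovery source, which the final force-merge may prune once the global checkpoint has passed them. A tiny illustrative trace of the bookkeeping (hypothetical doc ids, standalone sketch):

    Set<String> liveDocs = new HashSet<>();
    Set<String> liveDocsWithSource = new HashSet<>();
    liveDocs.add("0"); liveDocsWithSource.add("0");       // doc "0": useRecoverySource == false
    liveDocs.add("1");                                    // doc "1": useRecoverySource == true
    liveDocs.remove("0"); liveDocsWithSource.remove("0"); // doc "0" deleted
    assert liveDocs.containsAll(liveDocsWithSource);      // invariant: always a subset
    // After the final force-merge, doc "1"'s recovery source may be gone, so
    // only liveDocsWithSource.size() operations remain readable from Lucene.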
@@ -104,7 +104,6 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
 import java.util.function.Function;
 import java.util.function.LongSupplier;

@@ -822,14 +821,6 @@ public static List<Translog.Operation> readAllOperationsInLucene(Engine engine,
      * Asserts the provided engine has a consistent document history between translog and Lucene index.
      */
     public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException {
-        assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine, mapper, (luceneOp, translogOp) ->
-            assertThat(luceneOp.getSource().source, equalTo(translogOp.getSource().source)));
-    }
-
-    public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper,
-                                                                            BiConsumer<Translog.Operation, Translog.Operation> assertSource)
-        throws IOException {
-
         if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false) {
             return;
         }

@@ -867,7 +858,7 @@ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException {
                 assertThat(luceneOp.toString(), luceneOp.primaryTerm(), equalTo(translogOp.primaryTerm()));
                 assertThat(luceneOp.opType(), equalTo(translogOp.opType()));
                 if (luceneOp.opType() == Translog.Operation.Type.INDEX) {
-                    assertSource.accept(luceneOp, translogOp);
+                    assertThat(luceneOp.getSource().source, equalTo(translogOp.getSource().source));
                 }
             }
         }
@@ -283,10 +283,6 @@ static Translog.Operation[] getOperations(IndexShard indexShard, long globalCheckpoint,
         try (Translog.Snapshot snapshot = indexShard.newLuceneChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) {
             Translog.Operation op;
             while ((op = snapshot.next()) != null) {
-                if (op.getSource() == null) {
-                    throw new IllegalStateException("source not found for operation: " + op + " fromSeqNo: " + fromSeqNo +
-                        " maxOperationCount: " + maxOperationCount);
-                }
                 operations.add(op);
                 seenBytes += op.estimateSize();
                 if (seenBytes > maxOperationSizeInBytes) {
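The removed null-source guard is now redundant: the snapshot is opened with the new requiredFullRange flag set to true, so a missing source inside the requested range already fails inside snapshot.next() (see the readDocAsOp hunk above) rather than surfacing as a null-source op. A sketch of the invariant this code now relies on:

    // Sketch: with requiredFullRange == true, next() enforces source existence,
    // throwing IllegalStateException("source not found for seqno=...") on a
    // pruned source, so the explicit guard in the loop body can go away.
    try (Translog.Snapshot snapshot = indexShard.newLuceneChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) {
        Translog.Operation op;
        while ((op = snapshot.next()) != null) {
            assert op.getSource() != null; // guaranteed by the snapshot itself
        }
    }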
@@ -7,6 +7,9 @@

 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteResponse;
+import org.elasticsearch.action.bulk.BulkItemResponse;
+import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.ShardRouting;

@@ -30,6 +33,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;

@@ -50,12 +54,22 @@ public void testSimpleCcrReplication() throws Exception {
             shardFollowTask.start(leaderGroup.getPrimary().getGlobalCheckpoint(), followerGroup.getPrimary().getGlobalCheckpoint());
             docCount += leaderGroup.appendDocs(randomInt(128));
             leaderGroup.syncGlobalCheckpoint();
-
             leaderGroup.assertAllEqual(docCount);
-            int expectedCount = docCount;
+            Set<String> indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary());
+            assertBusy(() -> {
+                assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint()));
+                followerGroup.assertAllEqual(indexedDocIds.size());
+            });
+            // Deletes should be replicated to the follower
+            List<String> deleteDocIds = randomSubsetOf(indexedDocIds);
+            for (String deleteId : deleteDocIds) {
+                BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId));
+                assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED));
+            }
+            leaderGroup.syncGlobalCheckpoint();
             assertBusy(() -> {
                 assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint()));
-                followerGroup.assertAllEqual(expectedCount);
+                followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size());
             });
             shardFollowTask.markAsCompleted();
         }
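Under the same assumptions, the final assertion could be strengthened from matching counts to matching doc ids, reusing the getShardDocUIDs helper already used above (illustrative, not part of this diff):

    // Sketch: the follower should converge to exactly the leader's live doc ids,
    // which subsumes the count check once deletes are replicated.
    assertBusy(() -> {
        Set<String> leaderIds = getShardDocUIDs(leaderGroup.getPrimary());
        Set<String> followerIds = getShardDocUIDs(followerGroup.getPrimary());
        assertThat(followerIds, equalTo(leaderIds));
    });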