OAK-11220 : removed usage of Sets.newConcurrentHashSet #1846

Merged 2 commits on Nov 15, 2024
@@ -230,12 +230,32 @@ public static <K> Set<K> newHashSet(final int capacity) {
*
* @see CollectionUtils#newHashMap(int)
* @see CollectionUtils#newLinkedHashSet(int)
* @see CollectionUtils#newConcurrentHashSet(Iterable)
*/
@NotNull
public static <K> Set<K> newConcurrentHashSet() {
return ConcurrentHashMap.newKeySet();
}

/**
* Creates a new {@link Set} containing the given elements, backed by {@link ConcurrentHashMap} to allow concurrent access.
* The returned Set does not allow null elements.
*
* @param elements the elements to add to the new Set
* @return a new {@link Set} containing the given elements, backed by {@link ConcurrentHashMap}
* @throws NullPointerException if the iterable or any of its elements is null
*
* @see CollectionUtils#newHashMap(int)
* @see CollectionUtils#newLinkedHashSet(int)
* @see CollectionUtils#newConcurrentHashSet()
*/
@NotNull
public static <K> Set<K> newConcurrentHashSet(@NotNull Iterable<? extends K> elements) {
Objects.requireNonNull(elements);
final Set<K> set = newConcurrentHashSet();
elements.forEach(set::add);
return set;
}

/**
* Creates a new, empty LinkedHashSet with expected capacity.
* <p>
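A minimal usage sketch of the two factory methods (variable names and sample values below are illustrative, not part of the PR):

import java.util.Arrays;
import java.util.Set;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;

// empty concurrent set, backed by ConcurrentHashMap.newKeySet()
Set<String> validated = CollectionUtils.newConcurrentHashSet();
validated.add("/oak:index/lucene");

// pre-populated concurrent set; any null element triggers a NullPointerException
Set<String> names = CollectionUtils.newConcurrentHashSet(Arrays.asList("a", "b", "c"));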
@@ -21,7 +21,7 @@
* Utilities for Java collections and streams.
*/
@Internal(since = "1.0.0")
@Version("1.1.0")
@Version("1.2.0")
package org.apache.jackrabbit.oak.commons.collections;
import org.apache.jackrabbit.oak.commons.annotations.Internal;
import org.osgi.annotation.versioning.Version;
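Bumping @Version from 1.1.0 to 1.2.0 is the expected OSGi semantic-versioning step here: a minor-version increase signals a backwards-compatible API addition to the exported package.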
@@ -130,6 +130,26 @@ public void concurrentHashSet() {
Assert.assertEquals(s, concurrentHashSet);
}

@Test
public void concurrentHashSetWithIterable() {
// create a set of non-null values
final Iterable<String> elements = data.stream().filter(Objects::nonNull).collect(Collectors.toSet());

Set<String> concurrentHashSet = CollectionUtils.newConcurrentHashSet(elements);

Assert.assertEquals(elements, concurrentHashSet);
}

@Test(expected = NullPointerException.class)
public void concurrentHashSetWithIterableWithNulls() {
// create a set that retains the null values present in data
final Iterable<String> elements = new HashSet<>(data);

CollectionUtils.newConcurrentHashSet(elements);

fail("Should throw NullPointerException");
}

@Test
public void toArrayDequeWithNonEmptyIterable() {
List<String> list = Arrays.asList("one", "two", "three");
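The second test depends on data containing at least one null; the NullPointerException comes from the JDK collection itself. A minimal illustration (plain JDK code, not from the PR):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

Set<String> s = ConcurrentHashMap.newKeySet();
s.add(null); // throws NullPointerException: ConcurrentHashMap permits neither null keys nor null values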
@@ -40,9 +40,9 @@
import javax.management.openmbean.TabularDataSupport;
import javax.management.openmbean.TabularType;

import org.apache.jackrabbit.guava.common.collect.Sets;
import org.apache.jackrabbit.guava.common.util.concurrent.Monitor;
import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.commons.conditions.Validate;
import org.apache.jackrabbit.oak.plugins.index.lucene.directory.CopyOnReadDirectory;
import org.apache.jackrabbit.oak.plugins.index.lucene.directory.CopyOnWriteDirectory;
@@ -104,7 +104,7 @@ public class IndexCopier implements CopyOnReadStatsMBean, Closeable {
private final boolean prefetchEnabled;
private volatile boolean closed;
private final IndexRootDirectory indexRootDirectory;
private final Set<String> validatedIndexPaths = Sets.newConcurrentHashSet();
private final Set<String> validatedIndexPaths = CollectionUtils.newConcurrentHashSet();
private final IndexSanityChecker.IndexSanityStatistics indexSanityStatistics = new IndexSanityChecker.IndexSanityStatistics();

public IndexCopier(Executor executor, File indexRootDir) throws IOException {
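This and the remaining files apply the same one-line swap, and it should be behaviour-preserving: Guava's Sets.newConcurrentHashSet() wraps a ConcurrentHashMap via Collections.newSetFromMap (an assumption based on Guava's sources), while the new utility returns ConcurrentHashMap.newKeySet() directly; both are thread-safe and reject nulls. A sketch of the two constructions:

import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// roughly what Guava's Sets.newConcurrentHashSet() does (assumption, see above)
Set<String> guavaStyle = Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

// what CollectionUtils.newConcurrentHashSet() does, per the diff above
Set<String> jdkStyle = ConcurrentHashMap.newKeySet();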
@@ -23,7 +23,7 @@
import java.util.Set;
import java.util.TreeSet;

import org.apache.jackrabbit.guava.common.collect.Sets;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexDefinition;
import org.apache.jackrabbit.oak.plugins.index.lucene.directory.ActiveDeletedBlobCollectorFactory.BlobDeletionCallback;
import org.apache.jackrabbit.oak.spi.blob.BlobStore;
@@ -93,7 +93,7 @@ static void reReadCommandLineParam() {

private final OakDirectory base;

private final Set<String> bufferedForDelete = Sets.newConcurrentHashSet();
private final Set<String> bufferedForDelete = CollectionUtils.newConcurrentHashSet();

private NodeBuilder bufferedBuilder = EMPTY_NODE.builder();

@@ -38,10 +38,10 @@
import java.util.concurrent.atomic.AtomicReference;

import org.apache.jackrabbit.guava.common.collect.Iterables;
import org.apache.jackrabbit.guava.common.collect.Sets;
import org.apache.commons.io.FileUtils;
import org.apache.jackrabbit.oak.commons.IOUtils;
import org.apache.jackrabbit.oak.commons.PerfLogger;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.commons.concurrent.NotifyingFutureTask;
import org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopier;
import org.apache.lucene.store.Directory;
@@ -72,8 +72,8 @@ public Void call() throws Exception {
private final Directory local;
private final Executor executor;
private final ConcurrentMap<String, COWFileReference> fileMap = new ConcurrentHashMap<>();
private final Set<String> deletedFilesLocal = Sets.newConcurrentHashSet();
private final Set<String> skippedFiles = Sets.newConcurrentHashSet();
private final Set<String> deletedFilesLocal = CollectionUtils.newConcurrentHashSet();
private final Set<String> skippedFiles = CollectionUtils.newConcurrentHashSet();

private final BlockingQueue<Callable<Void>> queue = new LinkedBlockingQueue<Callable<Void>>();
private final AtomicReference<Throwable> errorInCopy = new AtomicReference<Throwable>();
@@ -24,12 +24,12 @@
import java.util.Set;

import org.apache.jackrabbit.guava.common.collect.ImmutableSet;
import org.apache.jackrabbit.guava.common.collect.Sets;
import org.apache.jackrabbit.oak.api.Blob;
import org.apache.jackrabbit.oak.api.PropertyState;
import org.apache.jackrabbit.oak.api.Type;
import org.apache.jackrabbit.oak.commons.PerfLogger;
import org.apache.jackrabbit.oak.commons.StringUtils;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.plugins.blob.datastore.InMemoryDataRecord;
import org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexDefinition;
import org.apache.jackrabbit.oak.plugins.index.lucene.directory.ActiveDeletedBlobCollectorFactory.BlobDeletionCallback;
@@ -80,7 +80,7 @@ public class OakDirectory extends Directory {
private LockFactory lockFactory;
private final boolean readOnly;
private final boolean streamingWriteEnabled;
private final Set<String> fileNames = Sets.newConcurrentHashSet();
private final Set<String> fileNames = CollectionUtils.newConcurrentHashSet();
private final Set<String> fileNamesAtStart;
private final String indexName;
private final BlobFactory blobFactory;
@@ -18,14 +18,14 @@

package org.apache.jackrabbit.oak.segment;

import static org.apache.jackrabbit.guava.common.collect.Sets.newConcurrentHashSet;
import static org.apache.jackrabbit.oak.stats.StatsOptions.METRICS_ONLY;

import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.util.Set;

import org.apache.jackrabbit.oak.commons.Buffer;
import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.stats.CounterStats;
import org.apache.jackrabbit.oak.stats.StatisticsProvider;
import org.jetbrains.annotations.NotNull;
@@ -69,7 +69,7 @@ public class SegmentBufferMonitor {
public static final String HEAP_BUFFER_CAPACITY = "oak.segment.heap-buffer-capacity";

@NotNull
private final Set<BufferReference> buffers = newConcurrentHashSet();
private final Set<BufferReference> buffers = CollectionUtils.newConcurrentHashSet();

@NotNull
private final ReferenceQueue<Buffer> referenceQueue = new ReferenceQueue<>();
@@ -19,12 +19,12 @@
package org.apache.jackrabbit.oak.segment.spi.monitor;

import static java.util.Objects.requireNonNull;
import static org.apache.jackrabbit.guava.common.collect.Sets.newConcurrentHashSet;
import static java.util.Collections.emptySet;

import java.io.File;
import java.util.Set;

import org.apache.jackrabbit.oak.commons.collections.CollectionUtils;
import org.apache.jackrabbit.oak.spi.whiteboard.Registration;
import org.jetbrains.annotations.NotNull;

@@ -40,7 +40,7 @@ public class CompositeIOMonitor implements IOMonitor {
* @param ioMonitors {@link IOMonitor} instances to delegate to
*/
public CompositeIOMonitor(@NotNull Iterable<? extends IOMonitor> ioMonitors) {
this.ioMonitors = newConcurrentHashSet(requireNonNull(ioMonitors));
this.ioMonitors = CollectionUtils.newConcurrentHashSet(requireNonNull(ioMonitors));
}

/**
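CompositeIOMonitor is the one call site that needs the new Iterable overload. A rough expansion of the replaced expression, with names as in the diff (a sketch, not the actual implementation):

// CollectionUtils.newConcurrentHashSet(requireNonNull(ioMonitors)) is equivalent to:
Set<IOMonitor> set = ConcurrentHashMap.newKeySet(); // empty concurrent set
requireNonNull(ioMonitors).forEach(set::add);       // NullPointerException if any monitor is null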
@@ -141,7 +141,6 @@
import org.apache.jackrabbit.guava.common.collect.ImmutableMap;
import org.apache.jackrabbit.guava.common.collect.Iterables;
import org.apache.jackrabbit.guava.common.collect.Maps;
import org.apache.jackrabbit.guava.common.collect.Sets;

/**
* Implementation of a NodeStore on {@link DocumentStore}.
@@ -552,7 +551,7 @@ public String serialize(Blob blob) {
* reverts changes done by commits in the set that are older than the
* current head revision.
*/
private final Set<Revision> inDoubtTrunkCommits = Sets.newConcurrentHashSet();
private final Set<Revision> inDoubtTrunkCommits = CollectionUtils.newConcurrentHashSet();

/**
* Contains journal entry revisions (branch commit style) that were created
@@ -561,7 +560,7 @@
* upon each backgroundWrite. It is used to avoid duplicate journal entries
* that would otherwise be created as a result of merge (normal plus exclusive) retries
*/
private final Set<String> pendingRollbackInvalidations = Sets.newConcurrentHashSet();
private final Set<String> pendingRollbackInvalidations = CollectionUtils.newConcurrentHashSet();

private final Predicate<Path> nodeCachePredicate;
