Remove usages of Set<CharSequence> #35501

Merged 1 commit on Nov 16, 2018
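
This change replaces Set<CharSequence> and Map<String, Set<CharSequence>> with Set<String> and Map<String, Set<String>> throughout the completion-suggester context code, converting values with toString() or substring() at the few points where CharSequence input still arrives. As background for the diff below, here is a minimal, illustrative sketch (not the PR's stated rationale) of why Set<CharSequence> is fragile: CharSequence does not refine the equals()/hashCode() contract, so two implementations holding the same characters need not be equal inside a HashSet.

    import java.util.HashSet;
    import java.util.Set;

    public class CharSequenceSetPitfall {
        public static void main(String[] args) {
            Set<CharSequence> contexts = new HashSet<>();
            contexts.add("category1"); // stored as a String
            // A StringBuilder with the same characters is not equal to the String,
            // because CharSequence leaves equals()/hashCode() up to each implementation.
            System.out.println(contexts.contains(new StringBuilder("category1"))); // false
            // Narrowing the element type to String removes that ambiguity.
            Set<String> stringContexts = new HashSet<>();
            stringContexts.add("category1");
            System.out.println(stringContexts.contains("category1")); // true
        }
    }
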
@@ -514,11 +514,11 @@ private void parse(ParseContext parseContext, Token token,
XContentParser parser, Map<String, CompletionInputMetaData> inputMap) throws IOException {
String currentFieldName = null;
if (token == Token.VALUE_STRING) {
inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.emptyMap(), 1));
inputMap.put(parser.text(), new CompletionInputMetaData(parser.text(), Collections.<String, Set<String>>emptyMap(), 1));
} else if (token == Token.START_OBJECT) {
Set<String> inputs = new HashSet<>();
int weight = 1;
Map<String, Set<CharSequence>> contextsMap = new HashMap<>();
Map<String, Set<String>> contextsMap = new HashMap<>();
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == Token.FIELD_NAME) {
currentFieldName = parser.currentName();
@@ -603,10 +603,10 @@ private void parse(ParseContext parseContext, Token token,

static class CompletionInputMetaData {
public final String input;
public final Map<String, Set<CharSequence>> contexts;
public final Map<String, Set<String>> contexts;
public final int weight;

CompletionInputMetaData(String input, Map<String, Set<CharSequence>> contexts, int weight) {
CompletionInputMetaData(String input, Map<String, Set<String>> contexts, int weight) {
this.input = input;
this.contexts = contexts;
this.weight = weight;
@@ -61,7 +61,7 @@ protected Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Sugges
int numResult = 0;
for (TopSuggestDocs.SuggestScoreDoc suggestDoc : collector.get().scoreLookupDocs()) {
// collect contexts
Map<String, Set<CharSequence>> contexts = Collections.emptyMap();
Map<String, Set<String>> contexts = Collections.emptyMap();
if (fieldType.hasContextMappings()) {
List<CharSequence> rawContexts = collector.getContexts(suggestDoc.doc);
if (rawContexts.size() > 0) {
@@ -280,13 +280,13 @@ public static Entry fromXContent(XContentParser parser) {
}

public static class Option extends Suggest.Suggestion.Entry.Option {
private Map<String, Set<CharSequence>> contexts = Collections.emptyMap();
private Map<String, Set<String>> contexts = Collections.emptyMap();
private ScoreDoc doc;
private SearchHit hit;

public static final ParseField CONTEXTS = new ParseField("contexts");

public Option(int docID, Text text, float score, Map<String, Set<CharSequence>> contexts) {
public Option(int docID, Text text, float score, Map<String, Set<String>> contexts) {
super(text, score);
this.doc = new ScoreDoc(docID, score);
this.contexts = Objects.requireNonNull(contexts, "context map cannot be null");
@@ -307,7 +307,7 @@ public Option(StreamInput in) throws IOException {
for (int i = 0; i < contextSize; i++) {
String contextName = in.readString();
int nContexts = in.readVInt();
Set<CharSequence> contexts = new HashSet<>(nContexts);
Set<String> contexts = new HashSet<>(nContexts);
for (int j = 0; j < nContexts; j++) {
contexts.add(in.readString());
}
@@ -322,7 +322,7 @@ protected void mergeInto(Suggest.Suggestion.Entry.Option otherOption) {
throw new UnsupportedOperationException();
}

public Map<String, Set<CharSequence>> getContexts() {
public Map<String, Set<String>> getContexts() {
return contexts;
}

@@ -352,7 +352,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
if (contexts.size() > 0) {
builder.startObject(CONTEXTS.getPreferredName());
for (Map.Entry<String, Set<CharSequence>> entry : contexts.entrySet()) {
for (Map.Entry<String, Set<String>> entry : contexts.entrySet()) {
builder.startArray(entry.getKey());
for (CharSequence context : entry.getValue()) {
builder.value(context.toString());
@@ -377,13 +377,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
(p,c) -> parseContexts(p), CompletionSuggestion.Entry.Option.CONTEXTS);
}

private static Map<String, Set<CharSequence>> parseContexts(XContentParser parser) throws IOException {
Map<String, Set<CharSequence>> contexts = new HashMap<>();
private static Map<String, Set<String>> parseContexts(XContentParser parser) throws IOException {
Map<String, Set<String>> contexts = new HashMap<>();
while((parser.nextToken()) != XContentParser.Token.END_OBJECT) {
ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation);
String key = parser.currentName();
ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.nextToken(), parser::getTokenLocation);
Set<CharSequence> values = new HashSet<>();
Set<String> values = new HashSet<>();
while((parser.nextToken()) != XContentParser.Token.END_ARRAY) {
ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.currentToken(), parser::getTokenLocation);
values.add(parser.text());
@@ -399,7 +399,7 @@ public static Option fromXContent(XContentParser parser) {
Text text = new Text((String) values.get(Suggestion.Entry.Option.TEXT.getPreferredName()));
Float score = (Float) values.get(Suggestion.Entry.Option.SCORE.getPreferredName());
@SuppressWarnings("unchecked")
Map<String, Set<CharSequence>> contexts = (Map<String, Set<CharSequence>>) values
Map<String, Set<String>> contexts = (Map<String, Set<String>>) values
.get(CompletionSuggestion.Entry.Option.CONTEXTS.getPreferredName());
if (contexts == null) {
contexts = Collections.emptyMap();
@@ -427,7 +427,7 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(false);
}
out.writeInt(contexts.size());
for (Map.Entry<String, Set<CharSequence>> entry : contexts.entrySet()) {
for (Map.Entry<String, Set<String>> entry : contexts.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().size());
for (CharSequence ctx : entry.getValue()) {
@@ -444,7 +444,7 @@ public String toString() {
stringBuilder.append(" score:");
stringBuilder.append(getScore());
stringBuilder.append(" context:[");
for (Map.Entry<String, Set<CharSequence>> entry: contexts.entrySet()) {
for (Map.Entry<String, Set<String>> entry: contexts.entrySet()) {
stringBuilder.append(" ");
stringBuilder.append(entry.getKey());
stringBuilder.append(":");
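
For callers of the suggestion response, the narrowing is visible in Option.getContexts(), which now returns Map<String, Set<String>>. A small sketch built the same way as the testToXContent case further below; the package names follow the usual Elasticsearch layout and are an assumption here:

    import java.util.Collections;
    import java.util.Map;
    import java.util.Set;

    import org.elasticsearch.common.text.Text;
    import org.elasticsearch.search.suggest.completion.CompletionSuggestion;

    public class OptionContextsExample {
        public static void main(String[] args) {
            // Same shape as the test below: one context key mapped to a single value.
            Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
            CompletionSuggestion.Entry.Option option =
                    new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts);
            // Context values are plain Strings now, so they can be compared without toString().
            Set<String> values = option.getContexts().get("key");
            System.out.println(values.contains("value")); // true
        }
    }
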
@@ -111,9 +111,9 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params
* </ul>
*/
@Override
public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser parser)
public Set<String> parseContext(ParseContext parseContext, XContentParser parser)
throws IOException, ElasticsearchParseException {
final Set<CharSequence> contexts = new HashSet<>();
final Set<String> contexts = new HashSet<>();
Token token = parser.currentToken();
if (token == Token.VALUE_STRING || token == Token.VALUE_NUMBER || token == Token.VALUE_BOOLEAN) {
contexts.add(parser.text());
@@ -134,8 +134,8 @@ public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser
}

@Override
public Set<CharSequence> parseContext(Document document) {
Set<CharSequence> values = null;
public Set<String> parseContext(Document document) {
Set<String> values = null;
if (fieldName != null) {
IndexableField[] fields = document.getFields(fieldName);
values = new HashSet<>(fields.length);
@@ -95,12 +95,12 @@ public String name() {
/**
* Parses a set of index-time contexts.
*/
public abstract Set<CharSequence> parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException;
public abstract Set<String> parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException;

/**
* Retrieves a set of context from a <code>document</code> at index-time.
*/
protected abstract Set<CharSequence> parseContext(ParseContext.Document document);
protected abstract Set<String> parseContext(ParseContext.Document document);

/**
* Prototype for the query context
@@ -22,6 +22,7 @@
import org.apache.lucene.search.suggest.document.CompletionQuery;
import org.apache.lucene.search.suggest.document.ContextQuery;
import org.apache.lucene.search.suggest.document.ContextSuggestField;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
@@ -94,7 +95,7 @@ public ContextMapping<?> get(String name) {
* Adds a context-enabled field for all the defined mappings to <code>document</code>
* see {@link org.elasticsearch.search.suggest.completion.context.ContextMappings.TypedContextField}
*/
public void addField(ParseContext.Document document, String name, String input, int weight, Map<String, Set<CharSequence>> contexts) {
public void addField(ParseContext.Document document, String name, String input, int weight, Map<String, Set<String>> contexts) {
document.add(new TypedContextField(name, input, weight, contexts, document));
}

@@ -121,10 +122,10 @@ public Iterator<ContextMapping<?>> iterator() {
* at index time
*/
private class TypedContextField extends ContextSuggestField {
private final Map<String, Set<CharSequence>> contexts;
private final Map<String, Set<String>> contexts;
private final ParseContext.Document document;

TypedContextField(String name, String value, int weight, Map<String, Set<CharSequence>> contexts,
TypedContextField(String name, String value, int weight, Map<String, Set<String>> contexts,
ParseContext.Document document) {
super(name, value, weight);
this.contexts = contexts;
@@ -133,18 +134,18 @@ private class TypedContextField extends ContextSuggestField {

@Override
protected Iterable<CharSequence> contexts() {
Set<CharSequence> typedContexts = new HashSet<>();
Set<CharsRef> typedContexts = new HashSet<>();
final CharsRefBuilder scratch = new CharsRefBuilder();
scratch.grow(1);
for (int typeId = 0; typeId < contextMappings.size(); typeId++) {
scratch.setCharAt(0, (char) typeId);
scratch.setLength(1);
ContextMapping<?> mapping = contextMappings.get(typeId);
Set<CharSequence> contexts = new HashSet<>(mapping.parseContext(document));
Set<String> contexts = new HashSet<>(mapping.parseContext(document));
if (this.contexts.get(mapping.name()) != null) {
contexts.addAll(this.contexts.get(mapping.name()));
}
for (CharSequence context : contexts) {
for (String context : contexts) {
scratch.append(context);
typedContexts.add(scratch.toCharsRef());
scratch.setLength(1);
@@ -153,7 +154,7 @@ protected Iterable<CharSequence> contexts() {
if (typedContexts.isEmpty()) {
throw new IllegalArgumentException("Contexts are mandatory in context enabled completion field [" + name + "]");
}
return typedContexts;
return new ArrayList<CharSequence>(typedContexts);
}
}

@@ -198,18 +199,18 @@ public ContextQuery toContextQuery(CompletionQuery query, Map<String, List<Conte
* @return a map of context names and their values
*
*/
public Map<String, Set<CharSequence>> getNamedContexts(List<CharSequence> contexts) {
Map<String, Set<CharSequence>> contextMap = new HashMap<>(contexts.size());
public Map<String, Set<String>> getNamedContexts(List<CharSequence> contexts) {
Map<String, Set<String>> contextMap = new HashMap<>(contexts.size());
for (CharSequence typedContext : contexts) {
int typeId = typedContext.charAt(0);
assert typeId < contextMappings.size() : "Returned context has invalid type";
ContextMapping<?> mapping = contextMappings.get(typeId);
Set<CharSequence> contextEntries = contextMap.get(mapping.name());
Set<String> contextEntries = contextMap.get(mapping.name());
if (contextEntries == null) {
contextEntries = new HashSet<>();
contextMap.put(mapping.name(), contextEntries);
}
contextEntries.add(typedContext.subSequence(1, typedContext.length()));
contextEntries.add(typedContext.subSequence(1, typedContext.length()).toString());
}
return contextMap;
}
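
The encoding used by TypedContextField and getNamedContexts() is unchanged by this PR: every context value is stored with a one-char prefix whose char value is the mapping's type id, and getNamedContexts() strips that prefix, now materializing the remainder as a String. A standalone sketch of that prefix scheme, with made-up mapping names and without the Lucene CharsRef plumbing:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class TypedContextSketch {
        // Index-time encoding: prefix the value with a char holding the mapping's type id.
        static String encode(int typeId, String context) {
            return (char) typeId + context;
        }

        // Query/response-time decoding: read the type id back, strip it, and group by mapping name.
        static Map<String, Set<String>> decode(List<String> typedContexts, List<String> namesByTypeId) {
            Map<String, Set<String>> named = new HashMap<>();
            for (String typed : typedContexts) {
                int typeId = typed.charAt(0);
                String name = namesByTypeId.get(typeId);
                named.computeIfAbsent(name, k -> new HashSet<>()).add(typed.substring(1));
            }
            return named;
        }

        public static void main(String[] args) {
            List<String> encoded = Arrays.asList(encode(0, "category1"), encode(1, "u33dbf"));
            // Prints something like {cat=[category1], geo=[u33dbf]}
            System.out.println(decode(encoded, Arrays.asList("cat", "geo")));
        }
    }
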
@@ -144,14 +144,14 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params
* see {@code GeoPoint(String)} for GEO POINT
*/
@Override
public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException {
public Set<String> parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException {
if (fieldName != null) {
MappedFieldType fieldType = parseContext.mapperService().fullName(fieldName);
if (!(fieldType instanceof GeoPointFieldMapper.GeoPointFieldType)) {
throw new ElasticsearchParseException("referenced field must be mapped to geo_point");
}
}
final Set<CharSequence> contexts = new HashSet<>();
final Set<String> contexts = new HashSet<>();
Token token = parser.currentToken();
if (token == Token.START_ARRAY) {
token = parser.nextToken();
@@ -178,7 +178,7 @@ public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser
} else if (token == Token.VALUE_STRING) {
final String geoHash = parser.text();
final CharSequence truncatedGeoHash = geoHash.subSequence(0, Math.min(geoHash.length(), precision));
contexts.add(truncatedGeoHash);
contexts.add(truncatedGeoHash.toString());
} else {
// or a single location
GeoPoint point = GeoUtils.parseGeoPoint(parser);
@@ -188,8 +188,8 @@ public Set<CharSequence> parseContext(ParseContext parseContext, XContentParser
}

@Override
public Set<CharSequence> parseContext(Document document) {
final Set<CharSequence> geohashes = new HashSet<>();
public Set<String> parseContext(Document document) {
final Set<String> geohashes = new HashSet<>();

if (fieldName != null) {
IndexableField[] fields = document.getFields(fieldName);
@@ -222,10 +222,10 @@ public Set<CharSequence> parseContext(Document document) {
}
}

Set<CharSequence> locations = new HashSet<>();
for (CharSequence geohash : geohashes) {
Set<String> locations = new HashSet<>();
for (String geohash : geohashes) {
int precision = Math.min(this.precision, geohash.length());
CharSequence truncatedGeohash = geohash.subSequence(0, precision);
String truncatedGeohash = geohash.substring(0, precision);
locations.add(truncatedGeohash);
}
return locations;
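
In the geo context mapping the same narrowing turns truncated geohashes into Strings, using substring() in place of subSequence(). A tiny illustration with a made-up geohash and precision:

    public class GeohashTruncation {
        public static void main(String[] args) {
            String geohash = "u33dbfcyegk2";                   // hypothetical geohash
            int precision = Math.min(6, geohash.length());     // precision of 6 chosen for illustration
            String truncated = geohash.substring(0, precision);
            System.out.println(truncated);                     // u33dbf
        }
    }
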
@@ -48,10 +48,10 @@ public static Option createTestItem() {
Text text = new Text(randomAlphaOfLengthBetween(5, 15));
int docId = randomInt();
int numberOfContexts = randomIntBetween(0, 3);
Map<String, Set<CharSequence>> contexts = new HashMap<>();
Map<String, Set<String>> contexts = new HashMap<>();
for (int i = 0; i < numberOfContexts; i++) {
int numberOfValues = randomIntBetween(0, 3);
Set<CharSequence> values = new HashSet<>();
Set<String> values = new HashSet<>();
for (int v = 0; v < numberOfValues; v++) {
values.add(randomAlphaOfLengthBetween(5, 15));
}
@@ -106,7 +106,7 @@ private void doTestFromXContent(boolean addRandomFields) throws IOException {
}

public void testToXContent() throws IOException {
Map<String, Set<CharSequence>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts);
BytesReference xContent = toXContent(option, XContentType.JSON, randomBoolean());
assertEquals("{\"text\":\"someText\",\"score\":1.3,\"contexts\":{\"key\":[\"value\"]}}"
@@ -249,7 +249,7 @@ public void testToXContent() throws IOException {
+ "}", xContent.utf8ToString());
}
{
Map<String, Set<CharSequence>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
Map<String, Set<String>> contexts = Collections.singletonMap("key", Collections.singleton("value"));
CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(1, new Text("someText"), 1.3f, contexts);
CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entryText"), 42, 313);
entry.addOption(option);
@@ -724,7 +724,7 @@ public void testParsingContextFromDocument() throws Exception {
document.add(new Field(keyword.name(), new BytesRef("category1"), keyword));
// Ignore doc values
document.add(new SortedSetDocValuesField(keyword.name(), new BytesRef("category1")));
Set<CharSequence> context = mapping.parseContext(document);
Set<String> context = mapping.parseContext(document);
assertThat(context.size(), equalTo(1));
assertTrue(context.contains("category1"));
