Skip to content

Commit

Permalink
Use Varargs in Model APIs (#12878)
Browse files Browse the repository at this point in the history
* Move model list APIs to use varargs

* Add missing vararg and List overloads

* Added more vararg and List overloads, add JsonGetters to List overloads

* Fix failing test

* Remove list overload from input only model

* Fix Java 8 toArray call

* Add null checking validation for vararg overloads
  • Loading branch information
alzimmermsft authored Jul 10, 2020
1 parent e0772d4 commit f627f82
Show file tree
Hide file tree
Showing 47 changed files with 750 additions and 446 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -35,16 +35,16 @@ public static AnalyzeTextOptions map(com.azure.search.documents.indexes.implemen
}

if (obj.getCharFilters() != null) {
List<CharFilterName> charFilters =
obj.getCharFilters().stream().map(CharFilterNameConverter::map).collect(Collectors.toList());
analyzeTextOptions.setCharFilters(charFilters);
analyzeTextOptions.setCharFilters(obj.getCharFilters().stream()
.map(CharFilterNameConverter::map)
.toArray(CharFilterName[]::new));
}


if (obj.getTokenFilters() != null) {
List<TokenFilterName> tokenFilters =
obj.getTokenFilters().stream().map(TokenFilterNameConverter::map).collect(Collectors.toList());
analyzeTextOptions.setTokenFilters(tokenFilters);
analyzeTextOptions.setTokenFilters(obj.getTokenFilters().stream()
.map(TokenFilterNameConverter::map)
.toArray(TokenFilterName[]::new));
}


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
package com.azure.search.documents.implementation.converters;

import com.azure.search.documents.indexes.models.CjkBigramTokenFilter;
import com.azure.search.documents.indexes.models.CjkBigramTokenFilterScripts;

import java.util.List;
import java.util.stream.Collectors;
Expand All @@ -28,9 +27,9 @@ public static CjkBigramTokenFilter map(com.azure.search.documents.indexes.implem
cjkBigramTokenFilter.setOutputUnigrams(outputUnigrams);

if (obj.getIgnoreScripts() != null) {
List<CjkBigramTokenFilterScripts> ignoreScripts =
obj.getIgnoreScripts().stream().map(CjkBigramTokenFilterScriptsConverter::map).collect(Collectors.toList());
cjkBigramTokenFilter.setIgnoreScripts(ignoreScripts);
cjkBigramTokenFilter.setIgnoreScripts(obj.getIgnoreScripts().stream()
.map(CjkBigramTokenFilterScriptsConverter::map)
.collect(Collectors.toList()));
}
return cjkBigramTokenFilter;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
package com.azure.search.documents.implementation.converters;

import com.azure.search.documents.indexes.models.EdgeNGramTokenizer;
import com.azure.search.documents.indexes.models.TokenCharacterKind;

import java.util.List;
import java.util.stream.Collectors;
Expand All @@ -28,9 +27,9 @@ public static EdgeNGramTokenizer map(com.azure.search.documents.indexes.implemen
edgeNGramTokenizer.setMaxGram(maxGram);

if (obj.getTokenChars() != null) {
List<TokenCharacterKind> tokenChars =
obj.getTokenChars().stream().map(TokenCharacterKindConverter::map).collect(Collectors.toList());
edgeNGramTokenizer.setTokenChars(tokenChars);
edgeNGramTokenizer.setTokenChars(obj.getTokenChars().stream()
.map(TokenCharacterKindConverter::map)
.collect(Collectors.toList()));
}

Integer minGram = obj.getMinGram();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,7 @@ public static ElisionTokenFilter map(com.azure.search.documents.indexes.implemen
ElisionTokenFilter elisionTokenFilter = new ElisionTokenFilter(obj.getName());

if (obj.getArticles() != null) {
List<String> articles = new ArrayList<>(obj.getArticles());
elisionTokenFilter.setArticles(articles);
elisionTokenFilter.setArticles(obj.getArticles());
}
return elisionTokenFilter;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@

package com.azure.search.documents.implementation.converters;

import com.azure.search.documents.indexes.models.EntityCategory;
import com.azure.search.documents.indexes.models.EntityRecognitionSkill;
import com.azure.search.documents.indexes.models.EntityRecognitionSkillLanguage;
import com.azure.search.documents.indexes.models.InputFieldMappingEntry;
Expand Down Expand Up @@ -52,9 +51,9 @@ public static EntityRecognitionSkill map(com.azure.search.documents.indexes.impl
}

if (obj.getCategories() != null) {
List<EntityCategory> categories =
obj.getCategories().stream().map(EntityCategoryConverter::map).collect(Collectors.toList());
entityRecognitionSkill.setCategories(categories);
entityRecognitionSkill.setCategories(obj.getCategories().stream()
.map(EntityCategoryConverter::map)
.collect(Collectors.toList()));
}

Double minimumPrecision = obj.getMinimumPrecision();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,8 @@

import com.azure.search.documents.indexes.models.ImageAnalysisSkill;
import com.azure.search.documents.indexes.models.ImageAnalysisSkillLanguage;
import com.azure.search.documents.indexes.models.ImageDetail;
import com.azure.search.documents.indexes.models.InputFieldMappingEntry;
import com.azure.search.documents.indexes.models.OutputFieldMappingEntry;
import com.azure.search.documents.indexes.models.VisualFeature;

import java.util.List;
import java.util.stream.Collectors;
Expand Down Expand Up @@ -44,9 +42,9 @@ public static ImageAnalysisSkill map(com.azure.search.documents.indexes.implemen
imageAnalysisSkill.setDescription(description);

if (obj.getVisualFeatures() != null) {
List<VisualFeature> visualFeatures =
obj.getVisualFeatures().stream().map(VisualFeatureConverter::map).collect(Collectors.toList());
imageAnalysisSkill.setVisualFeatures(visualFeatures);
imageAnalysisSkill.setVisualFeatures(obj.getVisualFeatures().stream()
.map(VisualFeatureConverter::map)
.collect(Collectors.toList()));
}

if (obj.getDefaultLanguageCode() != null) {
Expand All @@ -56,9 +54,9 @@ public static ImageAnalysisSkill map(com.azure.search.documents.indexes.implemen
}

if (obj.getDetails() != null) {
List<ImageDetail> details =
obj.getDetails().stream().map(ImageDetailConverter::map).collect(Collectors.toList());
imageAnalysisSkill.setDetails(details);
imageAnalysisSkill.setDetails(obj.getDetails().stream()
.map(ImageDetailConverter::map)
.collect(Collectors.toList()));
}
return imageAnalysisSkill;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@ public static LuceneStandardAnalyzer map(com.azure.search.documents.indexes.impl
luceneStandardAnalyzer.setMaxTokenLength(maxTokenLength);

if (obj.getStopwords() != null) {
List<String> stopwords = new ArrayList<>(obj.getStopwords());
luceneStandardAnalyzer.setStopwords(stopwords);
luceneStandardAnalyzer.setStopwords(obj.getStopwords());
}
return luceneStandardAnalyzer;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@
package com.azure.search.documents.implementation.converters;

import com.azure.search.documents.indexes.models.NGramTokenizer;
import com.azure.search.documents.indexes.models.TokenCharacterKind;

import java.util.List;
import java.util.stream.Collectors;
Expand All @@ -27,9 +26,9 @@ public static NGramTokenizer map(com.azure.search.documents.indexes.implementati
nGramTokenizer.setMaxGram(maxGram);

if (obj.getTokenChars() != null) {
List<TokenCharacterKind> tokenChars =
obj.getTokenChars().stream().map(TokenCharacterKindConverter::map).collect(Collectors.toList());
nGramTokenizer.setTokenChars(tokenChars);
nGramTokenizer.setTokenChars(obj.getTokenChars().stream()
.map(TokenCharacterKindConverter::map)
.collect(Collectors.toList()));
}

Integer minGram = obj.getMinGram();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,13 @@ public static PatternAnalyzer map(com.azure.search.documents.indexes.implementat
patternAnalyzer.setPattern(pattern);

if (obj.getFlags() != null) {
List<RegexFlags> regexFlags =
Arrays.stream(obj.getFlags().toString().split("\\|")).map(RegexFlags::fromString).collect(Collectors.toList());
patternAnalyzer.setFlags(regexFlags);
patternAnalyzer.setFlags(Arrays.stream(obj.getFlags().toString().split("\\|"))
.map(RegexFlags::fromString)
.collect(Collectors.toList()));
}

if (obj.getStopwords() != null) {
List<String> stopwords = new ArrayList<>(obj.getStopwords());
patternAnalyzer.setStopwords(stopwords);
patternAnalyzer.setStopwords(obj.getStopwords());
}
return patternAnalyzer;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
import com.azure.search.documents.indexes.models.RegexFlags;

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

/**
Expand All @@ -28,9 +27,9 @@ public static PatternTokenizer map(com.azure.search.documents.indexes.implementa
patternTokenizer.setPattern(pattern);

if (obj.getFlags() != null) {
List<RegexFlags> regexFlags =
Arrays.stream(obj.getFlags().toString().split("\\|")).map(RegexFlags::fromString).collect(Collectors.toList());
patternTokenizer.setFlags(regexFlags);
patternTokenizer.setFlags(Arrays.stream(obj.getFlags().toString().split("\\|"))
.map(RegexFlags::fromString)
.collect(Collectors.toList()));
}

Integer group = obj.getGroup();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@

package com.azure.search.documents.implementation.converters;

import com.azure.search.documents.indexes.models.ScoringFunction;
import com.azure.search.documents.indexes.models.ScoringFunctionAggregation;
import com.azure.search.documents.indexes.models.ScoringProfile;
import com.azure.search.documents.indexes.models.TextWeights;
Expand All @@ -26,9 +25,9 @@ public static ScoringProfile map(com.azure.search.documents.indexes.implementati
ScoringProfile scoringProfile = new ScoringProfile(obj.getName());

if (obj.getFunctions() != null) {
List<ScoringFunction> functions =
obj.getFunctions().stream().map(ScoringFunctionConverter::map).collect(Collectors.toList());
scoringProfile.setFunctions(functions);
scoringProfile.setFunctions(obj.getFunctions().stream()
.map(ScoringFunctionConverter::map)
.collect(Collectors.toList()));
}

if (obj.getTextWeights() != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,7 @@ public static StopAnalyzer map(com.azure.search.documents.indexes.implementation
StopAnalyzer stopAnalyzer = new StopAnalyzer(obj.getName());

if (obj.getStopwords() != null) {
List<String> stopwords = new ArrayList<>(obj.getStopwords());
stopAnalyzer.setStopwords(stopwords);
stopAnalyzer.setStopwords(obj.getStopwords());
}
return stopAnalyzer;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,7 @@ public static WordDelimiterTokenFilter map(com.azure.search.documents.indexes.im
wordDelimiterTokenFilter.setNumbersCatenated(catenateNumbers);

if (obj.getProtectedWords() != null) {
List<String> protectedWords = new ArrayList<>(obj.getProtectedWords());
wordDelimiterTokenFilter.setProtectedWords(protectedWords);
wordDelimiterTokenFilter.setProtectedWords(obj.getProtectedWords());
}

Boolean generateNumberParts = obj.isGenerateNumberParts();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@

import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.Arrays;
import java.util.List;

/**
Expand Down Expand Up @@ -201,8 +203,8 @@ public List<TokenFilterName> getTokenFilters() {
* @param tokenFilters the tokenFilters value to set.
* @return the AnalyzeRequest object itself.
*/
public AnalyzeTextOptions setTokenFilters(List<TokenFilterName> tokenFilters) {
this.tokenFilters = tokenFilters;
public AnalyzeTextOptions setTokenFilters(TokenFilterName... tokenFilters) {
this.tokenFilters = (tokenFilters == null) ? null : Arrays.asList(tokenFilters);
return this;
}

Expand All @@ -225,8 +227,8 @@ public List<CharFilterName> getCharFilters() {
* @param charFilters the charFilters value to set.
* @return the AnalyzeRequest object itself.
*/
public AnalyzeTextOptions setCharFilters(List<CharFilterName> charFilters) {
this.charFilters = charFilters;
public AnalyzeTextOptions setCharFilters(CharFilterName... charFilters) {
this.charFilters = (charFilters == null) ? null : Arrays.asList(charFilters);
return this;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,11 @@
import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonSetter;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;

import java.util.Arrays;
import java.util.List;

/**
Expand Down Expand Up @@ -58,6 +61,18 @@ public List<CjkBigramTokenFilterScripts> getIgnoreScripts() {
* @param ignoreScripts the ignoreScripts value to set.
* @return the CjkBigramTokenFilter object itself.
*/
public CjkBigramTokenFilter setIgnoreScripts(CjkBigramTokenFilterScripts... ignoreScripts) {
    // A null varargs array clears the property; otherwise wrap the array in a List view.
    if (ignoreScripts == null) {
        this.ignoreScripts = null;
    } else {
        this.ignoreScripts = Arrays.asList(ignoreScripts);
    }
    return this;
}

/**
* Set the ignoreScripts property: The scripts to ignore.
*
* @param ignoreScripts the ignoreScripts value to set.
* @return the CjkBigramTokenFilter object itself.
*/
@JsonSetter
public CjkBigramTokenFilter setIgnoreScripts(List<CjkBigramTokenFilterScripts> ignoreScripts) {
this.ignoreScripts = ignoreScripts;
return this;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@

import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonSetter;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;

import java.util.Arrays;
import java.util.List;

/**
Expand Down Expand Up @@ -102,6 +105,21 @@ public List<TokenFilterName> getTokenFilters() {
* @param tokenFilters the tokenFilters value to set.
* @return the CustomAnalyzer object itself.
*/
public CustomAnalyzer setTokenFilters(TokenFilterName... tokenFilters) {
    // Null clears the property; a non-null varargs array is stored as a List view.
    if (tokenFilters == null) {
        this.tokenFilters = null;
    } else {
        this.tokenFilters = Arrays.asList(tokenFilters);
    }
    return this;
}

/**
* Set the tokenFilters property: A list of token filters used to filter
* out or modify the tokens generated by a tokenizer. For example, you can
* specify a lowercase filter that converts all characters to lowercase.
* The filters are run in the order in which they are listed.
*
* @param tokenFilters the tokenFilters value to set.
* @return the CustomAnalyzer object itself.
*/
@JsonSetter
public CustomAnalyzer setTokenFilters(List<TokenFilterName> tokenFilters) {
this.tokenFilters = tokenFilters;
return this;
Expand All @@ -128,6 +146,21 @@ public List<CharFilterName> getCharFilters() {
* @param charFilters the charFilters value to set.
* @return the CustomAnalyzer object itself.
*/
public CustomAnalyzer setCharFilters(CharFilterName... charFilters) {
    // Null clears the property; a non-null varargs array is stored as a List view.
    if (charFilters == null) {
        this.charFilters = null;
    } else {
        this.charFilters = Arrays.asList(charFilters);
    }
    return this;
}

/**
* Set the charFilters property: A list of character filters used to
* prepare input text before it is processed by the tokenizer. For
* instance, they can replace certain characters or symbols. The filters
* are run in the order in which they are listed.
*
* @param charFilters the charFilters value to set.
* @return the CustomAnalyzer object itself.
*/
@JsonSetter
public CustomAnalyzer setCharFilters(List<CharFilterName> charFilters) {
this.charFilters = charFilters;
return this;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,11 @@

import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonSetter;
import com.fasterxml.jackson.annotation.JsonTypeInfo;
import com.fasterxml.jackson.annotation.JsonTypeName;

import java.util.Arrays;
import java.util.List;

/**
Expand Down Expand Up @@ -106,6 +109,18 @@ public List<TokenCharacterKind> getTokenChars() {
* @param tokenChars the tokenChars value to set.
* @return the EdgeNGramTokenizer object itself.
*/
public EdgeNGramTokenizer setTokenChars(TokenCharacterKind... tokenChars) {
    // Null clears the property; a non-null varargs array is stored as a List view.
    if (tokenChars == null) {
        this.tokenChars = null;
    } else {
        this.tokenChars = Arrays.asList(tokenChars);
    }
    return this;
}

/**
* Set the tokenChars property: Character classes to keep in the tokens.
*
* @param tokenChars the tokenChars value to set.
* @return the EdgeNGramTokenizer object itself.
*/
@JsonSetter
public EdgeNGramTokenizer setTokenChars(List<TokenCharacterKind> tokenChars) {
this.tokenChars = tokenChars;
return this;
Expand Down
Loading

0 comments on commit f627f82

Please sign in to comment.