Code changes to add multi API support #14706

Merged
26 changes: 18 additions & 8 deletions sdk/search/search-documents/review/search-documents.api.md
@@ -108,6 +108,12 @@ export interface BaseLexicalAnalyzer {
odatatype: "#Microsoft.Azure.Search.CustomAnalyzer" | "#Microsoft.Azure.Search.PatternAnalyzer" | "#Microsoft.Azure.Search.StandardAnalyzer" | "#Microsoft.Azure.Search.StopAnalyzer";
}

+// @public
+export interface BaseLexicalNormalizer {
+name: string;
+odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
+}

// @public
export interface BaseLexicalTokenizer {
name: string;
@@ -314,7 +320,8 @@ export type CustomEntityLookupSkill = BaseSearchIndexerSkill & {
export type CustomEntityLookupSkillLanguage = string;

// @public
-export type CustomNormalizer = LexicalNormalizer & {
+export type CustomNormalizer = BaseLexicalNormalizer & {
+odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
tokenFilters?: TokenFilterName[];
charFilters?: CharFilterName[];
};
@@ -1289,10 +1296,7 @@ export type LexicalAnalyzer = CustomAnalyzer | PatternAnalyzer | LuceneStandardA
export type LexicalAnalyzerName = string;

// @public
-export interface LexicalNormalizer {
-name: string;
-odatatype: string;
-}
+export type LexicalNormalizer = CustomNormalizer;

// @public
export type LexicalNormalizerName = string;
@@ -1548,7 +1552,9 @@ export class SearchClient<T> implements IndexDocumentsClient<T> {
}

// @public
-export type SearchClientOptions = PipelineOptions;
+export interface SearchClientOptions extends PipelineOptions {
+apiVersion?: string;
+}

// @public
export interface SearchDocumentsPageResult<T> extends SearchDocumentsResultBase {
@@ -1618,7 +1624,9 @@ export class SearchIndexClient {
}

// @public
-export type SearchIndexClientOptions = PipelineOptions;
+export interface SearchIndexClientOptions extends PipelineOptions {
+apiVersion?: string;
+}

// @public
export interface SearchIndexer {
@@ -1665,7 +1673,9 @@ export class SearchIndexerClient {
}

// @public
-export type SearchIndexerClientOptions = PipelineOptions;
+export interface SearchIndexerClientOptions extends PipelineOptions {
+apiVersion?: string;
+}

// @public
export interface SearchIndexerDataContainer {
@@ -92,6 +92,7 @@ export type CharFilterUnion =
| CharFilter
| MappingCharFilter
| PatternReplaceCharFilter;
+export type LexicalNormalizerUnion = LexicalNormalizer | CustomNormalizer;
export type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity;

/** Represents a datasource definition, which can be used to configure an indexer. */
@@ -595,7 +596,7 @@ export interface SearchIndex {
/** The character filters for the index. */
charFilters?: CharFilterUnion[];
/** The normalizers for the index. */
-normalizers?: LexicalNormalizer[];
+normalizers?: LexicalNormalizerUnion[];
/** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */
encryptionKey?: SearchResourceEncryptionKey | null;
/** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */
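To make the change concrete, here is a minimal sketch (not from this PR; the index name, fields, and normalizer name are hypothetical) of an index definition that exercises the widened normalizers property:

```ts
import { SearchIndex } from "@azure/search-documents";

// Sketch of an index definition using the new normalizer support.
const index: SearchIndex = {
  name: "hotels",
  fields: [
    { type: "Edm.String", name: "id", key: true },
    { type: "Edm.String", name: "country", filterable: true }
  ],
  normalizers: [
    {
      // The union currently has a single member, so the discriminator
      // must be the CustomNormalizer value.
      odatatype: "#Microsoft.Azure.Search.CustomNormalizer",
      name: "my-normalizer",
      tokenFilters: ["lowercase", "asciifolding"]
    }
  ]
};
```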
@@ -761,8 +762,8 @@ export interface CharFilter {

/** Base type for normalizers. */
export interface LexicalNormalizer {
-/** Identifies the concrete type of the normalizer. */
-odatatype: string;
+/** Polymorphic discriminator, which specifies the different types this object can be */
+odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
xirzec (Member) commented:

I'm confused about this. Why would a generic LexicalNormalizer have the same odatatype as a CustomNormalizer?

Author (Member) replied:

@xirzec Yeah, it was a little confusing to me too. Let me explain with an example. For the following swagger:

"Similarity": {
  "discriminator": "@odata.type",
  .........
}

"ClassicSimilarity": {
  "x-ms-discriminator-value": "#Microsoft.Azure.Search.ClassicSimilarity",
  .........
}

"BM25Similarity": {
  "x-ms-discriminator-value": "#Microsoft.Azure.Search.BM25Similarity",
  .........
}

For the above swagger, the SDK will be generated as:

export interface Similarity {
  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity" | "#Microsoft.Azure.Search.BM25Similarity";
  .........
}

export type ClassicSimilarity = Similarity & {
  odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
  .........
};

export type BM25Similarity = Similarity & {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity";
  .........
}

In the above code, look at the Similarity interface. It is the base class, but its odatatype is actually the union of the odatatype values of its two subclasses (ClassicSimilarity & BM25Similarity).

Now, let us look at LexicalNormalizer. The swagger is:

"LexicalNormalizer": {
  "discriminator": "@odata.type",
   ......
}

"CustomNormalizer": {
  "x-ms-discriminator-value": "#Microsoft.Azure.Search.CustomNormalizer",
   ..........
}

For the above swagger, the SDK looks like:

export interface LexicalNormalizer {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
  .........
}

export type CustomNormalizer = LexicalNormalizer & {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
  .........
}

In the above code, the base class LexicalNormalizer has its odatatype set to that of CustomNormalizer, which may look a little odd.

But suppose there were another normalizer in the swagger, such as this:

"SampleNormalizer": {
  "x-ms-discriminator-value": "#Microsoft.Azure.Search.SampleNormalizer",
   ..........
}

then the resulting SDK would look like:

export interface LexicalNormalizer {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer" | "#Microsoft.Azure.Search.SampleNormalizer";
  .........
}

export type CustomNormalizer = LexicalNormalizer & {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
  .........
}

export type SampleNormalizer = LexicalNormalizer & {
  odatatype: "#Microsoft.Azure.Search.SampleNormalizer";
  .........
}

The confusion arises because we have only one subtype of normalizer, so the union collapses to a single value and looks a little odd. Am I making sense?
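To see why the literal discriminator matters in practice: it is what lets TypeScript narrow the union. A small self-contained sketch, reusing the hypothetical SampleNormalizer from the example above:

```ts
// Minimal stand-ins for the generated types; SampleNormalizer is the
// hypothetical second subtype from the example above.
interface CustomNormalizer {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
  name: string;
  tokenFilters?: string[];
}

interface SampleNormalizer {
  odatatype: "#Microsoft.Azure.Search.SampleNormalizer";
  name: string;
}

type LexicalNormalizerUnion = CustomNormalizer | SampleNormalizer;

function describe(normalizer: LexicalNormalizerUnion): string {
  switch (normalizer.odatatype) {
    case "#Microsoft.Azure.Search.CustomNormalizer":
      // Narrowed to CustomNormalizer: tokenFilters is available here.
      return `custom normalizer with ${normalizer.tokenFilters?.length ?? 0} token filter(s)`;
    case "#Microsoft.Azure.Search.SampleNormalizer":
      return `sample normalizer named ${normalizer.name}`;
  }
}
```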

xirzec (Member) replied:

Ohh! This does make sense and helps a lot. Thanks! 👍

/** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */
name: string;
}
@@ -1654,6 +1655,8 @@ export type PatternReplaceCharFilter = CharFilter & {

/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
export type CustomNormalizer = LexicalNormalizer & {
+/** Polymorphic discriminator, which specifies the different types this object can be */
+odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
/** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
tokenFilters?: TokenFilterName[];
/** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
@@ -1737,6 +1737,11 @@ export const LexicalNormalizer: coreHttp.CompositeMapper = {
type: {
name: "Composite",
className: "LexicalNormalizer",
+uberParent: "LexicalNormalizer",
+polymorphicDiscriminator: {
+serializedName: "@odata\\.type",
+clientName: "@odata\\.type"
+},
modelProperties: {
odatatype: {
serializedName: "@odata\\.type",
@@ -4578,6 +4583,8 @@ export const CustomNormalizer: coreHttp.CompositeMapper = {
type: {
name: "Composite",
className: "CustomNormalizer",
+uberParent: "LexicalNormalizer",
+polymorphicDiscriminator: LexicalNormalizer.type.polymorphicDiscriminator,
modelProperties: {
...LexicalNormalizer.type.modelProperties,
tokenFilters: {
@@ -4656,6 +4663,7 @@ export let discriminators = {
LexicalTokenizer: LexicalTokenizer,
TokenFilter: TokenFilter,
CharFilter: CharFilter,
+LexicalNormalizer: LexicalNormalizer,
Similarity: Similarity,
"DataChangeDetectionPolicy.#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy": HighWaterMarkChangeDetectionPolicy,
"DataChangeDetectionPolicy.#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy": SqlIntegratedChangeTrackingPolicy,
@@ -4723,6 +4731,7 @@ export let discriminators = {
"TokenFilter.#Microsoft.Azure.Search.WordDelimiterTokenFilter": WordDelimiterTokenFilter,
"CharFilter.#Microsoft.Azure.Search.MappingCharFilter": MappingCharFilter,
"CharFilter.#Microsoft.Azure.Search.PatternReplaceCharFilter": PatternReplaceCharFilter,
"LexicalNormalizer.#Microsoft.Azure.Search.CustomNormalizer": CustomNormalizer,
"Similarity.#Microsoft.Azure.Search.ClassicSimilarity": ClassicSimilarity,
"Similarity.#Microsoft.Azure.Search.BM25Similarity": BM25Similarity
};
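For context, a simplified sketch of how a discriminators map like this one can be consulted during deserialization. This is not the actual @azure/core-http implementation, only the idea behind the uberParent/polymorphicDiscriminator wiring above:

```ts
// Entries in the map are keyed either by the uber parent's class name
// (the base mapper) or by "<UberParent>.<discriminator value>" (a
// concrete subtype mapper).
type MapperTable = Record<string, unknown>;

function resolveMapper(
  discriminators: MapperTable,
  uberParent: string,
  payload: Record<string, unknown>
): unknown {
  const discriminatorValue = payload["@odata.type"];
  if (typeof discriminatorValue === "string") {
    // e.g. "LexicalNormalizer.#Microsoft.Azure.Search.CustomNormalizer"
    const concrete = discriminators[`${uberParent}.${discriminatorValue}`];
    if (concrete) {
      return concrete;
    }
  }
  // Fall back to the base mapper registered under the uber parent itself.
  return discriminators[uberParent];
}
```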
7 changes: 4 additions & 3 deletions sdk/search/search-documents/src/index.ts
@@ -110,7 +110,8 @@
SearchResourceEncryptionKey,
SearchIndexStatistics,
SearchServiceStatistics,
-SearchIndexer
+SearchIndexer,
+LexicalNormalizer
} from "./serviceModels";
export { default as GeographyPoint } from "./geographyPoint";
export { odata } from "./odata";
@@ -275,13 +276,13 @@
LexicalAnalyzer as BaseLexicalAnalyzer,
CharFilter as BaseCharFilter,
DataDeletionDetectionPolicy as BaseDataDeletionDetectionPolicy,
-LexicalNormalizer,
LexicalNormalizerName,
KnownLexicalNormalizerName,
CustomNormalizer,
TokenFilterName,
KnownTokenFilterName,
CharFilterName,
-KnownCharFilterName
+KnownCharFilterName,
+LexicalNormalizer as BaseLexicalNormalizer
} from "./generated/service/models";
export { AzureKeyCredential } from "@azure/core-auth";
18 changes: 16 additions & 2 deletions sdk/search/search-documents/src/searchClient.ts
@@ -51,7 +51,12 @@ import { IndexDocumentsClient } from "./searchIndexingBufferedSender";
/**
* Client options used to configure Cognitive Search API requests.
*/
-export type SearchClientOptions = PipelineOptions;
+export interface SearchClientOptions extends PipelineOptions {
+/**
+* The API version to use when communicating with the service.
+*/
+apiVersion?: string;
+}

/**
* Class used to perform operations against a search index,
@@ -146,7 +151,16 @@ export class SearchClient<T> implements IndexDocumentsClient<T> {
pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("none"));
}

-this.client = new GeneratedClient(this.endpoint, this.indexName, this.apiVersion, pipeline);
+let apiVersion = this.apiVersion;
+
+if (options.apiVersion) {
+if (!["2020-06-30-Preview", "2020-06-30"].includes(options.apiVersion)) {
+throw new Error(`Invalid Api Version: ${options.apiVersion}`);
+}
+apiVersion = options.apiVersion;
+}
+
+this.client = new GeneratedClient(this.endpoint, this.indexName, apiVersion, pipeline);
}

/**
Expand Down
18 changes: 16 additions & 2 deletions sdk/search/search-documents/src/searchIndexClient.ts
@@ -45,7 +45,12 @@ import { SearchClient, SearchClientOptions as GetSearchClientOptions } from "./s
/**
* Client options used to configure Cognitive Search API requests.
*/
-export type SearchIndexClientOptions = PipelineOptions;
+export interface SearchIndexClientOptions extends PipelineOptions {
+/**
+* The API version to use when communicating with the service.
+*/
+apiVersion?: string;
+}

/**
* Class to perform operations to manage
@@ -137,7 +142,16 @@ export class SearchIndexClient {
pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("minimal"));
}

-this.client = new GeneratedClient(this.endpoint, this.apiVersion, pipeline);
+let apiVersion = this.apiVersion;
+
+if (options.apiVersion) {
+if (!["2020-06-30-Preview", "2020-06-30"].includes(options.apiVersion)) {
+throw new Error(`Invalid Api Version: ${options.apiVersion}`);
+}
+apiVersion = options.apiVersion;
+}
+
+this.client = new GeneratedClient(this.endpoint, apiVersion, pipeline);
}

private async *listIndexesPage(
18 changes: 16 additions & 2 deletions sdk/search/search-documents/src/searchIndexerClient.ts
@@ -44,7 +44,12 @@ import { odataMetadataPolicy } from "./odataMetadataPolicy";
/**
* Client options used to configure Cognitive Search API requests.
*/
-export type SearchIndexerClientOptions = PipelineOptions;
+export interface SearchIndexerClientOptions extends PipelineOptions {
+/**
+* The API version to use when communicating with the service.
+*/
+apiVersion?: string;
+}

/**
* Class to perform operations to manage
@@ -128,7 +133,16 @@ export class SearchIndexerClient {
pipeline.requestPolicyFactories.unshift(odataMetadataPolicy("minimal"));
}

-this.client = new GeneratedClient(this.endpoint, this.apiVersion, pipeline);
+let apiVersion = this.apiVersion;
+
+if (options.apiVersion) {
+if (!["2020-06-30-Preview", "2020-06-30"].includes(options.apiVersion)) {
+throw new Error(`Invalid Api Version: ${options.apiVersion}`);
+}
+apiVersion = options.apiVersion;
+}
+
+this.client = new GeneratedClient(this.endpoint, apiVersion, pipeline);
}

/**
9 changes: 7 additions & 2 deletions sdk/search/search-documents/src/serviceModels.ts
@@ -74,8 +74,8 @@ import {
FieldMapping,
IndexingParameters,
IndexingSchedule,
-LexicalNormalizer,
-LexicalNormalizerName
+LexicalNormalizerName,
+CustomNormalizer
} from "./generated/service/models";

import { PagedAsyncIterableIterator } from "@azure/core-paging";
@@ -660,6 +660,11 @@ export type TokenFilter =
*/
export type CharFilter = MappingCharFilter | PatternReplaceCharFilter;

+/**
+* Contains the possible cases for LexicalNormalizer.
+*/
+export type LexicalNormalizer = CustomNormalizer;

/**
* Contains the possible cases for ScoringFunction.
*/
4 changes: 2 additions & 2 deletions sdk/search/search-documents/src/serviceUtils.ts
@@ -46,7 +46,6 @@
PatternAnalyzer as GeneratedPatternAnalyzer,
CustomAnalyzer,
PatternTokenizer,
-LexicalNormalizer,
LexicalNormalizerName
} from "./generated/service/models";
import {
@@ -70,7 +69,8 @@
DataDeletionDetectionPolicy,
SimilarityAlgorithm,
SearchResourceEncryptionKey,
-PatternAnalyzer
+PatternAnalyzer,
+LexicalNormalizer
} from "./serviceModels";
import { SuggestDocumentsResult, SuggestResult, SearchResult } from "./indexModels";
import {
10 changes: 10 additions & 0 deletions sdk/search/search-documents/swagger/Service.md
@@ -278,3 +278,13 @@
transform: >
$["x-ms-client-name"] = "tokenizerName"
```

### Add discriminator to LexicalNormalizer

```yaml
directive:
from: swagger-document
where: $.definitions.LexicalNormalizer
transform: >
$["discriminator"] = "@odata.type";
```
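
For reference, a before/after sketch of the base type this directive produces (the "before" shape matches the old generated model; the "after" matches the diff above):

```ts
// Before the directive, the generated base type used a plain string
// discriminator:
//
//   export interface LexicalNormalizer {
//     odatatype: string;
//     name: string;
//   }
//
// With the directive, odatatype becomes a union of the known subtype
// discriminator values (currently only CustomNormalizer), enabling the
// polymorphic (de)serialization wired up in the mappers:
export interface LexicalNormalizer {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
  name: string;
}
```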