
Commit 40ca8fe

feat(all): auto-regenerate discovery clients (#3347)
1 parent: a590b9a

26 files changed: +3,018 / -50 lines

aiplatform/v1/aiplatform-api.json

Lines changed: 220 additions & 5 deletions
@@ -15697,6 +15697,35 @@
                   "https://www.googleapis.com/auth/cloud-platform"
                 ]
               },
+              "embedContent": {
+                "description": "Embed content with multimodal inputs.",
+                "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/publishers/{publishersId}/models/{modelsId}:embedContent",
+                "httpMethod": "POST",
+                "id": "aiplatform.projects.locations.publishers.models.embedContent",
+                "parameterOrder": [
+                  "model"
+                ],
+                "parameters": {
+                  "model": {
+                    "description": "Required. The name of the publisher model requested to serve the prediction. Format: `projects/{project}/locations/{location}/publishers/*/models/*`",
+                    "location": "path",
+                    "pattern": "^projects/[^/]+/locations/[^/]+/publishers/[^/]+/models/[^/]+$",
+                    "required": true,
+                    "type": "string"
+                  }
+                },
+                "path": "v1/{+model}:embedContent",
+                "request": {
+                  "$ref": "GoogleCloudAiplatformV1EmbedContentRequest"
+                },
+                "response": {
+                  "$ref": "GoogleCloudAiplatformV1EmbedContentResponse"
+                },
+                "scopes": [
+                  "https://www.googleapis.com/auth/cloud-platform",
+                  "https://www.googleapis.com/auth/cloud-platform.read-only"
+                ]
+              },
               "fetchPredictOperation": {
                 "description": "Fetch an asynchronous online prediction operation.",
                 "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/publishers/{publishersId}/models/{modelsId}:fetchPredictOperation",
@@ -21198,7 +21227,7 @@
       }
     }
   },
-  "revision": "20251003",
+  "revision": "20251010",
   "rootUrl": "https://aiplatform.googleapis.com/",
   "schemas": {
     "CloudAiLargeModelsVisionGenerateVideoResponse": {
@@ -24271,7 +24300,9 @@
           "GETTING_CONTAINER_IMAGE",
           "STARTING_MODEL_SERVER",
           "FINISHING_UP",
-          "DEPLOYMENT_TERMINATED"
+          "DEPLOYMENT_TERMINATED",
+          "SUCCESSFULLY_DEPLOYED",
+          "FAILED_TO_DEPLOY"
         ],
         "enumDescriptions": [
           "Default value. This value is unused.",
@@ -24282,7 +24313,9 @@
           "The deployment is getting the container image for the model server.",
           "The deployment is starting the model server.",
           "The deployment is performing finalization steps.",
-          "The deployment has terminated."
+          "The deployment has terminated.",
+          "The deployment has succeeded.",
+          "The deployment has failed."
         ],
         "readOnly": true,
         "type": "string"
@@ -25364,7 +25397,9 @@
           "GETTING_CONTAINER_IMAGE",
           "STARTING_MODEL_SERVER",
           "FINISHING_UP",
-          "DEPLOYMENT_TERMINATED"
+          "DEPLOYMENT_TERMINATED",
+          "SUCCESSFULLY_DEPLOYED",
+          "FAILED_TO_DEPLOY"
         ],
         "enumDescriptions": [
           "Default value. This value is unused.",
@@ -25375,7 +25410,9 @@
           "The deployment is getting the container image for the model server.",
           "The deployment is starting the model server.",
           "The deployment is performing finalization steps.",
-          "The deployment has terminated."
+          "The deployment has terminated.",
+          "The deployment has succeeded.",
+          "The deployment has failed."
         ],
         "readOnly": true,
         "type": "string"
@@ -26063,6 +26100,90 @@
       },
       "type": "object"
     },
+    "GoogleCloudAiplatformV1EmbedContentRequest": {
+      "description": "Request message for PredictionService.EmbedContent.",
+      "id": "GoogleCloudAiplatformV1EmbedContentRequest",
+      "properties": {
+        "autoTruncate": {
+          "description": "Optional. Whether to silently truncate the input content if it's longer than the maximum sequence length.",
+          "type": "boolean"
+        },
+        "content": {
+          "$ref": "GoogleCloudAiplatformV1Content",
+          "description": "Required. Input content to be embedded. Required."
+        },
+        "outputDimensionality": {
+          "description": "Optional. Optional reduced dimension for the output embedding. If set, excessive values in the output embedding are truncated from the end.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "taskType": {
+          "description": "Optional. The task type of the embedding.",
+          "enum": [
+            "UNSPECIFIED",
+            "RETRIEVAL_QUERY",
+            "RETRIEVAL_DOCUMENT",
+            "SEMANTIC_SIMILARITY",
+            "CLASSIFICATION",
+            "CLUSTERING",
+            "QUESTION_ANSWERING",
+            "FACT_VERIFICATION",
+            "CODE_RETRIEVAL_QUERY"
+          ],
+          "enumDescriptions": [
+            "Unset value, which will default to one of the other enum values.",
+            "Specifies the given text is a query in a search/retrieval setting.",
+            "Specifies the given text is a document from the corpus being searched.",
+            "Specifies the given text will be used for STS.",
+            "Specifies that the given text will be classified.",
+            "Specifies that the embeddings will be used for clustering.",
+            "Specifies that the embeddings will be used for question answering.",
+            "Specifies that the embeddings will be used for fact verification.",
+            "Specifies that the embeddings will be used for code retrieval."
+          ],
+          "type": "string"
+        },
+        "title": {
+          "description": "Optional. An optional title for the text.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "GoogleCloudAiplatformV1EmbedContentResponse": {
+      "description": "Response message for PredictionService.EmbedContent.",
+      "id": "GoogleCloudAiplatformV1EmbedContentResponse",
+      "properties": {
+        "embedding": {
+          "$ref": "GoogleCloudAiplatformV1EmbedContentResponseEmbedding",
+          "description": "The embedding generated from the input content."
+        },
+        "truncated": {
+          "description": "Whether the input content was truncated before generating the embedding.",
+          "type": "boolean"
+        },
+        "usageMetadata": {
+          "$ref": "GoogleCloudAiplatformV1UsageMetadata",
+          "description": "Metadata about the response(s)."
+        }
+      },
+      "type": "object"
+    },
+    "GoogleCloudAiplatformV1EmbedContentResponseEmbedding": {
+      "description": "A list of floats representing an embedding.",
+      "id": "GoogleCloudAiplatformV1EmbedContentResponseEmbedding",
+      "properties": {
+        "values": {
+          "description": "Embedding vector values.",
+          "items": {
+            "format": "float",
+            "type": "number"
+          },
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudAiplatformV1EncryptionSpec": {
       "description": "Represents a customer-managed encryption key spec that can be applied to a top-level resource.",
       "id": "GoogleCloudAiplatformV1EncryptionSpec",
@@ -38080,6 +38201,13 @@
         },
         "type": "array"
       },
+      "labels": {
+        "additionalProperties": {
+          "type": "string"
+        },
+        "description": "Optional. The user labels for Imagen billing usage only. Only Imagen supports labels. For other use cases, it will be ignored.",
+        "type": "object"
+      },
       "parameters": {
         "description": "The parameters that govern the prediction. The schema of the parameters may be specified via Endpoint's DeployedModels' Model's PredictSchemata's parameters_schema_uri.",
         "type": "any"
@@ -49940,6 +50068,93 @@
       },
       "type": "object"
     },
+    "GoogleCloudAiplatformV1UsageMetadata": {
+      "description": "Usage metadata about the content generation request and response. This message provides a detailed breakdown of token usage and other relevant metrics.",
+      "id": "GoogleCloudAiplatformV1UsageMetadata",
+      "properties": {
+        "cacheTokensDetails": {
+          "description": "Output only. A detailed breakdown of the token count for each modality in the cached content.",
+          "items": {
+            "$ref": "GoogleCloudAiplatformV1ModalityTokenCount"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
+        "cachedContentTokenCount": {
+          "description": "Output only. The number of tokens in the cached content that was used for this request.",
+          "format": "int32",
+          "readOnly": true,
+          "type": "integer"
+        },
+        "candidatesTokenCount": {
+          "description": "The total number of tokens in the generated candidates.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "candidatesTokensDetails": {
+          "description": "Output only. A detailed breakdown of the token count for each modality in the generated candidates.",
+          "items": {
+            "$ref": "GoogleCloudAiplatformV1ModalityTokenCount"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
+        "promptTokenCount": {
+          "description": "The total number of tokens in the prompt. This includes any text, images, or other media provided in the request. When `cached_content` is set, this also includes the number of tokens in the cached content.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "promptTokensDetails": {
+          "description": "Output only. A detailed breakdown of the token count for each modality in the prompt.",
+          "items": {
+            "$ref": "GoogleCloudAiplatformV1ModalityTokenCount"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
+        "thoughtsTokenCount": {
+          "description": "Output only. The number of tokens that were part of the model's generated \"thoughts\" output, if applicable.",
+          "format": "int32",
+          "readOnly": true,
+          "type": "integer"
+        },
+        "toolUsePromptTokenCount": {
+          "description": "Output only. The number of tokens in the results from tool executions, which are provided back to the model as input, if applicable.",
+          "format": "int32",
+          "readOnly": true,
+          "type": "integer"
+        },
+        "toolUsePromptTokensDetails": {
+          "description": "Output only. A detailed breakdown by modality of the token counts from the results of tool executions, which are provided back to the model as input.",
+          "items": {
+            "$ref": "GoogleCloudAiplatformV1ModalityTokenCount"
+          },
+          "readOnly": true,
+          "type": "array"
+        },
+        "totalTokenCount": {
+          "description": "The total number of tokens for the entire request. This is the sum of `prompt_token_count`, `candidates_token_count`, `tool_use_prompt_token_count`, and `thoughts_token_count`.",
+          "format": "int32",
+          "type": "integer"
+        },
+        "trafficType": {
+          "description": "Output only. The traffic type for this request.",
+          "enum": [
+            "TRAFFIC_TYPE_UNSPECIFIED",
+            "ON_DEMAND",
+            "PROVISIONED_THROUGHPUT"
+          ],
+          "enumDescriptions": [
+            "Unspecified request traffic type.",
+            "Type for Pay-As-You-Go traffic.",
+            "Type for Provisioned Throughput traffic."
+          ],
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GoogleCloudAiplatformV1UserActionReference": {
      "description": "References an API call. It contains more information about long running operation and Jobs that are triggered by the API call.",
      "id": "GoogleCloudAiplatformV1UserActionReference",
