From d42df74b57b3907b054292cf73228c8bf08499e4 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 30 May 2024 18:07:53 +0000 Subject: [PATCH 1/3] Update to latest models --- .../next-release/api-change-acm-23445.json | 5 + .../api-change-bedrockagent-86100.json | 5 + .../api-change-bedrockruntime-15865.json | 5 + .../api-change-cloudtrail-36744.json | 5 + .../api-change-connect-97102.json | 5 + .../api-change-emrserverless-68781.json | 5 + .../next-release/api-change-rds-71252.json | 5 + .../api-change-sagemaker-58609.json | 5 + .../acm/2015-12-08/endpoint-rule-set-1.json | 44 +- botocore/data/acm/2015-12-08/service-2.json | 1 + .../bedrock-agent/2023-06-05/service-2.json | 30 + .../bedrock-runtime/2023-09-30/service-2.json | 840 ++++++++++++++++++ .../data/cloudtrail/2013-11-01/service-2.json | 78 +- .../data/connect/2017-08-08/service-2.json | 10 +- .../2021-07-13/paginators-1.json | 6 + .../emr-serverless/2021-07-13/service-2.json | 247 ++++- botocore/data/rds/2014-10-31/service-2.json | 10 +- .../data/sagemaker/2017-07-24/service-2.json | 76 +- 18 files changed, 1323 insertions(+), 59 deletions(-) create mode 100644 .changes/next-release/api-change-acm-23445.json create mode 100644 .changes/next-release/api-change-bedrockagent-86100.json create mode 100644 .changes/next-release/api-change-bedrockruntime-15865.json create mode 100644 .changes/next-release/api-change-cloudtrail-36744.json create mode 100644 .changes/next-release/api-change-connect-97102.json create mode 100644 .changes/next-release/api-change-emrserverless-68781.json create mode 100644 .changes/next-release/api-change-rds-71252.json create mode 100644 .changes/next-release/api-change-sagemaker-58609.json diff --git a/.changes/next-release/api-change-acm-23445.json b/.changes/next-release/api-change-acm-23445.json new file mode 100644 index 0000000000..647973af68 --- /dev/null +++ b/.changes/next-release/api-change-acm-23445.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``acm``", + "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing." +} diff --git a/.changes/next-release/api-change-bedrockagent-86100.json b/.changes/next-release/api-change-bedrockagent-86100.json new file mode 100644 index 0000000000..5ef0feb893 --- /dev/null +++ b/.changes/next-release/api-change-bedrockagent-86100.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-agent``", + "description": "With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2." +} diff --git a/.changes/next-release/api-change-bedrockruntime-15865.json b/.changes/next-release/api-change-bedrockruntime-15865.json new file mode 100644 index 0000000000..7ed150636d --- /dev/null +++ b/.changes/next-release/api-change-bedrockruntime-15865.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-runtime``", + "description": "This release adds Converse and ConverseStream APIs to Bedrock Runtime" +} diff --git a/.changes/next-release/api-change-cloudtrail-36744.json b/.changes/next-release/api-change-cloudtrail-36744.json new file mode 100644 index 0000000000..75fb0d4a77 --- /dev/null +++ b/.changes/next-release/api-change-cloudtrail-36744.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``cloudtrail``", + "description": "CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. 
For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour." +} diff --git a/.changes/next-release/api-change-connect-97102.json b/.changes/next-release/api-change-connect-97102.json new file mode 100644 index 0000000000..2634af1eeb --- /dev/null +++ b/.changes/next-release/api-change-connect-97102.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``connect``", + "description": "Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API" +} diff --git a/.changes/next-release/api-change-emrserverless-68781.json b/.changes/next-release/api-change-emrserverless-68781.json new file mode 100644 index 0000000000..23343c662a --- /dev/null +++ b/.changes/next-release/api-change-emrserverless-68781.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``emr-serverless``", + "description": "The release adds support for spark structured streaming." +} diff --git a/.changes/next-release/api-change-rds-71252.json b/.changes/next-release/api-change-rds-71252.json new file mode 100644 index 0000000000..c95b8b33ab --- /dev/null +++ b/.changes/next-release/api-change-rds-71252.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``rds``", + "description": "Updates Amazon RDS documentation for Aurora Postgres DBname." +} diff --git a/.changes/next-release/api-change-sagemaker-58609.json b/.changes/next-release/api-change-sagemaker-58609.json new file mode 100644 index 0000000000..f2cfb5cf7a --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-58609.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "Adds Model Card information as a new component to Model Package. Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm." 
+} diff --git a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json index 671234b4f5..e0738bc0b1 100644 --- a/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json +++ b/botocore/data/acm/2015-12-08/endpoint-rule-set-1.json @@ -40,7 +40,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -83,7 +82,8 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -96,7 +96,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -110,7 +109,6 @@ "assign": "PartitionResult" } ], - "type": "tree", "rules": [ { "conditions": [ @@ -133,7 +131,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -168,7 +165,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -179,14 +175,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS and DualStack are enabled, but this partition does not support one or both", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -200,14 +198,12 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ @@ -216,18 +212,17 @@ }, "supportsFIPS" ] - } + }, + true ] } ], - "type": "tree", "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ - "aws-us-gov", { "fn": "getAttr", "argv": [ @@ -236,7 +231,8 @@ }, "name" ] - } + }, + "aws-us-gov" ] } ], @@ -256,14 +252,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "FIPS is enabled but this partition does not support FIPS", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [ @@ -277,7 +275,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [ @@ -297,7 +294,6 @@ ] } ], - "type": "tree", "rules": [ { "conditions": [], @@ -308,14 +304,16 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" }, { "conditions": [], "error": "DualStack is enabled but this partition does not support DualStack", "type": "error" } - ] + ], + "type": "tree" }, { "conditions": [], @@ -326,9 +324,11 @@ }, "type": "endpoint" } - ] + ], + "type": "tree" } - ] + ], + "type": "tree" }, { "conditions": [], diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index c3fac7b087..e2d91e26a1 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"acm", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"ACM", "serviceFullName":"AWS Certificate Manager", "serviceId":"ACM", diff --git a/botocore/data/bedrock-agent/2023-06-05/service-2.json b/botocore/data/bedrock-agent/2023-06-05/service-2.json index ec10d6386b..b76b1841ce 100644 --- a/botocore/data/bedrock-agent/2023-06-05/service-2.json +++ b/botocore/data/bedrock-agent/2023-06-05/service-2.json @@ -1546,6 +1546,16 @@ "min":20, "pattern":"^(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)$" }, + "BedrockEmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "dimensions":{ + "shape":"Dimensions", + "documentation":"

The dimensions details for the vector configuration used with the Bedrock embeddings model.

" + } + }, + "documentation":"

The vector configuration details for the Bedrock embeddings model.

" + }, "Boolean":{ "type":"boolean", "box":true @@ -2275,6 +2285,12 @@ "max":200, "min":1 }, + "Dimensions":{ + "type":"integer", + "box":true, + "max":4096, + "min":0 + }, "DisassociateAgentKnowledgeBaseRequest":{ "type":"structure", "required":[ @@ -2314,6 +2330,16 @@ "min":5, "pattern":"^DRAFT$" }, + "EmbeddingModelConfiguration":{ + "type":"structure", + "members":{ + "bedrockEmbeddingModelConfiguration":{ + "shape":"BedrockEmbeddingModelConfiguration", + "documentation":"

The vector configuration details for the Bedrock embeddings model.

" + } + }, + "documentation":"

The configuration details for the embeddings model.

" + }, "FailureReason":{ "type":"string", "max":2048, @@ -4653,6 +4679,10 @@ "embeddingModelArn":{ "shape":"BedrockEmbeddingModelArn", "documentation":"

The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.

" + }, + "embeddingModelConfiguration":{ + "shape":"EmbeddingModelConfiguration", + "documentation":"

The embeddings model configuration details for the vector model used in the knowledge base.

" } }, "documentation":"

Contains details about the model used to create vector embeddings for the knowledge base.

" diff --git a/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/botocore/data/bedrock-runtime/2023-09-30/service-2.json index 5d545913e0..97b0648310 100644 --- a/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -12,6 +12,48 @@ "uid":"bedrock-runtime-2023-09-30" }, "operations":{ + "Converse":{ + "name":"Converse", + "http":{ + "method":"POST", + "requestUri":"/model/{modelId}/converse", + "responseCode":200 + }, + "input":{"shape":"ConverseRequest"}, + "output":{"shape":"ConverseResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ModelTimeoutException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ModelNotReadyException"}, + {"shape":"ModelErrorException"} + ], + "documentation":"

Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide.

This operation requires permission for the bedrock:InvokeModel action.
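A minimal usage sketch, assuming boto3 surfaces this new operation as converse on the bedrock-runtime client; the region, model ID, and prompt are illustrative:

    import boto3

    # Create a Bedrock Runtime client; the region is illustrative.
    client = boto3.client("bedrock-runtime", region_name="us-east-1")

    # Send a single user message; the model ID is illustrative.
    response = client.converse(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",
        messages=[{"role": "user", "content": [{"text": "Hello, world"}]}],
    )

    # Per the ConverseResponse shape, the reply is a Message under output.
    print(response["output"]["message"]["content"][0]["text"])
    print(response["stopReason"], response["usage"]["totalTokens"])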

" + }, + "ConverseStream":{ + "name":"ConverseStream", + "http":{ + "method":"POST", + "requestUri":"/model/{modelId}/converse-stream", + "responseCode":200 + }, + "input":{"shape":"ConverseStreamRequest"}, + "output":{"shape":"ConverseStreamResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ModelTimeoutException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ModelNotReadyException"}, + {"shape":"ModelErrorException"} + ], + "documentation":"

Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide.

To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response.

For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide.

This operation requires permission for the bedrock:InvokeModelWithResponseStream action.
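A hedged sketch of consuming the stream, assuming boto3 surfaces the operation as converse_stream and yields the ConverseStreamOutput events defined in this file (client and model ID as in the Converse sketch above):

    response = client.converse_stream(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",
        messages=[{"role": "user", "content": [{"text": "Write a haiku."}]}],
    )

    # Each event carries exactly one member of the ConverseStreamOutput union.
    for event in response["stream"]:
        if "contentBlockDelta" in event:
            print(event["contentBlockDelta"]["delta"].get("text", ""), end="")
        elif "messageStop" in event:
            print("\nStop reason:", event["messageStop"]["stopReason"])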

" + }, "InvokeModel":{ "name":"InvokeModel", "http":{ @@ -71,12 +113,386 @@ }, "exception":true }, + "AnyToolChoice":{ + "type":"structure", + "members":{ + }, + "documentation":"

The model must request at least one tool (no text is generated).

" + }, + "AutoToolChoice":{ + "type":"structure", + "members":{ + }, + "documentation":"

The model automatically decides whether to call a tool or to generate text instead.

" + }, "Body":{ "type":"blob", "max":25000000, "min":0, "sensitive":true }, + "ContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

Text to include in the message.

" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"

Image to include in the message.

This field is only supported by Anthropic Claude 3 models.

" + }, + "toolUse":{ + "shape":"ToolUseBlock", + "documentation":"

Information about a tool use request from a model.

" + }, + "toolResult":{ + "shape":"ToolResultBlock", + "documentation":"

The result for a tool request that a model makes.

" + } + }, + "documentation":"

A block of content for a message.

", + "union":true + }, + "ContentBlockDelta":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

The content text.

" + }, + "toolUse":{ + "shape":"ToolUseBlockDelta", + "documentation":"

Information about a tool that the model is requesting to use.

" + } + }, + "documentation":"

A block of content in a streaming response.

", + "union":true + }, + "ContentBlockDeltaEvent":{ + "type":"structure", + "required":[ + "delta", + "contentBlockIndex" + ], + "members":{ + "delta":{ + "shape":"ContentBlockDelta", + "documentation":"

The delta for a content block delta event.

" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The block index for a content block delta event.

" + } + }, + "documentation":"

The content block delta event.

", + "event":true + }, + "ContentBlockStart":{ + "type":"structure", + "members":{ + "toolUse":{ + "shape":"ToolUseBlockStart", + "documentation":"

Information about a tool that the model is requesting to use.

" + } + }, + "documentation":"

Content block start information.

", + "union":true + }, + "ContentBlockStartEvent":{ + "type":"structure", + "required":[ + "start", + "contentBlockIndex" + ], + "members":{ + "start":{ + "shape":"ContentBlockStart", + "documentation":"

Start information about a content block start event.

" + }, + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The index for a content block start event.

" + } + }, + "documentation":"

Content block start event.

", + "event":true + }, + "ContentBlockStopEvent":{ + "type":"structure", + "required":["contentBlockIndex"], + "members":{ + "contentBlockIndex":{ + "shape":"NonNegativeInteger", + "documentation":"

The index for a content block.

" + } + }, + "documentation":"

A content block stop event.

", + "event":true + }, + "ContentBlocks":{ + "type":"list", + "member":{"shape":"ContentBlock"} + }, + "ConversationRole":{ + "type":"string", + "enum":[ + "user", + "assistant" + ] + }, + "ConversationalModelId":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"(arn:aws(-[^:]+)?:bedrock:[a-z0-9-]{1,20}:(([0-9]{12}:custom-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}/[a-z0-9]{12})|(:foundation-model/[a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|([0-9]{12}:provisioned-model/[a-z0-9]{12})))|([a-z0-9-]{1,63}[.]{1}[a-z0-9-]{1,63}([.:]?[a-z0-9-]{1,63}))|(([0-9a-zA-Z][_-]?)+)" + }, + "ConverseMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"

The latency of the call to Converse, in milliseconds.

" + } + }, + "documentation":"

Metrics for a call to Converse.

" + }, + "ConverseOutput":{ + "type":"structure", + "members":{ + "message":{ + "shape":"Message", + "documentation":"

The message that the model generates.

" + } + }, + "documentation":"

The output from a call to Converse.

", + "union":true + }, + "ConverseRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"

The identifier for the model that you want to call.

The modelId to provide depends on the type of model that you use:

", + "location":"uri", + "locationName":"modelId" + }, + "messages":{ + "shape":"Messages", + "documentation":"

The messages that you want to send to the model.

" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"

A system prompt to pass to the model.

" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"

Inference parameters to pass to the model. Converse supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.

" + }, + "toolConfig":{ + "shape":"ToolConfiguration", + "documentation":"

Configuration information for the tools that the model can use when generating a response.

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.

" + }, + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports in the inferenceConfig field. For more information, see Model parameters.

" + }, + "additionalModelResponseFieldPaths":{ + "shape":"ConverseRequestAdditionalModelResponseFieldPathsList", + "documentation":"

Additional model parameters field paths to return in the response. Converse returns the requested fields as a JSON Pointer object in the additionalModelResultFields field. The following is example JSON for additionalModelResponseFieldPaths.

[ \"/stop_sequence\" ]

For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.

Converse rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse.

" + } + } + }, + "ConverseRequestAdditionalModelResponseFieldPathsList":{ + "type":"list", + "member":{"shape":"ConverseRequestAdditionalModelResponseFieldPathsListMemberString"}, + "max":10, + "min":0 + }, + "ConverseRequestAdditionalModelResponseFieldPathsListMemberString":{ + "type":"string", + "max":256, + "min":1 + }, + "ConverseResponse":{ + "type":"structure", + "required":[ + "output", + "stopReason", + "usage", + "metrics" + ], + "members":{ + "output":{ + "shape":"ConverseOutput", + "documentation":"

The result from the call to Converse.

" + }, + "stopReason":{ + "shape":"StopReason", + "documentation":"

The reason why the model stopped generating output.

" + }, + "usage":{ + "shape":"TokenUsage", + "documentation":"

The total number of tokens used in the call to Converse. The total includes the tokens input to the model and the tokens generated by the model.

" + }, + "metrics":{ + "shape":"ConverseMetrics", + "documentation":"

Metrics for the call to Converse.

" + }, + "additionalModelResponseFields":{ + "shape":"Document", + "documentation":"

Additional fields in the response that are unique to the model.

" + } + } + }, + "ConverseStreamMetadataEvent":{ + "type":"structure", + "required":[ + "usage", + "metrics" + ], + "members":{ + "usage":{ + "shape":"TokenUsage", + "documentation":"

Usage information for the conversation stream event.

" + }, + "metrics":{ + "shape":"ConverseStreamMetrics", + "documentation":"

The metrics for the conversation stream metadata event.

" + } + }, + "documentation":"

A conversation stream metadata event.

", + "event":true + }, + "ConverseStreamMetrics":{ + "type":"structure", + "required":["latencyMs"], + "members":{ + "latencyMs":{ + "shape":"Long", + "documentation":"

The latency for the streaming request, in milliseconds.

" + } + }, + "documentation":"

Metrics for the stream.

" + }, + "ConverseStreamOutput":{ + "type":"structure", + "members":{ + "messageStart":{ + "shape":"MessageStartEvent", + "documentation":"

Message start information.

" + }, + "contentBlockStart":{ + "shape":"ContentBlockStartEvent", + "documentation":"

Start information for a content block.

" + }, + "contentBlockDelta":{ + "shape":"ContentBlockDeltaEvent", + "documentation":"

The messages output content block delta.

" + }, + "contentBlockStop":{ + "shape":"ContentBlockStopEvent", + "documentation":"

Stop information for a content block.

" + }, + "messageStop":{ + "shape":"MessageStopEvent", + "documentation":"

Message stop information.

" + }, + "metadata":{ + "shape":"ConverseStreamMetadataEvent", + "documentation":"

Metadata for the converse output stream.

" + }, + "internalServerException":{ + "shape":"InternalServerException", + "documentation":"

An internal server error occurred. Retry your request.

" + }, + "modelStreamErrorException":{ + "shape":"ModelStreamErrorException", + "documentation":"

A streaming error occurred. Retry your request.

" + }, + "validationException":{ + "shape":"ValidationException", + "documentation":"

Input validation failed. Check your request parameters and retry the request.

" + }, + "throttlingException":{ + "shape":"ThrottlingException", + "documentation":"

The number of requests exceeds the limit. Resubmit your request later.

" + } + }, + "documentation":"

The messages output stream.

", + "eventstream":true + }, + "ConverseStreamRequest":{ + "type":"structure", + "required":[ + "modelId", + "messages" + ], + "members":{ + "modelId":{ + "shape":"ConversationalModelId", + "documentation":"

The ID for the model.

The modelId to provide depends on the type of model that you use:

", + "location":"uri", + "locationName":"modelId" + }, + "messages":{ + "shape":"Messages", + "documentation":"

The messages that you want to send to the model.

" + }, + "system":{ + "shape":"SystemContentBlocks", + "documentation":"

A system prompt to send to the model.

" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"

Inference parameters to pass to the model. ConverseStream supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.

" + }, + "toolConfig":{ + "shape":"ToolConfiguration", + "documentation":"

Configuration information for the tools that the model can use when generating a response.

This field is only supported by Anthropic Claude 3 models.

" + }, + "additionalModelRequestFields":{ + "shape":"Document", + "documentation":"

Additional inference parameters that the model supports, beyond the base set of inference parameters that ConverseStream supports in the inferenceConfig field.

" + }, + "additionalModelResponseFieldPaths":{ + "shape":"ConverseStreamRequestAdditionalModelResponseFieldPathsList", + "documentation":"

Additional model parameters field paths to return in the response. ConverseStream returns the requested fields as a JSON Pointer object in the additionalModelResultFields field. The following is example JSON for additionalModelResponseFieldPaths.

[ \"/stop_sequence\" ]

For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation.

ConverseStream rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by ConverseStream.

" + } + } + }, + "ConverseStreamRequestAdditionalModelResponseFieldPathsList":{ + "type":"list", + "member":{"shape":"ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString"}, + "max":10, + "min":0 + }, + "ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString":{ + "type":"string", + "max":256, + "min":1 + }, + "ConverseStreamResponse":{ + "type":"structure", + "members":{ + "stream":{ + "shape":"ConverseStreamOutput", + "documentation":"

The output stream that the model generated.

" + } + }, + "payload":"stream" + }, + "Document":{ + "type":"structure", + "members":{ + }, + "document":true + }, "GuardrailIdentifier":{ "type":"string", "max":2048, @@ -87,6 +503,93 @@ "type":"string", "pattern":"(([1-9][0-9]{0,7})|(DRAFT))" }, + "ImageBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"ImageFormat", + "documentation":"

The format of the image.

" + }, + "source":{ + "shape":"ImageSource", + "documentation":"

The source for the image.

" + } + }, + "documentation":"

Image content for a message.

" + }, + "ImageFormat":{ + "type":"string", + "enum":[ + "png", + "jpeg", + "gif", + "webp" + ] + }, + "ImageSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"ImageSourceBytesBlob", + "documentation":"

The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes.

" + } + }, + "documentation":"

The source for an image.

", + "union":true + }, + "ImageSourceBytesBlob":{ + "type":"blob", + "min":1 + }, + "InferenceConfiguration":{ + "type":"structure", + "members":{ + "maxTokens":{ + "shape":"InferenceConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "temperature":{ + "shape":"InferenceConfigurationTemperatureFloat", + "documentation":"

The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options.

The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "topP":{ + "shape":"InferenceConfigurationTopPFloat", + "documentation":"

The percentage of most-likely candidates that the model considers for the next token. For example, if you choose a value of 0.8 for topP, the model selects from the top 80% of the probability distribution of tokens that could be next in the sequence.

The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.

" + }, + "stopSequences":{ + "shape":"InferenceConfigurationStopSequencesList", + "documentation":"

A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.

" + } + }, + "documentation":"

Base inference parameters to pass to a model in a call to Converse or ConverseStream. For more information, see Inference parameters for foundation models.

If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field in the call to Converse or ConverseStream. For more information, see Model parameters.
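For example, a request-level inferenceConfig built from the member fields above might look like the following sketch; the values are illustrative, and the allowed ranges follow the shapes defined next:

    # Illustrative base inference parameters for Converse or ConverseStream.
    inference_config = {
        "maxTokens": 512,                 # minimum 1
        "temperature": 0.5,               # 0.0 to 1.0
        "topP": 0.9,                      # 0.0 to 1.0
        "stopSequences": ["\n\nHuman:"],  # at most 4 sequences
    }

    # Pass it alongside the messages, e.g.:
    # client.converse(modelId=..., messages=..., inferenceConfig=inference_config)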

" + }, + "InferenceConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "InferenceConfigurationStopSequencesList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":4, + "min":0 + }, + "InferenceConfigurationTemperatureFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, + "InferenceConfigurationTopPFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, "InternalServerException":{ "type":"structure", "members":{ @@ -243,6 +746,60 @@ }, "payload":"body" }, + "Long":{ + "type":"long", + "box":true + }, + "Message":{ + "type":"structure", + "required":[ + "role", + "content" + ], + "members":{ + "role":{ + "shape":"ConversationRole", + "documentation":"

The role that the message plays in the conversation.

" + }, + "content":{ + "shape":"ContentBlocks", + "documentation":"

The message content.

" + } + }, + "documentation":"

A message in a conversation. Use this structure to send a message in a call to Converse.

" + }, + "MessageStartEvent":{ + "type":"structure", + "required":["role"], + "members":{ + "role":{ + "shape":"ConversationRole", + "documentation":"

The role for the message.

" + } + }, + "documentation":"

The start of a message.

", + "event":true + }, + "MessageStopEvent":{ + "type":"structure", + "required":["stopReason"], + "members":{ + "stopReason":{ + "shape":"StopReason", + "documentation":"

The reason why the model stopped generating output.

" + }, + "additionalModelResponseFields":{ + "shape":"Document", + "documentation":"

The additional model response fields.

" + } + }, + "documentation":"

The stop event for a message.

", + "event":true + }, + "Messages":{ + "type":"list", + "member":{"shape":"Message"} + }, "MimeType":{"type":"string"}, "ModelErrorException":{ "type":"structure", @@ -312,6 +869,15 @@ "type":"string", "pattern":"[\\s\\S]*" }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NonNegativeInteger":{ + "type":"integer", + "box":true, + "min":0 + }, "PartBody":{ "type":"blob", "max":1000000, @@ -385,12 +951,49 @@ }, "exception":true }, + "SpecificToolChoice":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model must request.

" + } + }, + "documentation":"

The model must request a specific tool.

This field is only supported by Anthropic Claude 3 models.

" + }, "StatusCode":{ "type":"integer", "box":true, "max":599, "min":100 }, + "StopReason":{ + "type":"string", + "enum":[ + "end_turn", + "tool_use", + "max_tokens", + "stop_sequence", + "content_filtered" + ] + }, + "String":{"type":"string"}, + "SystemContentBlock":{ + "type":"structure", + "members":{ + "text":{ + "shape":"NonEmptyString", + "documentation":"

A system prompt for the model.

" + } + }, + "documentation":"

A system content block.

", + "union":true + }, + "SystemContentBlocks":{ + "type":"list", + "member":{"shape":"SystemContentBlock"} + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -403,6 +1006,243 @@ }, "exception":true }, + "TokenUsage":{ + "type":"structure", + "required":[ + "inputTokens", + "outputTokens", + "totalTokens" + ], + "members":{ + "inputTokens":{ + "shape":"TokenUsageInputTokensInteger", + "documentation":"

The number of tokens sent in the request to the model.

" + }, + "outputTokens":{ + "shape":"TokenUsageOutputTokensInteger", + "documentation":"

The number of tokens that the model generated for the request.

" + }, + "totalTokens":{ + "shape":"TokenUsageTotalTokensInteger", + "documentation":"

The total of input tokens and tokens generated by the model.

" + } + }, + "documentation":"

The tokens used in a message API inference call.

" + }, + "TokenUsageInputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageOutputTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "TokenUsageTotalTokensInteger":{ + "type":"integer", + "box":true, + "min":0 + }, + "Tool":{ + "type":"structure", + "members":{ + "toolSpec":{ + "shape":"ToolSpecification", + "documentation":"

The specification for the tool.

" + } + }, + "documentation":"

Information about a tool that you can use with the Converse API.

", + "union":true + }, + "ToolChoice":{ + "type":"structure", + "members":{ + "auto":{ + "shape":"AutoToolChoice", + "documentation":"

The model automatically decides whether to call a tool or to generate text instead.

" + }, + "any":{ + "shape":"AnyToolChoice", + "documentation":"

The model must request at least one tool (no text is generated).

" + }, + "tool":{ + "shape":"SpecificToolChoice", + "documentation":"

The model must request the specified tool.

" + } + }, + "documentation":"

Determines how the model uses tools: automatically decide, request at least one tool, or request a specific tool.

", + "union":true + }, + "ToolConfiguration":{ + "type":"structure", + "required":["tools"], + "members":{ + "tools":{ + "shape":"ToolConfigurationToolsList", + "documentation":"

An array of tools that you want to pass to a model.

" + }, + "toolChoice":{ + "shape":"ToolChoice", + "documentation":"

If supported by the model, forces the model to request a tool.

" + } + }, + "documentation":"

Configuration information for the tools that you pass to a model.

This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
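A sketch of a toolConfig and a follow-up toolResult message, assembled from the Tool, ToolSpecification, ToolInputSchema, and ToolResultBlock shapes in this file; the get_weather tool, its schema, and the IDs are illustrative:

    # Tool definition passed in the request's toolConfig field.
    tool_config = {
        "tools": [
            {
                "toolSpec": {
                    "name": "get_weather",  # illustrative tool name
                    "description": "Get the weather for a location",
                    "inputSchema": {
                        "json": {  # JSON Schema; the top-level type must be object
                            "type": "object",
                            "properties": {
                                "city": {"type": "string"},
                                "state": {"type": "string"},
                            },
                            "required": ["city", "state"],
                        }
                    },
                }
            }
        ]
    }

    # After the model stops with stopReason "tool_use", return the tool's
    # result in a user message that contains a toolResult block.
    tool_result_message = {
        "role": "user",
        "content": [
            {
                "toolResult": {
                    "toolUseId": "tooluse_example_id",  # echo the model's toolUseId
                    "content": [{"json": {"weather": "40F"}}],
                    "status": "success",
                }
            }
        ],
    }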

" + }, + "ToolConfigurationToolsList":{ + "type":"list", + "member":{"shape":"Tool"}, + "min":1 + }, + "ToolInputSchema":{ + "type":"structure", + "members":{ + "json":{ + "shape":"Document", + "documentation":"

The JSON schema for the tool. For more information, see JSON Schema Reference.

" + } + }, + "documentation":"

The schema for the tool. The top level schema type must be object.

", + "union":true + }, + "ToolName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9_]*" + }, + "ToolResultBlock":{ + "type":"structure", + "required":[ + "toolUseId", + "content" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID of the tool request that this is the result for.

" + }, + "content":{ + "shape":"ToolResultContentBlocks", + "documentation":"

The content for the tool result content block.

" + }, + "status":{ + "shape":"ToolResultStatus", + "documentation":"

The status for the tool result content block.

This field is only supported by Anthropic Claude 3 models.

" + } + }, + "documentation":"

A tool result block that contains the results for a tool request that the model previously made.

" + }, + "ToolResultContentBlock":{ + "type":"structure", + "members":{ + "json":{ + "shape":"Document", + "documentation":"

A tool result that is JSON format data.

" + }, + "text":{ + "shape":"String", + "documentation":"

A tool result that is text.

" + }, + "image":{ + "shape":"ImageBlock", + "documentation":"

A tool result that is an image.

This field is only supported by Anthropic Claude 3 models.

" + } + }, + "documentation":"

The tool result content block.

", + "union":true + }, + "ToolResultContentBlocks":{ + "type":"list", + "member":{"shape":"ToolResultContentBlock"} + }, + "ToolResultStatus":{ + "type":"string", + "enum":[ + "success", + "error" + ] + }, + "ToolSpecification":{ + "type":"structure", + "required":[ + "name", + "inputSchema" + ], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"

The name for the tool.

" + }, + "description":{ + "shape":"NonEmptyString", + "documentation":"

The description for the tool.

" + }, + "inputSchema":{ + "shape":"ToolInputSchema", + "documentation":"

The input schema for the tool in JSON format.

" + } + }, + "documentation":"

The specification for the tool.

" + }, + "ToolUseBlock":{ + "type":"structure", + "required":[ + "toolUseId", + "name", + "input" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID for the tool request.

" + }, + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model wants to use.

" + }, + "input":{ + "shape":"Document", + "documentation":"

The input to pass to the tool.

" + } + }, + "documentation":"

A tool use content block. Contains information about a tool that the model is requesting be run. The model uses the result from the tool to generate a response.

" + }, + "ToolUseBlockDelta":{ + "type":"structure", + "required":["input"], + "members":{ + "input":{ + "shape":"String", + "documentation":"

The input for a requested tool.

" + } + }, + "documentation":"

The delta for a tool use block.

" + }, + "ToolUseBlockStart":{ + "type":"structure", + "required":[ + "toolUseId", + "name" + ], + "members":{ + "toolUseId":{ + "shape":"ToolUseId", + "documentation":"

The ID for the tool request.

" + }, + "name":{ + "shape":"ToolName", + "documentation":"

The name of the tool that the model is requesting to use.

" + } + }, + "documentation":"

The start of a tool use block.

" + }, + "ToolUseId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, "Trace":{ "type":"string", "enum":[ diff --git a/botocore/data/cloudtrail/2013-11-01/service-2.json b/botocore/data/cloudtrail/2013-11-01/service-2.json index 332d2b0640..cadb309048 100644 --- a/botocore/data/cloudtrail/2013-11-01/service-2.json +++ b/botocore/data/cloudtrail/2013-11-01/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"cloudtrail", "jsonVersion":"1.1", "protocol":"json", + "protocols":["json"], "serviceAbbreviation":"CloudTrail", "serviceFullName":"AWS CloudTrail", "serviceId":"CloudTrail", @@ -746,7 +747,7 @@ {"shape":"NoManagementAccountSLRExistsException"}, {"shape":"InsufficientDependencyServiceAccessPermissionException"} ], - "documentation":"

Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events for trails in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. The RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. The GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.

", + "documentation":"

Configures an event selector or advanced event selectors for your trail. Use event selectors or advanced event selectors to specify management and data event settings for your trail. If you want your trail to log Insights events, be sure the event selector enables logging of the Insights event types you want configured for your trail. For more information about logging Insights events, see Logging Insights events in the CloudTrail User Guide. By default, trails created without specific event selectors are configured to log all read and write management events, and no data events.

When an event occurs in your account, CloudTrail evaluates the event selectors or advanced event selectors in all trails. For each trail, if the event matches any event selector, the trail processes and logs the event. If the event doesn't match any event selector, the trail doesn't log the event.

Example

  1. You create an event selector for a trail and specify that you want write-only events.

  2. The EC2 GetConsoleOutput and RunInstances API operations occur in your account.

  3. CloudTrail evaluates whether the events match your event selectors.

  4. RunInstances is a write-only event and it matches your event selector. The trail logs the event.

  5. GetConsoleOutput is a read-only event that doesn't match your event selector. The trail doesn't log the event.

The PutEventSelectors operation must be called from the Region in which the trail was created; otherwise, an InvalidHomeRegionException exception is thrown.

You can configure up to five event selectors for each trail. For more information, see Logging management events, Logging data events, and Quotas in CloudTrail in the CloudTrail User Guide.

You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either AdvancedEventSelectors or EventSelectors, but not both. If you apply AdvancedEventSelectors to a trail, any existing EventSelectors are overwritten. For more information about advanced event selectors, see Logging data events in the CloudTrail User Guide.
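A hedged boto3 sketch of the write-only configuration described in the numbered example above; the trail name is illustrative:

    import boto3

    cloudtrail = boto3.client("cloudtrail")

    # Log write-only management events, as in steps 1-5 above.
    cloudtrail.put_event_selectors(
        TrailName="my-trail",  # illustrative
        EventSelectors=[
            {
                "ReadWriteType": "WriteOnly",
                "IncludeManagementEvents": True,
            }
        ],
    )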

", "idempotent":true }, "PutInsightSelectors":{ @@ -921,7 +922,7 @@ {"shape":"OperationNotPermittedException"}, {"shape":"UnsupportedOperationException"} ], - "documentation":"

Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations.

When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.

When you retry an import, the ImportID parameter is required.

If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.

" + "documentation":"

Starts an import of logged trail events from a source S3 bucket to a destination event data store. By default, CloudTrail only imports events contained in the S3 bucket's CloudTrail prefix and the prefixes inside the CloudTrail prefix, and does not check prefixes for other Amazon Web Services services. If you want to import CloudTrail events contained in another prefix, you must include the prefix in the S3LocationUri. For more considerations about importing trail events, see Considerations for copying trail events in the CloudTrail User Guide.

When you start a new import, the Destinations and ImportSource parameters are required. Before starting a new import, disable any access control lists (ACLs) attached to the source S3 bucket. For more information about disabling ACLs, see Controlling ownership of objects and disabling ACLs for your bucket.

When you retry an import, the ImportID parameter is required.

If the destination event data store is for an organization, you must use the management account to import trail events. You cannot use the delegated administrator account for the organization.
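A sketch of starting a new import, assuming the standard boto3 surface for this operation; the event data store ARN, bucket URI, region, and role ARN are illustrative:

    import boto3

    cloudtrail = boto3.client("cloudtrail")

    # Destinations and ImportSource are required for a new import.
    cloudtrail.start_import(
        Destinations=[
            "arn:aws:cloudtrail:us-east-1:111122223333:eventdatastore/EXAMPLE-uuid"
        ],
        ImportSource={
            "S3": {
                "S3LocationUri": "s3://my-trail-bucket/AWSLogs/111122223333/CloudTrail/",
                "S3BucketRegion": "us-east-1",
                "S3BucketAccessRoleArn": "arn:aws:iam::111122223333:role/ImportRole",
            }
        },
    )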

" }, "StartLogging":{ "name":"StartLogging", @@ -1230,7 +1231,7 @@ "members":{ "Field":{ "shape":"SelectorField", - "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource.

For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

" + "documentation":"

A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the field is used only for selecting events as filtering is not supported.

For CloudTrail management events, supported fields include readOnly, eventCategory, and eventSource.

For CloudTrail data events, supported fields include readOnly, eventCategory, eventName, resources.type, and resources.ARN.

For event data stores for CloudTrail Insights events, Config configuration items, Audit Manager evidence, or events outside of Amazon Web Services, the only supported field is eventCategory.

" }, "Equals":{ "shape":"Operator", @@ -1389,7 +1390,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see Enabling Trusted Access with Other Amazon Web Services Services and Prepare For Creating a Trail For Your Organization.

", + "documentation":"

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, see How to enable or disable trusted access in the Organizations User Guide and Prepare For Creating a Trail For Your Organization in the CloudTrail User Guide.

", "exception":true }, "CloudTrailInvalidClientTokenIdException":{ @@ -1475,7 +1476,7 @@ }, "AdvancedEventSelectors":{ "shape":"AdvancedEventSelectors", - "documentation":"

The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.

For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include non-Amazon Web Services events in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" + "documentation":"

The advanced event selectors to use to select the events for the data store. You can configure up to five advanced event selectors for each event data store.

For more information about how to use advanced event selectors to log CloudTrail events, see Log events by using advanced event selectors in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include Config configuration items in your event data store, see Create an event data store for Config configuration items in the CloudTrail User Guide.

For more information about how to use advanced event selectors to include events outside of Amazon Web Services in your event data store, see Create an integration to log events from outside Amazon Web Services in the CloudTrail User Guide.

" }, "MultiRegionEnabled":{ "shape":"Boolean", @@ -1575,11 +1576,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. For information about bucket naming rules, see Bucket naming rules in the Amazon Simple Storage Service User Guide.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -1630,7 +1631,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" }, "SnsTopicName":{ "shape":"String", @@ -1685,10 +1686,10 @@ }, "Values":{ "shape":"DataResourceValues", - "documentation":"

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified objects.

" + "documentation":"

An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.

" } }, - "documentation":"

The Amazon S3 buckets, Lambda functions, or Amazon DynamoDB tables that you specify in your event selectors for your trail to log data events. Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is an Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is an Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.

" + "documentation":"

Data events provide information about the resource operations performed on or within a resource itself. These are also known as data plane operations. You can specify up to 250 data resources for a trail.

Configure the DataResource to specify the resource type and resource ARNs for which you want to log data events.

You can specify the following resource types in your event selectors for your trail: AWS::DynamoDB::Table, AWS::Lambda::Function, and AWS::S3::Object.

The total number of allowed data resources is 250. This number can be distributed between 1 and 5 event selectors, but the total cannot exceed 250 across all selectors for the trail.

If you are using advanced event selectors, the maximum total number of values for all conditions, across all advanced event selectors for the trail, is 500.

The following example demonstrates how logging works when you configure logging of all data events for an S3 bucket named bucket-1. In this example, the CloudTrail user specified an empty prefix, and the option to log both Read and Write data events.

  1. A user uploads an image file to bucket-1.

  2. The PutObject API operation is an Amazon S3 object-level API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified an S3 bucket with an empty prefix, events that occur on any object in that bucket are logged. The trail processes and logs the event.

  3. A user uploads an object to an Amazon S3 bucket named arn:aws:s3:::bucket-2.

  4. The PutObject API operation occurred for an object in an S3 bucket that the CloudTrail user didn't specify for the trail. The trail doesn’t log the event.

The following example demonstrates how logging works when you configure logging of Lambda data events for a Lambda function named MyLambdaFunction, but not for all Lambda functions.

  1. A user runs a script that includes a call to the MyLambdaFunction function and the MyOtherLambdaFunction function.

  2. The Invoke API operation on MyLambdaFunction is a Lambda API. It is recorded as a data event in CloudTrail. Because the CloudTrail user specified logging data events for MyLambdaFunction, any invocations of that function are logged. The trail processes and logs the event.

  3. The Invoke API operation on MyOtherLambdaFunction is a Lambda API. Because the CloudTrail user did not specify logging data events for all Lambda functions, the Invoke operation for MyOtherLambdaFunction does not match the function specified for the trail. The trail doesn’t log the event.
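A sketch of the two configurations walked through above, expressed as DataResource entries; the bucket name comes from the first example, and the Lambda function ARN's account and region are illustrative:

    # Log all S3 object data events for bucket-1 (empty object prefix).
    s3_data_resource = {
        "Type": "AWS::S3::Object",
        "Values": ["arn:aws:s3:::bucket-1/"],
    }

    # Log Invoke data events for MyLambdaFunction only.
    lambda_data_resource = {
        "Type": "AWS::Lambda::Function",
        "Values": ["arn:aws:lambda:us-east-1:111122223333:function:MyLambdaFunction"],
    }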

" }, "DataResourceValues":{ "type":"list", @@ -2086,7 +2087,7 @@ "deprecatedMessage":"UpdatedTimestamp is no longer returned by ListEventDataStores" } }, - "documentation":"

A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.

" + "documentation":"

A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events that you have logged on your account. To select events for an event data store, use advanced event selectors.

" }, "EventDataStoreARNInvalidException":{ "type":"structure", @@ -2336,6 +2337,10 @@ "FederationRoleArn":{ "shape":"FederationRoleArn", "documentation":"

If Lake query federation is enabled, provides the ARN of the federation role used to access the resources for the federated event data store.

" + }, + "PartitionKeys":{ + "shape":"PartitionKeyList", + "documentation":"

The partition keys for the event data store. To improve query performance and efficiency, CloudTrail Lake organizes event data into partitions based on values derived from partition keys.
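A quick sketch of reading the new field; the event data store ARN below is a placeholder:

    import boto3

    cloudtrail = boto3.client("cloudtrail")

    # GetEventDataStore now returns PartitionKeys alongside the existing fields.
    response = cloudtrail.get_event_data_store(
        EventDataStore="arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-1234"
    )
    for key in response.get("PartitionKeys", []):
        # e.g. a name such as calendarday with a type such as string
        print(key["Name"], key["Type"])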

" } } }, @@ -2559,7 +2564,7 @@ }, "LatestDeliveryError":{ "shape":"String", - "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.

" + "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver log files to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

" }, "LatestNotificationError":{ "shape":"String", @@ -2595,7 +2600,7 @@ }, "LatestDigestDeliveryError":{ "shape":"String", - "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, create a new bucket, and then call UpdateTrail to specify the new bucket; or fix the existing objects so that CloudTrail can again write to the bucket.

" + "documentation":"

Displays any Amazon S3 error that CloudTrail encountered when attempting to deliver a digest file to the designated bucket. For more information, see Error Responses in the Amazon S3 API Reference.

This error occurs only when there is a problem with the destination S3 bucket, and does not occur for requests that time out. To resolve the issue, fix the bucket policy so that CloudTrail can write to the bucket; or create a new bucket and call UpdateTrail to specify the new bucket.

" }, "LatestDeliveryAttemptTime":{ "shape":"String", @@ -3564,7 +3569,7 @@ "type":"structure", "members":{ }, - "documentation":"

This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Create an event data store.

", + "documentation":"

This exception is thrown when the Amazon Web Services account making the request to create or update an organization trail or event data store is not the management account for an organization in Organizations. For more information, see Prepare For Creating a Trail For Your Organization or Organization event data stores.

", "exception":true }, "OperationNotPermittedException":{ @@ -3605,6 +3610,41 @@ "min":4, "pattern":".*" }, + "PartitionKey":{ + "type":"structure", + "required":[ + "Name", + "Type" + ], + "members":{ + "Name":{ + "shape":"PartitionKeyName", + "documentation":"

The name of the partition key.

" + }, + "Type":{ + "shape":"PartitionKeyType", + "documentation":"

The data type of the partition key. For example, bigint or string.

" + } + }, + "documentation":"

Contains information about a partition key for an event data store.

" + }, + "PartitionKeyList":{ + "type":"list", + "member":{"shape":"PartitionKey"}, + "max":2 + }, + "PartitionKeyName":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, + "PartitionKeyType":{ + "type":"string", + "max":255, + "min":0, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + }, "PublicKey":{ "type":"structure", "members":{ @@ -4395,11 +4435,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Name of the Amazon S3 bucket into which CloudTrail delivers your trail files. See Amazon S3 Bucket naming rules.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -4673,11 +4713,11 @@ }, "S3BucketName":{ "shape":"String", - "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

" + "documentation":"

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket naming rules.

" }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

" }, "SnsTopicName":{ "shape":"String", @@ -4727,7 +4767,7 @@ }, "S3KeyPrefix":{ "shape":"String", - "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" + "documentation":"

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated for log file delivery. For more information, see Finding Your CloudTrail Log Files.

" }, "SnsTopicName":{ "shape":"String", diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index a744c84144..980943f613 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -5216,6 +5216,10 @@ } } }, + "AssociatedQueueIdList":{ + "type":"list", + "member":{"shape":"QueueId"} + }, "AssociationId":{ "type":"string", "max":100, @@ -17513,6 +17517,10 @@ "IsDefault":{ "shape":"Boolean", "documentation":"

Whether this is a default routing profile.

" + }, + "AssociatedQueueIds":{ + "shape":"AssociatedQueueIdList", + "documentation":"

The IDs of the associated queues.

" } }, "documentation":"

Contains information about a routing profile.

" @@ -17658,7 +17666,7 @@ }, "StringCondition":{ "shape":"StringCondition", - "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are name, description, and resourceID.

" + "documentation":"

A leaf node condition which can be used to specify a string condition.

The currently supported values for FieldName are associatedQueueIds, name, description, and resourceID.

" } }, "documentation":"

The search criteria to be used to return routing profiles.

The name and description fields support \"contains\" queries with a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths outside of this range will return invalid results.
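A sketch of the new criterion added in this release; the instance and queue IDs are placeholders:

    import boto3

    connect = boto3.client("connect")

    # Find routing profiles associated with a specific queue via the new
    # associatedQueueIds search field.
    response = connect.search_routing_profiles(
        InstanceId="11111111-2222-3333-4444-555555555555",
        SearchCriteria={
            "StringCondition": {
                "FieldName": "associatedQueueIds",
                "Value": "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
                "ComparisonType": "EXACT",
            }
        },
    )
    for profile in response["RoutingProfiles"]:
        # AssociatedQueueIds is the new response field.
        print(profile["Name"], profile.get("AssociatedQueueIds"))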

" diff --git a/botocore/data/emr-serverless/2021-07-13/paginators-1.json b/botocore/data/emr-serverless/2021-07-13/paginators-1.json index 7193d8550a..aa3966b17b 100644 --- a/botocore/data/emr-serverless/2021-07-13/paginators-1.json +++ b/botocore/data/emr-serverless/2021-07-13/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "jobRuns" + }, + "ListJobRunAttempts": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "jobRunAttempts" } } } diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index 5bb5e4c235..d5e5c73744 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -3,8 +3,8 @@ "metadata":{ "apiVersion":"2021-07-13", "endpointPrefix":"emr-serverless", + "jsonVersion":"1.1", "protocol":"rest-json", - "protocols":["rest-json"], "serviceFullName":"EMR Serverless", "serviceId":"EMR Serverless", "signatureVersion":"v4", @@ -127,6 +127,22 @@ ], "documentation":"

Lists applications based on a set of parameters.

" }, + "ListJobRunAttempts":{ + "name":"ListJobRunAttempts", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/jobruns/{jobRunId}/attempts", + "responseCode":200 + }, + "input":{"shape":"ListJobRunAttemptsRequest"}, + "output":{"shape":"ListJobRunAttemptsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all attempts of a job run.
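A minimal sketch of the new operation and its paginator (registered in paginators-1.json above); the application and job run IDs are placeholders:

    import boto3

    emr = boto3.client("emr-serverless")

    # Page through every attempt of a job run.
    paginator = emr.get_paginator("list_job_run_attempts")
    for page in paginator.paginate(
        applicationId="00fabcdefghijk01", jobRunId="00fabcdefghijk02"
    ):
        for attempt in page["jobRunAttempts"]:
            print(attempt["attempt"], attempt["state"])

    # GetJobRun also gains an optional "attempt" query parameter to fetch
    # a specific attempt instead of the latest one.
    job_run = emr.get_job_run(
        applicationId="00fabcdefghijk01", jobRunId="00fabcdefghijk02", attempt=1
    )["jobRun"]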

" + }, "ListJobRuns":{ "name":"ListJobRuns", "http":{ @@ -460,6 +476,11 @@ "X86_64" ] }, + "AttemptNumber":{ + "type":"integer", + "box":true, + "min":1 + }, "AutoStartConfig":{ "type":"structure", "members":{ @@ -837,6 +858,12 @@ "documentation":"

The ID of the job run.

", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

An optional parameter that specifies the attempt number of the job run. If not specified, this value defaults to the latest attempt of the job run.

", + "location":"querystring", + "locationName":"attempt" } } }, @@ -867,6 +894,12 @@ "documentation":"

The ID of the job run.

", "location":"uri", "locationName":"jobRunId" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

An optional parameter that specifies the attempt number of the job run. If not specified, this value defaults to the latest attempt of the job run.

", + "location":"querystring", + "locationName":"attempt" } } }, @@ -1115,16 +1148,126 @@ "billedResourceUtilization":{ "shape":"ResourceUtilization", "documentation":"

The aggregate vCPU, memory, and storage that Amazon Web Services has billed for the job run. The billed resources include a 1-minute minimum usage for workers, plus additional storage over 20 GB per worker. Note that billed resources do not include usage for idle pre-initialized workers.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run.

" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"

The retry policy of the job run.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt number of the job run.

" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was created.

" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was last updated.

" } }, "documentation":"

Information about a job run. A job run is a unit of work, such as a Spark JAR, Hive query, or SparkSQL query, that you submit to an Amazon EMR Serverless application.

" }, + "JobRunAttemptSummary":{ + "type":"structure", + "required":[ + "applicationId", + "id", + "arn", + "createdBy", + "jobCreatedAt", + "createdAt", + "updatedAt", + "executionRole", + "state", + "stateDetails", + "releaseLabel" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

The ID of the application the job is running on.

" + }, + "id":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run attempt.

" + }, + "name":{ + "shape":"String256", + "documentation":"

The name of the job run attempt.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run attempt.

" + }, + "arn":{ + "shape":"JobArn", + "documentation":"

The Amazon Resource Name (ARN) of the job run.

" + }, + "createdBy":{ + "shape":"RequestIdentityUserArn", + "documentation":"

The user who created the job run.

" + }, + "jobCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run was created.

" + }, + "createdAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was created.

" + }, + "updatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was last updated.

" + }, + "executionRole":{ + "shape":"IAMRoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the execution role of the job run.

" + }, + "state":{ + "shape":"JobRunState", + "documentation":"

The state of the job run attempt.

" + }, + "stateDetails":{ + "shape":"String256", + "documentation":"

The state details of the job run attempt.

" + }, + "releaseLabel":{ + "shape":"ReleaseLabel", + "documentation":"

The Amazon EMR release label of the job run attempt.

" + }, + "type":{ + "shape":"JobRunType", + "documentation":"

The type of the job run, such as Spark or Hive.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt number of the job run execution.

" + } + }, + "documentation":"

The summary of attributes associated with a job run attempt.

" + }, + "JobRunAttempts":{ + "type":"list", + "member":{"shape":"JobRunAttemptSummary"} + }, "JobRunId":{ "type":"string", "max":64, "min":1, "pattern":"[0-9a-z]+" }, + "JobRunMode":{ + "type":"string", + "enum":[ + "BATCH", + "STREAMING" + ] + }, "JobRunState":{ "type":"string", "enum":[ @@ -1171,6 +1314,10 @@ "shape":"String256", "documentation":"

The optional job run name. This doesn't have to be unique.

" }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run.

" + }, "arn":{ "shape":"JobArn", "documentation":"

The ARN of the job run.

" @@ -1206,6 +1353,18 @@ "type":{ "shape":"JobRunType", "documentation":"

The type of job run, such as Spark or Hive.

" + }, + "attempt":{ + "shape":"AttemptNumber", + "documentation":"

The attempt number of the job run execution.

" + }, + "attemptCreatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was created.

" + }, + "attemptUpdatedAt":{ + "shape":"Date", + "documentation":"

The date and time when the job run attempt was last updated.

" } }, "documentation":"

The summary of attributes associated with a job run.

" @@ -1258,6 +1417,59 @@ } } }, + "ListJobRunAttemptsRequest":{ + "type":"structure", + "required":[ + "applicationId", + "jobRunId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

The ID of the application for which to list job run attempts.

", + "location":"uri", + "locationName":"applicationId" + }, + "jobRunId":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run for which to list attempts.

", + "location":"uri", + "locationName":"jobRunId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of job run attempt results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListJobRunAttemptsRequestMaxResultsInteger", + "documentation":"

The maximum number of job run attempts to list.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListJobRunAttemptsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, + "ListJobRunAttemptsResponse":{ + "type":"structure", + "required":["jobRunAttempts"], + "members":{ + "jobRunAttempts":{ + "shape":"JobRunAttempts", + "documentation":"

The array of the listed job run attempt objects.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of job run attempt results. This is required for pagination and is returned in the response of the previous request.

" + } + } + }, "ListJobRunsRequest":{ "type":"structure", "required":["applicationId"], @@ -1297,6 +1509,12 @@ "documentation":"

An optional filter for job run states. Note that if this filter contains multiple states, the resulting list will be grouped by the state.

", "location":"querystring", "locationName":"states" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job runs to list.

", + "location":"querystring", + "locationName":"mode" } } }, @@ -1529,6 +1747,25 @@ }, "documentation":"

The resource utilization for memory, storage, and vCPU for jobs.

" }, + "RetryPolicy":{ + "type":"structure", + "members":{ + "maxAttempts":{ + "shape":"AttemptNumber", + "documentation":"

Maximum number of attempts for the job run. This parameter is only applicable for BATCH mode.

" + }, + "maxFailedAttemptsPerHour":{ + "shape":"RetryPolicyMaxFailedAttemptsPerHourInteger", + "documentation":"

Maximum number of failed attempts per hour. This parameter is only applicable for STREAMING mode.

" + } + }, + "documentation":"

The retry policy to use for a job run.

" + }, + "RetryPolicyMaxFailedAttemptsPerHourInteger":{ + "type":"integer", + "box":true, + "min":1 + }, "S3MonitoringConfiguration":{ "type":"structure", "members":{ @@ -1662,6 +1899,14 @@ "name":{ "shape":"String256", "documentation":"

The optional job run name. This doesn't have to be unique.

" + }, + "mode":{ + "shape":"JobRunMode", + "documentation":"

The mode of the job run when it starts.

" + }, + "retryPolicy":{ + "shape":"RetryPolicy", + "documentation":"

The retry policy to apply when the job run starts.
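A sketch of starting a streaming job with the new fields; the application ID, role ARN, and script path are placeholders:

    import boto3

    emr = boto3.client("emr-serverless")

    # STREAMING jobs cap failed attempts per hour; BATCH jobs would set
    # "maxAttempts" in the retry policy instead.
    response = emr.start_job_run(
        applicationId="00fabcdefghijk01",
        executionRoleArn="arn:aws:iam::123456789012:role/EMRServerlessJobRole",
        mode="STREAMING",
        retryPolicy={"maxFailedAttemptsPerHour": 3},
        jobDriver={
            "sparkSubmit": {
                "entryPoint": "s3://amzn-s3-demo-bucket/scripts/streaming_job.py"
            }
        },
    )
    print(response["jobRunId"])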

" } } }, diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index ef12d953bb..c28791443d 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -78,7 +78,7 @@ {"shape":"TenantDatabaseNotFoundFault"}, {"shape":"DBSnapshotTenantDatabaseNotFoundFault"} ], - "documentation":"

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging Amazon RDS resources, see Tagging Amazon RDS Resources.

" + "documentation":"

Adds metadata tags to an Amazon RDS resource. These tags can also be used with cost allocation reporting to track cost associated with Amazon RDS resources, or used in a Condition statement in an IAM policy for Amazon RDS.

For an overview on tagging your relational database resources, see Tagging Amazon RDS Resources or Tagging Amazon Aurora and Amazon RDS Resources.
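For example, a minimal boto3 call; the DB instance ARN is a placeholder:

    import boto3

    rds = boto3.client("rds")

    # Tags added here can drive cost allocation reports or IAM Condition keys.
    rds.add_tags_to_resource(
        ResourceName="arn:aws:rds:us-east-1:123456789012:db:example-instance",
        Tags=[{"Key": "environment", "Value": "staging"}],
    )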

" }, "ApplyPendingMaintenanceAction":{ "name":"ApplyPendingMaintenanceAction", @@ -4127,7 +4127,7 @@ }, "DatabaseName":{ "shape":"String", - "documentation":"

The name for your database of up to 64 alphanumeric characters. If you don't provide a name, Amazon RDS doesn't create a database in the DB cluster you are creating.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" + "documentation":"

The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

" }, "DBClusterIdentifier":{ "shape":"String", @@ -4406,7 +4406,7 @@ "members":{ "DBName":{ "shape":"String", - "documentation":"

The meaning of this parameter differs according to the database engine you use.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

  • Must contain 1 to 64 alphanumeric characters.

  • Can't be a word reserved by the database engine.

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster.

Constraints:

  • It must contain 1 to 63 alphanumeric characters.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

Default: ORCL

Constraints:

  • Must contain 1 to 8 alphanumeric characters.

  • Must contain a letter.

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for Db2

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.

" + "documentation":"

The meaning of this parameter differs according to the database engine you use.

Amazon Aurora MySQL

The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.

Constraints:

  • Must contain 1 to 64 alphanumeric characters.

  • Can't be a word reserved by the database engine.

Amazon Aurora PostgreSQL

The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • Must contain 1 to 63 alphanumeric characters.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9).

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for Oracle

The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs.

Default: ORCL

Constraints:

  • Must contain 1 to 8 alphanumeric characters.

  • Must contain a letter.

  • Can't be a word reserved by the database engine.

Amazon RDS Custom for SQL Server

Not applicable. Must be null.

RDS for Db2

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. In some cases, we recommend that you don't add a database name. For more information, see Additional considerations in the Amazon RDS User Guide.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MariaDB

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for MySQL

The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.

Constraints:

  • Must contain 1 to 64 letters or numbers.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for Oracle

The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName.

Default: ORCL

Constraints:

  • Can't be longer than 8 characters.

RDS for PostgreSQL

The name of the database to create when the DB instance is created. A database named postgres is always created. If this parameter is specified, an additional database with this name is created.

Constraints:

  • Must contain 1 to 63 letters, numbers, or underscores.

  • Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).

  • Can't be a word reserved by the specified database engine.

RDS for SQL Server

Not applicable. Must be null.
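To illustrate the RDS for PostgreSQL case above, a hedged create_db_instance sketch; the identifier, instance class, and database name are placeholders:

    import boto3

    rds = boto3.client("rds")

    # "postgres" is always created; DBName adds one more database ("appdb").
    rds.create_db_instance(
        DBInstanceIdentifier="example-postgres",
        Engine="postgres",
        DBInstanceClass="db.t3.micro",
        AllocatedStorage=20,
        MasterUsername="adminuser",
        ManageMasterUserPassword=True,  # store the password in Secrets Manager
        DBName="appdb",
    )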

" }, "DBInstanceIdentifier":{ "shape":"String", @@ -15506,7 +15506,7 @@ }, "S3IngestionRoleArn":{ "shape":"String", - "documentation":"

An Amazon Web Services Identity and Access Management (IAM) role to allow Amazon RDS to access your Amazon S3 bucket.

" + "documentation":"

An Amazon Web Services Identity and Access Management (IAM) role with a trust policy and a permissions policy that allows Amazon RDS to access your Amazon S3 bucket. For information about this role, see Creating an IAM role manually in the Amazon RDS User Guide.

" }, "EnablePerformanceInsights":{ "shape":"BooleanOptional", @@ -16568,7 +16568,7 @@ "documentation":"

A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with aws: or rds:. The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").

" } }, - "documentation":"

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide.

" + "documentation":"

Metadata assigned to an Amazon RDS resource consisting of a key-value pair.

For more information, see Tagging Amazon RDS Resources in the Amazon RDS User Guide or Tagging Amazon Aurora and Amazon RDS Resources in the Amazon Aurora User Guide.

" }, "TagList":{ "type":"list", diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index fc335692ad..c8c99b2bb3 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -1920,7 +1920,7 @@ }, "input":{"shape":"DescribeModelPackageInput"}, "output":{"shape":"DescribeModelPackageOutput"}, - "documentation":"

Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.

" + "documentation":"

Returns a description of the specified model package, which is used to create SageMaker models or list them on Amazon Web Services Marketplace.

If you provided a KMS Key ID when you created your model package, you will see the KMS Decrypt API call in your CloudTrail logs when you use this API.

To create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services Marketplace.

" }, "DescribeModelPackageGroup":{ "name":"DescribeModelPackageGroup", @@ -5121,7 +5121,13 @@ "randomforest", "extra-trees", "nn-torch", - "fastai" + "fastai", + "cnn-qr", + "deepar", + "prophet", + "npts", + "arima", + "ets" ] }, "AutoMLAlgorithmConfig":{ @@ -5130,10 +5136,10 @@ "members":{ "AutoMLAlgorithms":{ "shape":"AutoMLAlgorithms", - "documentation":"

The selection of algorithms run on a dataset to train the model candidates of an Autopilot job.

Selected algorithms must belong to the list corresponding to the training mode set in AutoMLJobConfig.Mode (ENSEMBLING or HYPERPARAMETER_TUNING). Choose a minimum of 1 algorithm.

" + "documentation":"

The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job.

" } }, - "documentation":"

The collection of algorithms run on a dataset for training the model candidates of an Autopilot job.

" + "documentation":"

The selection of algorithms trained on your dataset to generate the model candidates for an Autopilot job.

" }, "AutoMLAlgorithms":{ "type":"list", @@ -5213,7 +5219,7 @@ }, "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"

Stores the configuration information for the selection of algorithms used to train the model candidates.

The list of available algorithms to choose from depends on the training mode set in AutoMLJobConfig.Mode.

For the list of all algorithms per training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "documentation":"

Stores the configuration information for the selection of algorithms trained on tabular data.

The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode.

For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" } }, "documentation":"

Stores the configuration information for how a candidate is generated (optional).

" @@ -6181,7 +6187,7 @@ "members":{ "AlgorithmsConfig":{ "shape":"AutoMLAlgorithmsConfig", - "documentation":"

Stores the configuration information for the selection of algorithms used to train model candidates on tabular data.

The list of available algorithms to choose from depends on the training mode set in TabularJobConfig.Mode.

For the list of all algorithms per problem type and training mode, see AutoMLAlgorithmConfig.

For more information on each algorithm, see the Algorithm support section in Autopilot developer guide.

" + "documentation":"

Your Autopilot job trains a default set of algorithms on your dataset. For tabular and time-series data, you can customize the algorithm list by selecting a subset of algorithms for your problem type.

AlgorithmsConfig stores the customized selection of algorithms to train on your data.
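As a hedged sketch under assumed names (the job name, bucket, role ARN, and column names are placeholders), a time-series AutoML V2 job restricted to a subset of the newly supported forecasting algorithms:

    import boto3

    sm = boto3.client("sagemaker")

    sm.create_auto_ml_job_v2(
        AutoMLJobName="example-forecast-job",
        AutoMLJobInputDataConfig=[{
            "ChannelType": "training",
            "DataSource": {"S3DataSource": {
                "S3DataType": "S3Prefix",
                "S3Uri": "s3://amzn-s3-demo-bucket/forecast/",
            }},
        }],
        OutputDataConfig={"S3OutputPath": "s3://amzn-s3-demo-bucket/output/"},
        RoleArn="arn:aws:iam::123456789012:role/SageMakerAutoMLRole",
        AutoMLProblemTypeConfig={
            "TimeSeriesForecastingJobConfig": {
                "ForecastFrequency": "D",
                "ForecastHorizon": 14,
                "TimeSeriesConfig": {
                    "TargetAttributeName": "demand",
                    "TimestampAttributeName": "ts",
                    "ItemIdentifierAttributeName": "item_id",
                },
                # New in this release: per-algorithm candidate generation
                # for the time-series modality.
                "CandidateGenerationConfig": {
                    "AlgorithmsConfig": [
                        {"AutoMLAlgorithms": ["deepar", "ets", "arima"]}
                    ]
                },
            }
        },
    )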

" } }, "documentation":"

Stores the configuration information for how model candidates are generated using an AutoML job V2.

" @@ -9647,6 +9653,14 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package. If you want to clone a model package, set it to the model package Amazon Resource Name (ARN). If you want to register a model, set it to the model ARN.

" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.
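A sketch of registering a model version with the new fields; the group name, KMS key ARN, source model ARN, and card content are placeholders:

    import boto3
    import json

    sm = boto3.client("sagemaker")

    # A simplified model card: model_overview carries the model_creator and
    # model_artifact properties, per the schema notes above.
    card = json.dumps({
        "model_overview": {
            "model_creator": "example-team",
            "model_artifact": ["s3://amzn-s3-demo-bucket/model.tar.gz"],
        }
    })

    sm.create_model_package(
        ModelPackageGroupName="example-group",
        ModelApprovalStatus="PendingManualApproval",
        SecurityConfig={"KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE-KEY"},
        ModelCard={"ModelCardContent": card, "ModelCardStatus": "Draft"},
        SourceUri="arn:aws:sagemaker:us-east-1:123456789012:model/example-model",
    )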

" } } }, @@ -14910,6 +14924,14 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" + }, + "SecurityConfig":{ + "shape":"ModelPackageSecurityConfig", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.

" } } }, @@ -22037,7 +22059,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -22737,7 +22759,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" } } }, @@ -25121,7 +25143,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -25608,7 +25630,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" + "documentation":"

This parameter defines the maximum number of results that can be returned in a single response. The MaxResults parameter is an upper bound, not a target. If there are more results available than the value specified, a NextToken is provided in the response. The NextToken indicates that the user should get the next set of results by providing this token as a part of a subsequent call. The default value for MaxResults is 10.

" }, "SortOrder":{ "shape":"SortOrder", @@ -27045,6 +27067,8 @@ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" }, + "SecurityConfig":{"shape":"ModelPackageSecurityConfig"}, + "ModelCard":{"shape":"ModelPackageModelCard"}, "Tags":{ "shape":"TagList", "documentation":"

A list of the tags associated with the model package. For more information, see Tagging Amazon Web Services resources in the Amazon Web Services General Reference Guide.

" @@ -27234,6 +27258,31 @@ "type":"list", "member":{"shape":"ModelPackageGroupSummary"} }, + "ModelPackageModelCard":{ + "type":"structure", + "members":{ + "ModelCardContent":{ + "shape":"ModelCardContent", + "documentation":"

The content of the model card.

" + }, + "ModelCardStatus":{ + "shape":"ModelCardStatus", + "documentation":"

The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.

" + } + }, + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.

" + }, + "ModelPackageSecurityConfig":{ + "type":"structure", + "required":["KmsKeyId"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The KMS Key ID (KMSKeyId) used for encryption of model package information.

" + } + }, + "documentation":"

An optional Key Management Service key to encrypt, decrypt, and re-encrypt model package information for regulated workloads with highly sensitive data.

" + }, "ModelPackageSortBy":{ "type":"string", "enum":[ @@ -34438,7 +34487,8 @@ "HolidayConfig":{ "shape":"HolidayConfig", "documentation":"

The collection of holiday featurization attributes used to incorporate national holiday information into your forecasting model.

" - } + }, + "CandidateGenerationConfig":{"shape":"CandidateGenerationConfig"} }, "documentation":"

The collection of settings used by an AutoML job V2 for the time-series forecasting problem type.

" }, @@ -36762,6 +36812,10 @@ "SourceUri":{ "shape":"ModelPackageSourceUri", "documentation":"

The URI of the source for the model package.

" + }, + "ModelCard":{ + "shape":"ModelPackageModelCard", + "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model card associated with the model package, see View the Details of a Model Version.

" } } }, From 2684c0277fab8f32bb86d9d27bd9225d0799c4ee Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 30 May 2024 18:07:54 +0000 Subject: [PATCH 2/3] Update endpoints model --- botocore/data/endpoints.json | 5 ----- 1 file changed, 5 deletions(-) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 06ea8df845..20db4ac34e 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -112,11 +112,6 @@ } }, "services" : { - "a4b" : { - "endpoints" : { - "us-east-1" : { } - } - }, "access-analyzer" : { "endpoints" : { "af-south-1" : { }, From 2d4ada43b5325c1cd8b8df47db8c7944282f3808 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 30 May 2024 18:08:45 +0000 Subject: [PATCH 3/3] Bumping version to 1.34.116 --- .changes/1.34.116.json | 42 +++++++++++++++++++ .../next-release/api-change-acm-23445.json | 5 --- .../api-change-bedrockagent-86100.json | 5 --- .../api-change-bedrockruntime-15865.json | 5 --- .../api-change-cloudtrail-36744.json | 5 --- .../api-change-connect-97102.json | 5 --- .../api-change-emrserverless-68781.json | 5 --- .../next-release/api-change-rds-71252.json | 5 --- .../api-change-sagemaker-58609.json | 5 --- CHANGELOG.rst | 13 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 12 files changed, 57 insertions(+), 42 deletions(-) create mode 100644 .changes/1.34.116.json delete mode 100644 .changes/next-release/api-change-acm-23445.json delete mode 100644 .changes/next-release/api-change-bedrockagent-86100.json delete mode 100644 .changes/next-release/api-change-bedrockruntime-15865.json delete mode 100644 .changes/next-release/api-change-cloudtrail-36744.json delete mode 100644 .changes/next-release/api-change-connect-97102.json delete mode 100644 .changes/next-release/api-change-emrserverless-68781.json delete mode 100644 .changes/next-release/api-change-rds-71252.json delete mode 100644 .changes/next-release/api-change-sagemaker-58609.json diff --git a/.changes/1.34.116.json b/.changes/1.34.116.json new file mode 100644 index 0000000000..c256a14397 --- /dev/null +++ b/.changes/1.34.116.json @@ -0,0 +1,42 @@ +[ + { + "category": "``acm``", + "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing.", + "type": "api-change" + }, + { + "category": "``bedrock-agent``", + "description": "With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2.", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "This release adds Converse and ConverseStream APIs to Bedrock Runtime", + "type": "api-change" + }, + { + "category": "``cloudtrail``", + "description": "CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. 
For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "The release adds support for spark structured streaming.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Updates Amazon RDS documentation for Aurora Postgres DBname.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Adds Model Card information as a new component to Model Package. Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-acm-23445.json b/.changes/next-release/api-change-acm-23445.json deleted file mode 100644 index 647973af68..0000000000 --- a/.changes/next-release/api-change-acm-23445.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``acm``", - "description": "add v2 smoke tests and smithy smokeTests trait for SDK testing." -} diff --git a/.changes/next-release/api-change-bedrockagent-86100.json b/.changes/next-release/api-change-bedrockagent-86100.json deleted file mode 100644 index 5ef0feb893..0000000000 --- a/.changes/next-release/api-change-bedrockagent-86100.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-agent``", - "description": "With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2." -} diff --git a/.changes/next-release/api-change-bedrockruntime-15865.json b/.changes/next-release/api-change-bedrockruntime-15865.json deleted file mode 100644 index 7ed150636d..0000000000 --- a/.changes/next-release/api-change-bedrockruntime-15865.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-runtime``", - "description": "This release adds Converse and ConverseStream APIs to Bedrock Runtime" -} diff --git a/.changes/next-release/api-change-cloudtrail-36744.json b/.changes/next-release/api-change-cloudtrail-36744.json deleted file mode 100644 index 75fb0d4a77..0000000000 --- a/.changes/next-release/api-change-cloudtrail-36744.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``cloudtrail``", - "description": "CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour." 
-} diff --git a/.changes/next-release/api-change-connect-97102.json b/.changes/next-release/api-change-connect-97102.json deleted file mode 100644 index 2634af1eeb..0000000000 --- a/.changes/next-release/api-change-connect-97102.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``connect``", - "description": "Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API" -} diff --git a/.changes/next-release/api-change-emrserverless-68781.json b/.changes/next-release/api-change-emrserverless-68781.json deleted file mode 100644 index 23343c662a..0000000000 --- a/.changes/next-release/api-change-emrserverless-68781.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``emr-serverless``", - "description": "The release adds support for spark structured streaming." -} diff --git a/.changes/next-release/api-change-rds-71252.json b/.changes/next-release/api-change-rds-71252.json deleted file mode 100644 index c95b8b33ab..0000000000 --- a/.changes/next-release/api-change-rds-71252.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``rds``", - "description": "Updates Amazon RDS documentation for Aurora Postgres DBname." -} diff --git a/.changes/next-release/api-change-sagemaker-58609.json b/.changes/next-release/api-change-sagemaker-58609.json deleted file mode 100644 index f2cfb5cf7a..0000000000 --- a/.changes/next-release/api-change-sagemaker-58609.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sagemaker``", - "description": "Adds Model Card information as a new component to Model Package. Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 74f60a2896..fcf6b6e60f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,19 @@ CHANGELOG ========= +1.34.116 +======== + +* api-change:``acm``: add v2 smoke tests and smithy smokeTests trait for SDK testing. +* api-change:``bedrock-agent``: With this release, Knowledge bases for Bedrock adds support for Titan Text Embedding v2. +* api-change:``bedrock-runtime``: This release adds Converse and ConverseStream APIs to Bedrock Runtime +* api-change:``cloudtrail``: CloudTrail Lake returns PartitionKeys in the GetEventDataStore API response. Events are grouped into partitions based on these keys for better query performance. For example, the calendarday key groups events by day, while combining the calendarday key with the hour key groups them by day and hour. +* api-change:``connect``: Adding associatedQueueIds as a SearchCriteria and response field to the SearchRoutingProfiles API +* api-change:``emr-serverless``: The release adds support for spark structured streaming. +* api-change:``rds``: Updates Amazon RDS documentation for Aurora Postgres DBname. +* api-change:``sagemaker``: Adds Model Card information as a new component to Model Package. Autopilot launches algorithm selection for TimeSeries modality to generate AutoML candidates per algorithm. 
+ + 1.34.115 ======== diff --git a/botocore/__init__.py b/botocore/__init__.py index 8d2030e055..922edfac76 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.115' +__version__ = '1.34.116' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 0b8ec6d25b..4432488305 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.34.1' # The full version, including alpha/beta/rc tags. -release = '1.34.115' +release = '1.34.116' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.