diff --git a/.changes/1.35.53.json b/.changes/1.35.53.json new file mode 100644 index 0000000000..bbb193551a --- /dev/null +++ b/.changes/1.35.53.json @@ -0,0 +1,37 @@ +[ + { + "category": "``amp``", + "description": "Added support for UpdateScraper API, to enable updating collector configuration in-place", + "type": "api-change" + }, + { + "category": "``autoscaling``", + "description": "Adds bake time for Auto Scaling group Instance Refresh", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "Add `podNamespace` to `EksAttemptDetail` and `containerID` to `EksAttemptContainerDetail`.", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "Add UDP support for AWS PrivateLink and dual-stack Network Load Balancers", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add schedule support for AWS Glue column statistics", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "SageMaker HyperPod adds scale-down at instance level via BatchDeleteClusterNodes API and group level via UpdateCluster API. SageMaker Training exposes secondary job status in TrainingJobSummary from ListTrainingJobs API. 
SageMaker now supports G6, G6e, P5e instances for HyperPod and Training.", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "This release enables customers to provide the email template content in the SESv2 SendEmail and SendBulkEmail APIs instead of the name or the ARN of a stored email template.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e24b2501c5..9e285de489 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.35.53 +======= + +* api-change:``amp``: Added support for UpdateScraper API, to enable updating collector configuration in-place +* api-change:``autoscaling``: Adds bake time for Auto Scaling group Instance Refresh +* api-change:``batch``: Add `podNamespace` to `EksAttemptDetail` and `containerID` to `EksAttemptContainerDetail`. +* api-change:``elbv2``: Add UDP support for AWS PrivateLink and dual-stack Network Load Balancers +* api-change:``glue``: Add schedule support for AWS Glue column statistics +* api-change:``sagemaker``: SageMaker HyperPod adds scale-down at instance level via BatchDeleteClusterNodes API and group level via UpdateCluster API. SageMaker Training exposes secondary job status in TrainingJobSummary from ListTrainingJobs API. SageMaker now supports G6, G6e, P5e instances for HyperPod and Training. +* api-change:``sesv2``: This release enables customers to provide the email template content in the SESv2 SendEmail and SendBulkEmail APIs instead of the name or the ARN of a stored email template. 
+ + 1.35.52 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 52d5707539..ef4a32cdcb 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.52' +__version__ = '1.35.53' class NullHandler(logging.Handler): diff --git a/botocore/config.py b/botocore/config.py index 587dc95ad8..eee55bb06d 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -115,10 +115,10 @@ class Config: * ``virtual`` -- Addressing style is always virtual. The name of the bucket must be DNS compatible or an exception will be thrown. - Endpoints will be addressed as such: ``mybucket.s3.amazonaws.com`` + Endpoints will be addressed as such: ``amzn-s3-demo-bucket.s3.amazonaws.com`` * ``path`` -- Addressing style is always by path. Endpoints will be - addressed as such: ``s3.amazonaws.com/mybucket`` + addressed as such: ``s3.amazonaws.com/amzn-s3-demo-bucket`` * ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use when the region is configured to be us-east-1. Values must be a diff --git a/botocore/data/amp/2020-08-01/service-2.json b/botocore/data/amp/2020-08-01/service-2.json index fb83866421..2ba81375e7 100644 --- a/botocore/data/amp/2020-08-01/service-2.json +++ b/botocore/data/amp/2020-08-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"aps", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Prometheus Service", "serviceId":"amp", "signatureVersion":"v4", "signingName":"aps", - "uid":"amp-2020-08-01" + "uid":"amp-2020-08-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateAlertManagerDefinition":{ @@ -90,7 +92,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace.

If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide.

You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one.

The scrapeConfiguration parameter contains the base64-encoded version of the YAML configuration file.

For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide.

", + "documentation":"

The CreateScraper operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more.

An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide.

The scrapeConfiguration parameter contains the base-64 encoded YAML configuration for the scraper.

For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.

", "idempotent":true }, "CreateWorkspace":{ @@ -364,7 +366,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are workspaces and rule groups namespaces.

" + "documentation":"

The ListTagsForResource operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are scrapers, workspaces, and rule groups namespaces.

" }, "ListWorkspaces":{ "name":"ListWorkspaces", @@ -441,7 +443,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

" + "documentation":"

The TagResource operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces.

If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. To remove a tag, use UntagResource.

" }, "UntagResource":{ "name":"UntagResource", @@ -459,7 +461,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"} ], - "documentation":"

Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.

", + "documentation":"

Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces.

", "idempotent":true }, "UpdateLoggingConfiguration":{ @@ -481,6 +483,27 @@ "documentation":"

Updates the log group ARN or the workspace ID of the current logging configuration.

", "idempotent":true }, + "UpdateScraper":{ + "name":"UpdateScraper", + "http":{ + "method":"PUT", + "requestUri":"/scrapers/{scraperId}", + "responseCode":202 + }, + "input":{"shape":"UpdateScraperRequest"}, + "output":{"shape":"UpdateScraperResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Updates an existing scraper.

You can't use this function to update the source from which the scraper is collecting metrics. To change the source, delete the scraper and create a new one.

", + "idempotent":true + }, "UpdateWorkspaceAlias":{ "name":"UpdateWorkspaceAlias", "http":{ @@ -549,7 +572,7 @@ "documentation":"

A structure that displays the current status of the alert manager definition..

" } }, - "documentation":"

The details of an alert manager definition.

" + "documentation":"

The details of an alert manager definition. It is the configuration for the alert manager, including information about receivers for routing alerts.

" }, "AlertManagerDefinitionStatus":{ "type":"structure", @@ -673,7 +696,7 @@ }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"

The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API.

" + "documentation":"

The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation.

" }, "workspaceId":{ "shape":"WorkspaceId", @@ -766,7 +789,7 @@ "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"

(optional) a name to associate with the scraper. This is for your use, and does not need to be unique.

" + "documentation":"

(optional) An alias to associate with the scraper. This is for your use, and does not need to be unique.

" }, "clientToken":{ "shape":"IdempotencyToken", @@ -779,7 +802,7 @@ }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

" + "documentation":"

The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

" }, "source":{ "shape":"Source", @@ -1134,7 +1157,7 @@ "members":{ "ampConfiguration":{ "shape":"AmpConfiguration", - "documentation":"

The Amazon Managed Service for Prometheusworkspace to send metrics to.

" + "documentation":"

The Amazon Managed Service for Prometheus workspace to send metrics to.

" } }, "documentation":"

Where to send the metrics from a scraper.

", @@ -1343,7 +1366,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the resource to list tages for. Must be a workspace or rule groups namespace resource.

", + "documentation":"

The ARN of the resource to list tags for. Must be a workspace, scraper, or rule groups namespace resource.

", "location":"uri", "locationName":"resourceArn" } @@ -1438,7 +1461,7 @@ "documentation":"

The ID of the workspace the logging configuration is for.

" } }, - "documentation":"

Contains information about the logging configuration.

" + "documentation":"

Contains information about the logging configuration for the workspace.

" }, "LoggingConfigurationStatus":{ "type":"structure", @@ -1617,7 +1640,7 @@ "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"

The ARN of the rule groups namespace.

" + "documentation":"

The ARN of the rule groups namespace. For example, arn:aws:aps:<region>:123456789012:rulegroupsnamespace/ws-example1-1234-abcd-5678-ef90abcd1234/rulesfile1.

" }, "createdAt":{ "shape":"Timestamp", @@ -1730,12 +1753,12 @@ "documentation":"

The base 64 encoded scrape configuration file.

" } }, - "documentation":"

A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

", + "documentation":"

A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.

", "union":true }, "ScraperAlias":{ "type":"string", - "documentation":"

A user-assigned scraper alias.

", + "documentation":"

An optional user-assigned scraper alias.

", "max":100, "min":1, "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" @@ -1764,7 +1787,7 @@ }, "arn":{ "shape":"ScraperArn", - "documentation":"

The Amazon Resource Name (ARN) of the scraper.

" + "documentation":"

The Amazon Resource Name (ARN) of the scraper. For example, arn:aws:aps:<region>:123456798012:scraper/s-example1-1234-abcd-5678-ef9012abcd34.

" }, "createdAt":{ "shape":"Timestamp", @@ -1780,15 +1803,15 @@ }, "roleArn":{ "shape":"IamRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.

For example, arn:aws:iam::123456789012:role/service-role/AmazonGrafanaServiceRole-12example.

" }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"

The configuration file in use by the scraper.

" + "documentation":"

The configuration in use by the scraper.

" }, "scraperId":{ "shape":"ScraperId", - "documentation":"

The ID of the scraper.

" + "documentation":"

The ID of the scraper. For example, s-example1-1234-abcd-5678-ef9012abcd34.

" }, "source":{ "shape":"Source", @@ -1846,9 +1869,11 @@ "documentation":"

State of a scraper.

", "enum":[ "CREATING", + "UPDATING", "ACTIVE", "DELETING", "CREATION_FAILED", + "UPDATE_FAILED", "DELETION_FAILED" ] }, @@ -2015,13 +2040,13 @@ "type":"map", "key":{ "shape":"TagKey", - "documentation":"

The key of the tag. May not begin with aws:.

" + "documentation":"

The key of the tag. Must not begin with aws:.

" }, "value":{ "shape":"TagValue", "documentation":"

The value of the tag.

" }, - "documentation":"

The list of tags assigned to the resource.

", + "documentation":"

A tag associated with a resource.

", "max":50, "min":0 }, @@ -2034,13 +2059,13 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the workspace or rule groups namespace to apply tags to.

", + "documentation":"

The ARN of the resource to apply tags to.

", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagMap", - "documentation":"

The list of tag keys and values to associate with the resource.

Keys may not begin with aws:.

" + "documentation":"

The list of tag keys and values to associate with the resource.

Keys must not begin with aws:.

" } } }, @@ -2096,7 +2121,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"

The ARN of the workspace or rule groups namespace.

", + "documentation":"

The ARN of the resource from which to remove a tag.

", "location":"uri", "locationName":"resourceArn" }, @@ -2149,6 +2174,61 @@ }, "documentation":"

Represents the output of an UpdateLoggingConfiguration operation.

" }, + "UpdateScraperRequest":{ + "type":"structure", + "required":["scraperId"], + "members":{ + "alias":{ + "shape":"ScraperAlias", + "documentation":"

The new alias of the scraper.

" + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.

", + "idempotencyToken":true + }, + "destination":{ + "shape":"Destination", + "documentation":"

The new Amazon Managed Service for Prometheus workspace to send metrics to.

" + }, + "scrapeConfiguration":{ + "shape":"ScrapeConfiguration", + "documentation":"

Contains the base-64 encoded YAML configuration for the scraper.

For more information about configuring a scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.

" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the scraper to update.

", + "location":"uri", + "locationName":"scraperId" + } + } + }, + "UpdateScraperResponse":{ + "type":"structure", + "required":[ + "arn", + "scraperId", + "status" + ], + "members":{ + "arn":{ + "shape":"ScraperArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated scraper.

" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"

The ID of the updated scraper.

" + }, + "status":{ + "shape":"ScraperStatus", + "documentation":"

A structure that displays the current status of the scraper.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The list of tag keys and values that are associated with the scraper.

" + } + } + }, "UpdateWorkspaceAliasRequest":{ "type":"structure", "required":["workspaceId"], @@ -2258,11 +2338,11 @@ "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"

The alias that is assigned to this workspace to help identify it. It may not be unique.

" + "documentation":"

The alias that is assigned to this workspace to help identify it. It does not need to be unique.

" }, "arn":{ "shape":"WorkspaceArn", - "documentation":"

The ARN of the workspace.

" + "documentation":"

The ARN of the workspace. For example, arn:aws:aps:<region>:123456789012:workspace/ws-example1-1234-abcd-5678-ef90abcd1234.

" }, "createdAt":{ "shape":"Timestamp", @@ -2274,7 +2354,7 @@ }, "prometheusEndpoint":{ "shape":"Uri", - "documentation":"

The Prometheus endpoint available for this workspace.

" + "documentation":"

The Prometheus endpoint available for this workspace. For example, https://aps-workspaces.<region>.amazonaws.com/workspaces/ws-example1-1234-abcd-5678-ef90abcd1234/api/v1/.

" }, "status":{ "shape":"WorkspaceStatus", @@ -2286,7 +2366,7 @@ }, "workspaceId":{ "shape":"WorkspaceId", - "documentation":"

The unique ID for the workspace.

" + "documentation":"

The unique ID for the workspace. For example, ws-example1-1234-abcd-5678-ef90abcd1234.

" } }, "documentation":"

The full details about one Amazon Managed Service for Prometheus workspace in your account.

" @@ -2331,7 +2411,7 @@ "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"

The alias that is assigned to this workspace to help identify it. It may not be unique.

" + "documentation":"

The alias that is assigned to this workspace to help identify it. It does not need to be unique.

" }, "arn":{ "shape":"WorkspaceArn", diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 04b3760248..63edfab60a 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -1656,6 +1656,11 @@ "type":"list", "member":{"shape":"XmlStringMaxLen255"} }, + "BakeTime":{ + "type":"integer", + "max":172800, + "min":0 + }, "BareMetal":{ "type":"string", "enum":[ @@ -3174,7 +3179,7 @@ }, "Status":{ "shape":"InstanceRefreshStatus", - "documentation":"

The current status for the instance refresh operation:

" + "documentation":"

The current status for the instance refresh operation:

" }, "StatusReason":{ "shape":"XmlStringMaxLen1023", @@ -3271,7 +3276,8 @@ "Cancelled", "RollbackInProgress", "RollbackFailed", - "RollbackSuccessful" + "RollbackSuccessful", + "Baking" ] }, "InstanceRefreshWarmPoolProgress":{ @@ -4794,6 +4800,10 @@ "MaxHealthyPercentage":{ "shape":"IntPercent100To200", "documentation":"

Specifies the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 100 to 200.

If you specify MaxHealthyPercentage, you must also specify MinHealthyPercentage, and the difference between them cannot be greater than 100. A larger range increases the number of instances that can be replaced at the same time.

If you do not specify this property, the default is 100 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.

" + }, + "BakeTime":{ + "shape":"BakeTime", + "documentation":"

The amount of time, in seconds, to wait at the end of an instance refresh before the instance refresh is considered complete.

" } }, "documentation":"

Describes the preferences for an instance refresh.

" @@ -5262,7 +5272,7 @@ }, "Preferences":{ "shape":"RefreshPreferences", - "documentation":"

Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:

" + "documentation":"

Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby state or protected from scale in are found. You can also choose to enable additional features, such as the following:

" } } }, diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 1eac45acda..2946cad2ef 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -596,7 +596,7 @@ }, "reason":{ "shape":"String", - "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.

" + "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs.

This parameter has a limit of 1024 characters.

" } }, "documentation":"

Contains the parameters for CancelJob.

" @@ -1219,7 +1219,7 @@ }, "schedulingPolicyArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue uses a fair share scheduling policy. If this parameter isn't specified, the job queue uses a first in, first out (FIFO) scheduling policy. After a job queue is created, you can replace but can't remove the fair share scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name . An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.

" + "documentation":"

The Amazon Resource Name (ARN) of the fair share scheduling policy. Job queues that don't have a scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a scheduling policy, it can be replaced but can't be removed.

The format is aws:Partition:batch:Region:Account:scheduling-policy/Name .

An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy.

A job queue without a scheduling policy is scheduled as a FIFO job queue and can't have a scheduling policy added. Jobs queues with a scheduling policy can have a maximum of 500 active fair share identifiers. When the limit has been reached, submissions of any jobs that add a new fair share identifier fail.

" }, "priority":{ "shape":"Integer", @@ -1235,7 +1235,7 @@ }, "jobStateTimeLimitActions":{ "shape":"JobStateTimeLimitActions", - "documentation":"

The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed.

" + "documentation":"

The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)

" } }, "documentation":"

Contains the parameters for CreateJobQueue.

" @@ -1616,7 +1616,7 @@ "members":{ "taskProperties":{ "shape":"ListEcsTaskProperties", - "documentation":"

An object that contains the properties for the Amazon ECS task definition of a job.

This object is currently limited to one element.

" + "documentation":"

An object that contains the properties for the Amazon ECS task definition of a job.

This object is currently limited to one task element. However, the task element can run up to 10 containers.

" } }, "documentation":"

An object that contains the properties for the Amazon ECS resources of a job.

" @@ -1749,6 +1749,10 @@ "shape":"String", "documentation":"

The name of a container.

" }, + "containerID":{ + "shape":"String", + "documentation":"

The ID for the container.

" + }, "exitCode":{ "shape":"Integer", "documentation":"

The exit code returned for the job attempt. A non-zero exit code is considered failed.

" @@ -1783,6 +1787,10 @@ "shape":"String", "documentation":"

The name of the pod for this job attempt.

" }, + "podNamespace":{ + "shape":"String", + "documentation":"

The namespace of the Amazon EKS cluster that the pod exists in.

" + }, "nodeName":{ "shape":"String", "documentation":"

The name of the node for this job attempt.

" @@ -2109,11 +2117,11 @@ }, "containers":{ "shape":"EksContainers", - "documentation":"

The properties of the container that's used on the Amazon EKS pod.

" + "documentation":"

The properties of the container that's used on the Amazon EKS pod.

This object is limited to 10 elements.

" }, "initContainers":{ "shape":"EksContainers", - "documentation":"

These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

This object is limited to 10 elements

" + "documentation":"

These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

This object is limited to 10 elements.

" }, "volumes":{ "shape":"EksVolumes", @@ -2189,7 +2197,7 @@ }, "initContainers":{ "shape":"EksContainerOverrideList", - "documentation":"

The overrides for the conatainers defined in the Amazon EKS pod. These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

This object is limited to 10 elements

" + "documentation":"

The overrides for the initContainers defined in the Amazon EKS pod. These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

" }, "metadata":{ "shape":"EksMetadata", @@ -3973,7 +3981,7 @@ }, "reason":{ "shape":"String", - "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.

" + "documentation":"

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs.

This parameter has a limit of 1024 characters.

" } }, "documentation":"

Contains the parameters for TerminateJob.

" @@ -4137,7 +4145,7 @@ }, "jobStateTimeLimitActions":{ "shape":"JobStateTimeLimitActions", - "documentation":"

The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed.

" + "documentation":"

The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)

" } }, "documentation":"

Contains the parameters for UpdateJobQueue.

" diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 047b643e0a..28d71382af 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -787,7 +787,7 @@ {"shape":"InvalidTargetException"}, {"shape":"TooManyRegistrationsForTargetIdException"} ], - "documentation":"

Registers the specified targets with the specified target group.

If the target is an EC2 instance, it must be in the running state when you register it.

By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

" + "documentation":"

Registers the specified targets with the specified target group.

If the target is an EC2 instance, it must be in the running state when you register it.

By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.

With a Network Load Balancer, you can't register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.

" }, "RemoveListenerCertificates":{ "name":"RemoveListenerCertificates", @@ -1287,6 +1287,10 @@ "LoadBalancerAddresses":{ "shape":"LoadBalancerAddresses", "documentation":"

[Network Load Balancers] If you need static IP addresses for your load balancer, you can specify one Elastic IP address per Availability Zone when you create an internal-facing load balancer. For internal load balancers, you can specify a private IP address from the IPv4 range of the subnet.

" + }, + "SourceNatIpv6Prefixes":{ + "shape":"SourceNatIpv6Prefixes", + "documentation":"

[Network Load Balancers with UDP listeners] The IPv6 prefixes to use for source NAT. For each subnet, specify an IPv6 prefix (/80 netmask) from the subnet CIDR block or auto_assigned to use an IPv6 prefix selected at random from the subnet CIDR block.

" } }, "documentation":"

Information about an Availability Zone.

" @@ -1388,11 +1392,11 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can’t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.

" + "documentation":"

The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can’t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.

" }, "Port":{ "shape":"Port", - "documentation":"

The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.

" + "documentation":"

The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.

" }, "SslPolicy":{ "shape":"SslPolicyName", @@ -1439,11 +1443,11 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" + "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.

" + "documentation":"

The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets.

" }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -1451,7 +1455,7 @@ }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", - "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

You cannot specify a scheme for a Gateway Load Balancer.

" + "documentation":"

The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.

The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.

The default is an Internet-facing load balancer.

You can't specify a scheme for a Gateway Load Balancer.

" }, "Tags":{ "shape":"TagList", @@ -1463,11 +1467,15 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

Note: Internal load balancers must use the ipv4 IP address type.

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

The IP address type. Internal load balancers must use ipv4.

[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).

[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).

" }, "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", "documentation":"

[Application Load Balancers on Outposts] The ID of the customer-owned address pool (CoIP pool).

" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"

[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off.

" } } }, @@ -1554,7 +1562,7 @@ }, "HealthCheckEnabled":{ "shape":"HealthCheckEnabled", - "documentation":"

Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled.

" + "documentation":"

Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and can't be disabled.

" }, "HealthCheckPath":{ "shape":"Path", @@ -1590,7 +1598,7 @@ }, "IpAddressType":{ "shape":"TargetGroupIpAddressTypeEnum", - "documentation":"

The type of IP address used for this target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4.

" + "documentation":"

The IP address type. The default value is ipv4.

" } } }, @@ -1613,7 +1621,7 @@ "members":{ "Name":{ "shape":"TrustStoreName", - "documentation":"

The name of the trust store.

This name must be unique per region and cannot be changed after creation.

" + "documentation":"

The name of the trust store.

This name must be unique per region and can't be changed after creation.

" }, "CaCertificatesBundleS3Bucket":{ "shape":"S3Bucket", @@ -1654,7 +1662,7 @@ "type":"structure", "members":{ }, - "documentation":"

The specified association cannot be within the same account.

", + "documentation":"

The specified association can't be within the same account.

", "error":{ "code":"DeleteAssociationSameAccount", "httpStatusCode":400, @@ -2309,6 +2317,13 @@ }, "exception":true }, + "EnablePrefixForIpv6SourceNatEnum":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{"type":"string"}, "EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum":{ "type":"string", @@ -2775,7 +2790,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Application Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers and Gateway Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

The type of IP addresses used for public or private connections by the subnets attached to your load balancer.

[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).

[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).

" }, "CustomerOwnedIpv4Pool":{ "shape":"CustomerOwnedIpv4Pool", @@ -2784,6 +2799,10 @@ "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{ "shape":"EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic", "documentation":"

Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.

" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"

[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off.

" } }, "documentation":"

Information about a load balancer.

" @@ -2824,7 +2843,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" + "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by both Application Load Balancers and Network Load Balancers:

The following attributes are supported by only Application Load Balancers:

The following attributes are supported by only Network Load Balancers:

" }, "Value":{ "shape":"LoadBalancerAttributeValue", @@ -2967,11 +2986,11 @@ }, "Port":{ "shape":"Port", - "documentation":"

The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer.

" + "documentation":"

The port for connections from clients to the load balancer. You can't specify a port for a Gateway Load Balancer.

" }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.

" + "documentation":"

The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.

" }, "SslPolicy":{ "shape":"SslPolicyName", @@ -3300,7 +3319,7 @@ "members":{ "Protocol":{ "shape":"RedirectActionProtocol", - "documentation":"

The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.

" + "documentation":"

The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.

" }, "Port":{ "shape":"RedirectActionPort", @@ -3595,7 +3614,7 @@ "documentation":"

Information for a source IP condition. Specify only when Field is source-ip.

" } }, - "documentation":"

Information about a condition for a rule.

Each rule can optionally include up to one of each of the following conditions: http-request-method, host-header, path-pattern, and source-ip. Each rule can also optionally include one or more of each of the following conditions: http-header and query-string. Note that the value for a condition cannot be empty.

For more information, see Quotas for your Application Load Balancers.

" + "documentation":"

Information about a condition for a rule.

Each rule can optionally include up to one of each of the following conditions: http-request-method, host-header, path-pattern, and source-ip. Each rule can also optionally include one or more of each of the following conditions: http-header and query-string. Note that the value for a condition can't be empty.

For more information, see Quotas for your Application Load Balancers.

" }, "RuleConditionList":{ "type":"list", @@ -3673,7 +3692,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

Note: Internal load balancers must use the ipv4 IP address type.

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.

[Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

The IP address type. Internal load balancers must use ipv4.

[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).

Application Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.

[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).

" } } }, @@ -3749,15 +3768,19 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" + "documentation":"

The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.

[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.

[Application Load Balancers on Outposts] You must specify one Outpost subnet.

[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.

[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.

[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.

" }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).

[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.

[Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).

" + "documentation":"

The IP address type.

[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).

[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).

" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"

[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off.

" } } }, @@ -3770,7 +3793,11 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"

[Application Load Balancers] The IP address type.

[Network Load Balancers] The IP address type.

[Gateway Load Balancers] The IP address type.

" + "documentation":"

The IP address type.

" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"

[Network Load Balancers] Indicates whether to use an IPv6 prefix from each subnet for source NAT.

" } } }, @@ -3784,6 +3811,11 @@ }, "documentation":"

Information about a source IP condition.

You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy not the IP address of the client.

" }, + "SourceNatIpv6Prefix":{"type":"string"}, + "SourceNatIpv6Prefixes":{ + "type":"list", + "member":{"shape":"SourceNatIpv6Prefix"} + }, "SslPolicies":{ "type":"list", "member":{"shape":"SslPolicy"} @@ -3842,6 +3874,10 @@ "IPv6Address":{ "shape":"IPv6Address", "documentation":"

[Network Load Balancers] The IPv6 address.

" + }, + "SourceNatIpv6Prefix":{ + "shape":"SourceNatIpv6Prefix", + "documentation":"

[Network Load Balancers with UDP listeners] The IPv6 prefix to use for source NAT. Specify an IPv6 prefix (/80 netmask) from the subnet CIDR block or auto_assigned to use an IPv6 prefix selected at random from the subnet CIDR block.

" } }, "documentation":"

Information about a subnet mapping.

" @@ -4034,7 +4070,7 @@ }, "IpAddressType":{ "shape":"TargetGroupIpAddressTypeEnum", - "documentation":"

The type of IP address used for this target group. The possible values are ipv4 and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4.

" + "documentation":"

The IP address type. The default value is ipv4.

" } }, "documentation":"

Information about a target group.

" @@ -4061,7 +4097,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

The following attributes are supported only by Gateway Load Balancers:

" + "documentation":"

The name of the attribute.

The following attributes are supported by all load balancers:

The following attributes are supported by Application Load Balancers and Network Load Balancers:

The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:

The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:

The following attributes are supported only by Network Load Balancers:

The following attributes are supported only by Gateway Load Balancers:

" }, "Value":{ "shape":"TargetGroupAttributeValue", diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 43bbe1e77d..59fedef47b 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -20929,7 +20929,12 @@ "tags" : [ "dualstack" ] } ] }, - "eu-south-2" : { }, + "eu-south-2" : { + "variants" : [ { + "hostname" : "textract.eu-south-2.api.aws", + "tags" : [ "dualstack" ] + } ] + }, "eu-west-1" : { "variants" : [ { "hostname" : "textract.eu-west-1.api.aws", @@ -22250,6 +22255,16 @@ "tags" : [ "fips" ] } ] }, + "ap-southeast-5" : { + "credentialScope" : { + "region" : "ap-southeast-5" + }, + "hostname" : "wafv2.ap-southeast-5.amazonaws.com", + "variants" : [ { + "hostname" : "wafv2-fips.ap-southeast-5.amazonaws.com", + "tags" : [ "fips" ] + } ] + }, "ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" @@ -22427,6 +22442,13 @@ "deprecated" : true, "hostname" : "wafv2-fips.ap-southeast-4.amazonaws.com" }, + "fips-ap-southeast-5" : { + "credentialScope" : { + "region" : "ap-southeast-5" + }, + "deprecated" : true, + "hostname" : "wafv2-fips.ap-southeast-5.amazonaws.com" + }, "fips-ca-central-1" : { "credentialScope" : { "region" : "ca-central-1" diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index 6e4716c11d..88431bb37c 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -415,6 +415,25 @@ ], "documentation":"

Creates a classifier in the user's account. This can be a GrokClassifier, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field of the request is present.

" }, + "CreateColumnStatisticsTaskSettings":{ + "name":"CreateColumnStatisticsTaskSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateColumnStatisticsTaskSettingsRequest"}, + "output":{"shape":"CreateColumnStatisticsTaskSettingsResponse"}, + "errors":[ + {"shape":"AlreadyExistsException"}, + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"}, + {"shape":"ColumnStatisticsTaskRunningException"} + ], + "documentation":"

Creates settings for a column statistics task.

" + }, "CreateConnection":{ "name":"CreateConnection", "http":{ @@ -868,6 +887,21 @@ ], "documentation":"

Retrieves table statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is DeleteTable.

" }, + "DeleteColumnStatisticsTaskSettings":{ + "name":"DeleteColumnStatisticsTaskSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteColumnStatisticsTaskSettingsRequest"}, + "output":{"shape":"DeleteColumnStatisticsTaskSettingsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes settings for a column statistics task.

" + }, "DeleteConnection":{ "name":"DeleteConnection", "http":{ @@ -1393,6 +1427,21 @@ ], "documentation":"

Retrieves information about all runs associated with the specified table.

" }, + "GetColumnStatisticsTaskSettings":{ + "name":"GetColumnStatisticsTaskSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetColumnStatisticsTaskSettingsRequest"}, + "output":{"shape":"GetColumnStatisticsTaskSettingsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Gets settings for a column statistics task.

" + }, "GetConnection":{ "name":"GetConnection", "http":{ @@ -3004,6 +3053,22 @@ ], "documentation":"

Starts a column statistics task run, for a specified table and columns.

" }, + "StartColumnStatisticsTaskRunSchedule":{ + "name":"StartColumnStatisticsTaskRunSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartColumnStatisticsTaskRunScheduleRequest"}, + "output":{"shape":"StartColumnStatisticsTaskRunScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Starts a column statistics task run schedule.

" + }, "StartCrawler":{ "name":"StartCrawler", "http":{ @@ -3209,6 +3274,21 @@ ], "documentation":"

Stops a task run for the specified table.

" }, + "StopColumnStatisticsTaskRunSchedule":{ + "name":"StopColumnStatisticsTaskRunSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopColumnStatisticsTaskRunScheduleRequest"}, + "output":{"shape":"StopColumnStatisticsTaskRunScheduleResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Stops a column statistics task run schedule.

" + }, "StopCrawler":{ "name":"StopCrawler", "http":{ @@ -3414,6 +3494,23 @@ ], "documentation":"

Creates or updates table statistics of columns.

The Identity and Access Management (IAM) permission required for this operation is UpdateTable.

" }, + "UpdateColumnStatisticsTaskSettings":{ + "name":"UpdateColumnStatisticsTaskSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateColumnStatisticsTaskSettingsRequest"}, + "output":{"shape":"UpdateColumnStatisticsTaskSettingsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"VersionMismatchException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Updates settings for a column statistics task.

" + }, "UpdateConnection":{ "name":"UpdateConnection", "http":{ @@ -6369,6 +6466,10 @@ "shape":"NameString", "documentation":"

The type of workers being used for generating stats. The default is g.1x.

" }, + "ComputationType":{ + "shape":"ComputationType", + "documentation":"

The type of column statistics computation.

" + }, "Status":{ "shape":"ColumnStatisticsState", "documentation":"

The status of the task run.

" @@ -6421,6 +6522,44 @@ "type":"list", "member":{"shape":"ColumnStatisticsTaskRun"} }, + "ColumnStatisticsTaskSettings":{ + "type":"structure", + "members":{ + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"TableName", + "documentation":"

The name of the table for which to generate column statistics.

" + }, + "Schedule":{ + "shape":"Schedule", + "documentation":"

A schedule for running the column statistics, specified in CRON syntax.

" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"

A list of column names for which to run statistics.

" + }, + "CatalogID":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog in which the database resides.

" + }, + "Role":{ + "shape":"Role", + "documentation":"

The role used for running the column statistics.

" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"

The percentage of data to sample.

" + }, + "SecurityConfiguration":{ + "shape":"CrawlerSecurityConfiguration", + "documentation":"

Name of the security configuration that is used to encrypt CloudWatch logs.

" + } + }, + "documentation":"

The settings for a column statistics task.

" + }, "ColumnStatisticsTaskStoppingException":{ "type":"structure", "members":{ @@ -6507,6 +6646,13 @@ "bzip2" ] }, + "ComputationType":{ + "type":"string", + "enum":[ + "FULL", + "INCREMENTAL" + ] + }, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -7385,6 +7531,57 @@ "members":{ } }, + "CreateColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "Role" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to generate column statistics.

" + }, + "Role":{ + "shape":"NameString", + "documentation":"

The role used for running the column statistics.

" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"

A schedule for running the column statistics, specified in CRON syntax.

" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"

A list of column names for which to run statistics.

" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"

The percentage of data to sample.

" + }, + "CatalogID":{ + "shape":"NameString", + "documentation":"

The ID of the Data Catalog in which the database resides.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

Name of the security configuration that is used to encrypt CloudWatch logs.

" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"

A map of tags.

" + } + } + }, + "CreateColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "CreateConnectionRequest":{ "type":"structure", "required":["ConnectionInput"], @@ -9723,6 +9920,28 @@ "members":{ } }, + "DeleteColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to delete column statistics.

" + } + } + }, + "DeleteColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteConnectionNameList":{ "type":"list", "member":{"shape":"NameString"}, @@ -11649,6 +11868,32 @@ } } }, + "GetColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to retrieve column statistics.

" + } + } + }, + "GetColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + "ColumnStatisticsTaskSettings":{ + "shape":"ColumnStatisticsTaskSettings", + "documentation":"

A ColumnStatisticsTaskSettings object representing the settings for the column statistics task.

" + } + } + }, "GetConnectionRequest":{ "type":"structure", "required":["Name"], @@ -20710,6 +20955,28 @@ } } }, + "StartColumnStatisticsTaskRunScheduleRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to start a column statistics task run schedule.

" + } + } + }, + "StartColumnStatisticsTaskRunScheduleResponse":{ + "type":"structure", + "members":{ + } + }, "StartCrawlerRequest":{ "type":"structure", "required":["Name"], @@ -21324,6 +21591,28 @@ "members":{ } }, + "StopColumnStatisticsTaskRunScheduleRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to stop a column statistics task run schedule.

" + } + } + }, + "StopColumnStatisticsTaskRunScheduleResponse":{ + "type":"structure", + "members":{ + } + }, "StopCrawlerRequest":{ "type":"structure", "required":["Name"], @@ -22801,6 +23090,52 @@ "max":25, "min":0 }, + "UpdateColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The name of the database where the table resides.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table for which to generate column statistics.

" + }, + "Role":{ + "shape":"NameString", + "documentation":"

The role used for running the column statistics.

" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"

A schedule for running the column statistics, specified in CRON syntax.

" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"

A list of column names for which to run statistics.

" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"

The percentage of data to sample.

" + }, + "CatalogID":{ + "shape":"NameString", + "documentation":"

The ID of the Data Catalog in which the database resides.

" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"

Name of the security configuration that is used to encrypt CloudWatch logs.

" + } + } + }, + "UpdateColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateConnectionRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 7307b931fe..7310848cdf 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -54,6 +54,19 @@ ], "documentation":"

Associates a trial component with a trial. A trial component can be associated with multiple trials. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.

" }, + "BatchDeleteClusterNodes":{ + "name":"BatchDeleteClusterNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteClusterNodesRequest"}, + "output":{"shape":"BatchDeleteClusterNodesResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes specific nodes within a SageMaker HyperPod cluster. BatchDeleteClusterNodes accepts a cluster name and a list of node IDs.

" + }, "BatchDescribeModelPackage":{ "name":"BatchDescribeModelPackage", "http":{ @@ -3670,7 +3683,7 @@ {"shape":"ResourceNotFound"}, {"shape":"ConflictException"} ], - "documentation":"

Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.

" + "documentation":"

Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.

The UpgradeClusterSoftware API call may impact your SageMaker HyperPod cluster uptime and availability. Plan accordingly to mitigate potential disruptions to your workloads.

" }, "UpdateCodeRepository":{ "name":"UpdateCodeRepository", @@ -6188,6 +6201,73 @@ }, "documentation":"

Configuration to control how SageMaker captures inference data for batch transform jobs.

" }, + "BatchDeleteClusterNodesError":{ + "type":"structure", + "required":[ + "Code", + "Message", + "NodeId" + ], + "members":{ + "Code":{ + "shape":"BatchDeleteClusterNodesErrorCode", + "documentation":"

The error code associated with the error encountered when deleting a node.

The code provides information about the specific issue encountered, such as the node not being found, the node's status being invalid for deletion, or the node ID being in use by another process.

" + }, + "Message":{ + "shape":"String", + "documentation":"

A message describing the error encountered when deleting a node.

" + }, + "NodeId":{ + "shape":"ClusterNodeId", + "documentation":"

The ID of the node that encountered an error during the deletion process.

" + } + }, + "documentation":"

Represents an error encountered when deleting a node from a SageMaker HyperPod cluster.

" + }, + "BatchDeleteClusterNodesErrorCode":{ + "type":"string", + "enum":[ + "NodeIdNotFound", + "InvalidNodeStatus", + "NodeIdInUse" + ] + }, + "BatchDeleteClusterNodesErrorList":{ + "type":"list", + "member":{"shape":"BatchDeleteClusterNodesError"}, + "max":99, + "min":1 + }, + "BatchDeleteClusterNodesRequest":{ + "type":"structure", + "required":[ + "ClusterName", + "NodeIds" + ], + "members":{ + "ClusterName":{ + "shape":"ClusterNameOrArn", + "documentation":"

The name of the SageMaker HyperPod cluster from which to delete the specified nodes.

" + }, + "NodeIds":{ + "shape":"ClusterNodeIds", + "documentation":"

A list of node IDs to be deleted from the specified cluster.

For SageMaker HyperPod clusters using the Slurm workload manager, you cannot remove instances that are configured as Slurm controller nodes.

" + } + } + }, + "BatchDeleteClusterNodesResponse":{ + "type":"structure", + "members":{ + "Failed":{ + "shape":"BatchDeleteClusterNodesErrorList", + "documentation":"

A list of errors encountered when deleting the specified nodes.

" + }, + "Successful":{ + "shape":"ClusterNodeIds", + "documentation":"

A list of node IDs that were successfully deleted from the specified cluster.

" + } + } + }, "BatchDescribeModelPackageError":{ "type":"structure", "required":[ @@ -7439,7 +7519,26 @@ "ml.t3.medium", "ml.t3.large", "ml.t3.xlarge", - "ml.t3.2xlarge" + "ml.t3.2xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.16xlarge", + "ml.g6.12xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.gr6.4xlarge", + "ml.gr6.8xlarge", + "ml.g6e.xlarge", + "ml.g6e.2xlarge", + "ml.g6e.4xlarge", + "ml.g6e.8xlarge", + "ml.g6e.16xlarge", + "ml.g6e.12xlarge", + "ml.g6e.24xlarge", + "ml.g6e.48xlarge", + "ml.p5e.48xlarge" ] }, "ClusterLifeCycleConfig":{ @@ -7533,6 +7632,12 @@ "min":1, "pattern":"^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" }, + "ClusterNodeIds":{ + "type":"list", + "member":{"shape":"ClusterNodeId"}, + "max":99, + "min":1 + }, "ClusterNodeRecovery":{ "type":"string", "enum":[ @@ -36525,6 +36630,22 @@ "ml.g5.12xlarge", "ml.g5.24xlarge", "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.16xlarge", + "ml.g6.12xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.g6e.xlarge", + "ml.g6e.2xlarge", + "ml.g6e.4xlarge", + "ml.g6e.8xlarge", + "ml.g6e.16xlarge", + "ml.g6e.12xlarge", + "ml.g6e.24xlarge", + "ml.g6e.48xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge", @@ -36862,6 +36983,10 @@ "shape":"TrainingJobStatus", "documentation":"

The status of the training job.

" }, + "SecondaryStatus":{ + "shape":"SecondaryStatus", + "documentation":"

The secondary status of the training job.

" + }, "WarmPoolStatus":{ "shape":"WarmPoolStatus", "documentation":"

The status of the warm pool associated with the training job.

" diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index da55b75617..70bb197f11 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -6605,6 +6605,10 @@ "shape":"AmazonResourceName", "documentation":"

The Amazon Resource Name (ARN) of the template.

" }, + "TemplateContent":{ + "shape":"EmailTemplateContent", + "documentation":"

The content of the template.

Amazon SES supports only simple substitutions when you send email using the SendEmail or SendBulkEmail operations and you provide the full template content in the request.

" + }, "TemplateData":{ "shape":"EmailTemplateData", "documentation":"

An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.

" @@ -6614,7 +6618,7 @@ "documentation":"

The list of message headers that will be added to the email message.

" } }, - "documentation":"

An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to define, save, and reuse in email messages that you send.

" + "documentation":"

An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to reuse in email messages that you send. You can specify the email template by providing the name or ARN of an email template previously saved in your Amazon SES account or by providing the full template content.

" }, "TemplateContent":{ "type":"string", diff --git a/botocore/signers.py b/botocore/signers.py index 89319af10b..14692d16f4 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -576,11 +576,14 @@ def generate_presigned_post( :type conditions: list :param conditions: A list of conditions to include in the policy. Each element can be either a list or a structure. For example: - [ - {"acl": "public-read"}, - {"bucket": "mybucket"}, - ["starts-with", "$key", "mykey"] - ] + + .. code:: python + + [ + {"acl": "public-read"}, + {"bucket": "amzn-s3-demo-bucket"}, + ["starts-with", "$key", "mykey"] + ] :type expires_in: int :param expires_in: The number of seconds the presigned post is valid @@ -595,12 +598,17 @@ def generate_presigned_post( the form fields and respective values to use when submitting the post. For example: - {'url': 'https://mybucket.s3.amazonaws.com - 'fields': {'acl': 'public-read', + .. code:: python + + { + 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com', + 'fields': { + 'acl': 'public-read', 'key': 'mykey', 'signature': 'mysignature', - 'policy': 'mybase64 encoded policy'} - } + 'policy': 'mybase64 encoded policy' + } + } """ if fields is None: fields = {} @@ -751,11 +759,13 @@ def generate_presigned_post( :param Conditions: A list of conditions to include in the policy. Each element can be either a list or a structure. For example: - [ - {"acl": "public-read"}, - ["content-length-range", 2, 5], - ["starts-with", "$success_action_redirect", ""] - ] + .. code:: python + + [ + {"acl": "public-read"}, + ["content-length-range", 2, 5], + ["starts-with", "$success_action_redirect", ""] + ] Conditions that are included may pertain to acl, content-length-range, Cache-Control, Content-Type, @@ -764,7 +774,7 @@ def generate_presigned_post( and/or x-amz-meta-. Note that if you include a condition, you must specify - the a valid value in the fields dictionary as well. A value will + a valid value in the fields dictionary as well. 
A value will not be added automatically to the fields dictionary based on the conditions. @@ -778,12 +788,17 @@ def generate_presigned_post( the form fields and respective values to use when submitting the post. For example: - {'url': 'https://mybucket.s3.amazonaws.com - 'fields': {'acl': 'public-read', + .. code:: python + + { + 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com', + 'fields': { + 'acl': 'public-read', 'key': 'mykey', 'signature': 'mysignature', - 'policy': 'mybase64 encoded policy'} - } + 'policy': 'mybase64 encoded policy' + } + } """ bucket = Bucket key = Key diff --git a/docs/source/client_upgrades.rst b/docs/source/client_upgrades.rst index 74adbe22ea..781df3a221 100644 --- a/docs/source/client_upgrades.rst +++ b/docs/source/client_upgrades.rst @@ -21,7 +21,7 @@ Below is an example of the old interface: s3 = session.get_service('s3') endpoint = s3.get_endpoint('us-west-2') list_objects = s3.get_operation('ListObjects') - http, response = list_objects.call(endpoint, Bucket='mybucket') + http, response = list_objects.call(endpoint, Bucket='amzn-s3-demo-bucket') if http.status_code == 200: print("Contents: %s" % response['Contents]) else: @@ -36,7 +36,7 @@ Here's an example of the newer (preferred) client interface: import botocore.session session = botocore.session.get_session() s3 = session.create_client('s3', 'us-west-2') - response = s3.list_objects(Bucket='mybucket') + response = s3.list_objects(Bucket='amzn-s3-demo-bucket') print("Contents: %s" % response['Contents']) @@ -132,22 +132,22 @@ Use a single client to make multiple API calls. endpoint = service.get_endpoint('us-west-2') operation = service.get_operation('ListObjects') head_object = service.get_operation('HeadObject') - parsed = operation.call(endpoint, Bucket='mybucket')[1] + parsed = operation.call(endpoint, Bucket='amzn-s3-demo-bucket')[1] for obj in parsed['Contents']: name = obj['Key'] # Use existing connection be passing in the same endpoint. 
- print(head_object.call(endpoint, Bucket='mybucket', Key=name)) + print(head_object.call(endpoint, Bucket='amzn-s3-demo-bucket', Key=name)) **New** .. code-block:: python s3 = session.get_client('s3', 'us-west-2') - for obj in s3.list_objects(Bucket='mybucket')['Contents']: + for obj in s3.list_objects(Bucket='amzn-s3-demo-bucket')['Contents']: name = obj['Key'] # Using the same client will reuse any existing HTTP # connections the client was using. - print(s3.head_object(Bucket='mybucket', Key=name)) + print(s3.head_object(Bucket='amzn-s3-demo-bucket', Key=name)) Operation and Method Names diff --git a/docs/source/conf.py b/docs/source/conf.py index b709ae8733..68a2cf516e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.52' +release = '1.35.53' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/topics/paginators.rst b/docs/source/topics/paginators.rst index fba76b713a..edab8fe68d 100644 --- a/docs/source/topics/paginators.rst +++ b/docs/source/topics/paginators.rst @@ -33,7 +33,7 @@ underlying API operation. The ``paginate`` method then returns an iterable paginator = client.get_paginator('list_objects') # Create a PageIterator from the Paginator - page_iterator = paginator.paginate(Bucket='my-bucket') + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket') for page in page_iterator: print(page['Contents']) @@ -48,7 +48,7 @@ the pages of API operation results. 
The ``paginate`` method accepts a pagination:: paginator = client.get_paginator('list_objects') - page_iterator = paginator.paginate(Bucket='my-bucket', + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket', PaginationConfig={'MaxItems': 10}) ``MaxItems`` @@ -83,7 +83,7 @@ to the client:: session = botocore.session.get_session() client = session.create_client('s3', region_name='us-west-2') paginator = client.get_paginator('list_objects') - operation_parameters = {'Bucket': 'my-bucket', + operation_parameters = {'Bucket': 'amzn-s3-demo-bucket', 'Prefix': 'foo/baz'} page_iterator = paginator.paginate(**operation_parameters) for page in page_iterator: @@ -101,7 +101,7 @@ JMESPath expressions that are applied to each page of results through the .. code-block:: python paginator = client.get_paginator('list_objects') - page_iterator = paginator.paginate(Bucket='my-bucket') + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket') filtered_iterator = page_iterator.search("Contents[?Size > `100`][]") for key_data in filtered_iterator: print(key_data)