diff --git a/.changes/1.35.53.json b/.changes/1.35.53.json new file mode 100644 index 0000000000..bbb193551a --- /dev/null +++ b/.changes/1.35.53.json @@ -0,0 +1,37 @@ +[ + { + "category": "``amp``", + "description": "Added support for UpdateScraper API, to enable updating collector configuration in-place", + "type": "api-change" + }, + { + "category": "``autoscaling``", + "description": "Adds bake time for Auto Scaling group Instance Refresh", + "type": "api-change" + }, + { + "category": "``batch``", + "description": "Add `podNamespace` to `EksAttemptDetail` and `containerID` to `EksAttemptContainerDetail`.", + "type": "api-change" + }, + { + "category": "``elbv2``", + "description": "Add UDP support for AWS PrivateLink and dual-stack Network Load Balancers", + "type": "api-change" + }, + { + "category": "``glue``", + "description": "Add schedule support for AWS Glue column statistics", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "SageMaker HyperPod adds scale-down at instance level via BatchDeleteClusterNodes API and group level via UpdateCluster API. SageMaker Training exposes secondary job status in TrainingJobSummary from ListTrainingJobs API. SageMaker now supports G6, G6e, P5e instances for HyperPod and Training.", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "This release enables customers to provide the email template content in the SESv2 SendEmail and SendBulkEmail APIs instead of the name or the ARN of a stored email template.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/CHANGELOG.rst b/CHANGELOG.rst index e24b2501c5..9e285de489 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.35.53 +======= + +* api-change:``amp``: Added support for UpdateScraper API, to enable updating collector configuration in-place +* api-change:``autoscaling``: Adds bake time for Auto Scaling group Instance Refresh +* api-change:``batch``: Add `podNamespace` to `EksAttemptDetail` and `containerID` to `EksAttemptContainerDetail`. +* api-change:``elbv2``: Add UDP support for AWS PrivateLink and dual-stack Network Load Balancers +* api-change:``glue``: Add schedule support for AWS Glue column statistics +* api-change:``sagemaker``: SageMaker HyperPod adds scale-down at instance level via BatchDeleteClusterNodes API and group level via UpdateCluster API. SageMaker Training exposes secondary job status in TrainingJobSummary from ListTrainingJobs API. SageMaker now supports G6, G6e, P5e instances for HyperPod and Training. +* api-change:``sesv2``: This release enables customers to provide the email template content in the SESv2 SendEmail and SendBulkEmail APIs instead of the name or the ARN of a stored email template. + + 1.35.52 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index 52d5707539..ef4a32cdcb 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.35.52' +__version__ = '1.35.53' class NullHandler(logging.Handler): diff --git a/botocore/config.py b/botocore/config.py index 587dc95ad8..eee55bb06d 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -115,10 +115,10 @@ class Config: * ``virtual`` -- Addressing style is always virtual. The name of the bucket must be DNS compatible or an exception will be thrown. 
- Endpoints will be addressed as such: ``mybucket.s3.amazonaws.com`` + Endpoints will be addressed as such: ``amzn-s3-demo-bucket.s3.amazonaws.com`` * ``path`` -- Addressing style is always by path. Endpoints will be - addressed as such: ``s3.amazonaws.com/mybucket`` + addressed as such: ``s3.amazonaws.com/amzn-s3-demo-bucket`` * ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use when the region is configured to be us-east-1. Values must be a diff --git a/botocore/data/amp/2020-08-01/service-2.json b/botocore/data/amp/2020-08-01/service-2.json index fb83866421..2ba81375e7 100644 --- a/botocore/data/amp/2020-08-01/service-2.json +++ b/botocore/data/amp/2020-08-01/service-2.json @@ -5,11 +5,13 @@ "endpointPrefix":"aps", "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceFullName":"Amazon Prometheus Service", "serviceId":"amp", "signatureVersion":"v4", "signingName":"aps", - "uid":"amp-2020-08-01" + "uid":"amp-2020-08-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "CreateAlertManagerDefinition":{ @@ -90,7 +92,7 @@ {"shape":"InternalServerException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"
The CreateScraper
operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. You can configure the scraper to control what metrics are collected, and what transformations are applied prior to sending them to your workspace.
If needed, an IAM role will be created for you that gives Amazon Managed Service for Prometheus access to the metrics in your cluster. For more information, see Using roles for scraping metrics from EKS in the Amazon Managed Service for Prometheus User Guide.
You cannot update a scraper. If you want to change the configuration of the scraper, create a new scraper and delete the old one.
The scrapeConfiguration
parameter contains the base64-encoded version of the YAML configuration file.
For more information about collectors, including what metrics are collected, and how to configure the scraper, see Amazon Web Services managed collectors in the Amazon Managed Service for Prometheus User Guide.
The CreateScraper
operation creates a scraper to collect metrics. A scraper pulls metrics from Prometheus-compatible sources within an Amazon EKS cluster, and sends them to your Amazon Managed Service for Prometheus workspace. Scrapers are flexible, and can be configured to control what metrics are collected, the frequency of collection, what transformations are applied to the metrics, and more.
An IAM role will be created for you that Amazon Managed Service for Prometheus uses to access the metrics in your cluster. You must configure this role with a policy that allows it to scrape metrics from your cluster. For more information, see Configuring your Amazon EKS cluster in the Amazon Managed Service for Prometheus User Guide.
The scrapeConfiguration
parameter contains the base-64 encoded YAML configuration for the scraper.
For more information about collectors, including what metrics are collected, and how to configure the scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.
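As a quick orientation for the revised CreateScraper description above, a minimal, hedged sketch of a call through a boto3 ``amp`` client follows. The ARNs, subnet ID, and file name are placeholders, and the ``eksConfiguration`` keys are assumed from the existing CreateScraper request shape rather than shown in this hunk.

.. code-block:: python

    import boto3

    # Hedged sketch only: create a scraper that reads from an EKS cluster and
    # writes to an Amazon Managed Service for Prometheus workspace.
    amp = boto3.client("amp", region_name="us-west-2")

    with open("scrape-config.yml", "rb") as f:
        config_blob = f.read()  # botocore base64-encodes blob members on the wire

    response = amp.create_scraper(
        alias="demo-collector",
        scrapeConfiguration={"configurationBlob": config_blob},
        source={
            "eksConfiguration": {  # assumed member names; not part of this hunk
                "clusterArn": "arn:aws:eks:us-west-2:123456789012:cluster/demo",
                "subnetIds": ["subnet-0123456789abcdef0"],
            }
        },
        destination={
            "ampConfiguration": {
                "workspaceArn": "arn:aws:aps:us-west-2:123456789012:workspace/ws-example1-1234-abcd-5678-ef90abcd1234"
            }
        },
    )
    print(response["scraperId"], response["status"]["statusCode"])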
The ListTagsForResource
operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are workspaces and rule groups namespaces.
The ListTagsForResource
operation returns the tags that are associated with an Amazon Managed Service for Prometheus resource. Currently, the only resources that can be tagged are scrapers, workspaces, and rule groups namespaces.
The TagResource
operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.
If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
" + "documentation":"The TagResource
operation associates tags with an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces.
If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag. To remove a tag, use UntagResource
.
Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are workspaces and rule groups namespaces.
", + "documentation":"Removes the specified tags from an Amazon Managed Service for Prometheus resource. The only resources that can be tagged are rule groups namespaces, scrapers, and workspaces.
", "idempotent":true }, "UpdateLoggingConfiguration":{ @@ -481,6 +483,27 @@ "documentation":"Updates the log group ARN or the workspace ID of the current logging configuration.
", "idempotent":true }, + "UpdateScraper":{ + "name":"UpdateScraper", + "http":{ + "method":"PUT", + "requestUri":"/scrapers/{scraperId}", + "responseCode":202 + }, + "input":{"shape":"UpdateScraperRequest"}, + "output":{"shape":"UpdateScraperResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"Updates an existing scraper.
You can't use this function to update the source from which the scraper is collecting metrics. To change the source, delete the scraper and create a new one.
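To make the new operation concrete, here is a hedged sketch of what an UpdateScraper call could look like from boto3 once this model is picked up. The scraper ID and configuration file are placeholders, and the member names follow the ``UpdateScraperRequest`` shape added later in this file.

.. code-block:: python

    import boto3

    amp = boto3.client("amp", region_name="us-west-2")

    # Hedged sketch: swap in a new scrape configuration in place instead of
    # deleting and recreating the scraper (previously the only option).
    with open("scrape-config-v2.yml", "rb") as f:
        new_blob = f.read()  # botocore base64-encodes blob members on the wire

    response = amp.update_scraper(
        scraperId="s-example1-1234-abcd-5678-ef9012abcd34",
        alias="demo-collector-v2",
        scrapeConfiguration={"configurationBlob": new_blob},
    )
    print(response["status"]["statusCode"])  # expected to pass through UPDATING before ACTIVE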
", + "idempotent":true + }, "UpdateWorkspaceAlias":{ "name":"UpdateWorkspaceAlias", "http":{ @@ -549,7 +572,7 @@ "documentation":"A structure that displays the current status of the alert manager definition..
" } }, - "documentation":"The details of an alert manager definition.
" + "documentation":"The details of an alert manager definition. It is the configuration for the alert manager, including information about receivers for routing alerts.
" }, "AlertManagerDefinitionStatus":{ "type":"structure", @@ -673,7 +696,7 @@ }, "logGroupArn":{ "shape":"LogGroupArn", - "documentation":"The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this API.
" + "documentation":"The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation.
" }, "workspaceId":{ "shape":"WorkspaceId", @@ -766,7 +789,7 @@ "members":{ "alias":{ "shape":"ScraperAlias", - "documentation":"(optional) a name to associate with the scraper. This is for your use, and does not need to be unique.
" + "documentation":"(optional) An alias to associate with the scraper. This is for your use, and does not need to be unique.
" }, "clientToken":{ "shape":"IdempotencyToken", @@ -779,7 +802,7 @@ }, "scrapeConfiguration":{ "shape":"ScrapeConfiguration", - "documentation":"The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.
" + "documentation":"The configuration file to use in the new scraper. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.
" }, "source":{ "shape":"Source", @@ -1134,7 +1157,7 @@ "members":{ "ampConfiguration":{ "shape":"AmpConfiguration", - "documentation":"The Amazon Managed Service for Prometheusworkspace to send metrics to.
" + "documentation":"The Amazon Managed Service for Prometheus workspace to send metrics to.
" } }, "documentation":"Where to send the metrics from a scraper.
", @@ -1343,7 +1366,7 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"The ARN of the resource to list tages for. Must be a workspace or rule groups namespace resource.
", + "documentation":"The ARN of the resource to list tages for. Must be a workspace, scraper, or rule groups namespace resource.
", "location":"uri", "locationName":"resourceArn" } @@ -1438,7 +1461,7 @@ "documentation":"The ID of the workspace the logging configuration is for.
" } }, - "documentation":"Contains information about the logging configuration.
" + "documentation":"Contains information about the logging configuration for the workspace.
" }, "LoggingConfigurationStatus":{ "type":"structure", @@ -1617,7 +1640,7 @@ "members":{ "arn":{ "shape":"RuleGroupsNamespaceArn", - "documentation":"The ARN of the rule groups namespace.
" + "documentation":"The ARN of the rule groups namespace. For example, arn:aws:aps:<region>:123456789012:rulegroupsnamespace/ws-example1-1234-abcd-5678-ef90abcd1234/rulesfile1
.
The base 64 encoded scrape configuration file.
" } }, - "documentation":"A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.
", + "documentation":"A scrape configuration for a scraper, base 64 encoded. For more information, see Scraper configuration in the Amazon Managed Service for Prometheus User Guide.
", "union":true }, "ScraperAlias":{ "type":"string", - "documentation":"A user-assigned scraper alias.
", + "documentation":"An optional user-assigned scraper alias.
", "max":100, "min":1, "pattern":"^[0-9A-Za-z][-.0-9A-Z_a-z]*$" @@ -1764,7 +1787,7 @@ }, "arn":{ "shape":"ScraperArn", - "documentation":"The Amazon Resource Name (ARN) of the scraper.
" + "documentation":"The Amazon Resource Name (ARN) of the scraper. For example, arn:aws:aps:<region>:123456798012:scraper/s-example1-1234-abcd-5678-ef9012abcd34
.
The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.
" + "documentation":"The Amazon Resource Name (ARN) of the IAM role that provides permissions for the scraper to discover and collect metrics on your behalf.
For example, arn:aws:iam::123456789012:role/service-role/AmazonGrafanaServiceRole-12example
.
The configuration file in use by the scraper.
" + "documentation":"The configuration in use by the scraper.
" }, "scraperId":{ "shape":"ScraperId", - "documentation":"The ID of the scraper.
" + "documentation":"The ID of the scraper. For example, s-example1-1234-abcd-5678-ef9012abcd34
.
State of a scraper.
", "enum":[ "CREATING", + "UPDATING", "ACTIVE", "DELETING", "CREATION_FAILED", + "UPDATE_FAILED", "DELETION_FAILED" ] }, @@ -2015,13 +2040,13 @@ "type":"map", "key":{ "shape":"TagKey", - "documentation":"The key of the tag. May not begin with aws:
.
The key of the tag. Must not begin with aws:
.
The value of the tag.
" }, - "documentation":"The list of tags assigned to the resource.
", + "documentation":"A tag associated with a resource.
", "max":50, "min":0 }, @@ -2034,13 +2059,13 @@ "members":{ "resourceArn":{ "shape":"String", - "documentation":"The ARN of the workspace or rule groups namespace to apply tags to.
", + "documentation":"The ARN of the resource to apply tags to.
", "location":"uri", "locationName":"resourceArn" }, "tags":{ "shape":"TagMap", - "documentation":"The list of tag keys and values to associate with the resource.
Keys may not begin with aws:
.
The list of tag keys and values to associate with the resource.
Keys must not begin with aws:
.
The ARN of the workspace or rule groups namespace.
", + "documentation":"The ARN of the resource from which to remove a tag.
", "location":"uri", "locationName":"resourceArn" }, @@ -2149,6 +2174,61 @@ }, "documentation":"Represents the output of an UpdateLoggingConfiguration
operation.
The new alias of the scraper.
" + }, + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"A unique identifier that you can provide to ensure the idempotency of the request. Case-sensitive.
", + "idempotencyToken":true + }, + "destination":{ + "shape":"Destination", + "documentation":"The new Amazon Managed Service for Prometheus workspace to send metrics to.
" + }, + "scrapeConfiguration":{ + "shape":"ScrapeConfiguration", + "documentation":"Contains the base-64 encoded YAML configuration for the scraper.
For more information about configuring a scraper, see Using an Amazon Web Services managed collector in the Amazon Managed Service for Prometheus User Guide.
The ID of the scraper to update.
", + "location":"uri", + "locationName":"scraperId" + } + } + }, + "UpdateScraperResponse":{ + "type":"structure", + "required":[ + "arn", + "scraperId", + "status" + ], + "members":{ + "arn":{ + "shape":"ScraperArn", + "documentation":"The Amazon Resource Name (ARN) of the updated scraper.
" + }, + "scraperId":{ + "shape":"ScraperId", + "documentation":"The ID of the updated scraper.
" + }, + "status":{ + "shape":"ScraperStatus", + "documentation":"A structure that displays the current status of the scraper.
" + }, + "tags":{ + "shape":"TagMap", + "documentation":"The list of tag keys and values that are associated with the scraper.
" + } + } + }, "UpdateWorkspaceAliasRequest":{ "type":"structure", "required":["workspaceId"], @@ -2258,11 +2338,11 @@ "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"The alias that is assigned to this workspace to help identify it. It may not be unique.
" + "documentation":"The alias that is assigned to this workspace to help identify it. It does not need to be unique.
" }, "arn":{ "shape":"WorkspaceArn", - "documentation":"The ARN of the workspace.
" + "documentation":"The ARN of the workspace. For example, arn:aws:aps:<region>:123456789012:workspace/ws-example1-1234-abcd-5678-ef90abcd1234
.
The Prometheus endpoint available for this workspace.
" + "documentation":"The Prometheus endpoint available for this workspace. For example, https://aps-workspaces.<region>.amazonaws.com/workspaces/ws-example1-1234-abcd-5678-ef90abcd1234/api/v1/
.
The unique ID for the workspace.
" + "documentation":"The unique ID for the workspace. For example, ws-example1-1234-abcd-5678-ef90abcd1234
.
The full details about one Amazon Managed Service for Prometheus workspace in your account.
" @@ -2331,7 +2411,7 @@ "members":{ "alias":{ "shape":"WorkspaceAlias", - "documentation":"The alias that is assigned to this workspace to help identify it. It may not be unique.
" + "documentation":"The alias that is assigned to this workspace to help identify it. It does not need to be unique.
" }, "arn":{ "shape":"WorkspaceArn", diff --git a/botocore/data/autoscaling/2011-01-01/service-2.json b/botocore/data/autoscaling/2011-01-01/service-2.json index 04b3760248..63edfab60a 100644 --- a/botocore/data/autoscaling/2011-01-01/service-2.json +++ b/botocore/data/autoscaling/2011-01-01/service-2.json @@ -1656,6 +1656,11 @@ "type":"list", "member":{"shape":"XmlStringMaxLen255"} }, + "BakeTime":{ + "type":"integer", + "max":172800, + "min":0 + }, "BareMetal":{ "type":"string", "enum":[ @@ -3174,7 +3179,7 @@ }, "Status":{ "shape":"InstanceRefreshStatus", - "documentation":"The current status for the instance refresh operation:
Pending
- The request was created, but the instance refresh has not started.
InProgress
- An instance refresh is in progress.
Successful
- An instance refresh completed successfully.
Failed
- An instance refresh failed to complete. You can troubleshoot using the status reason and the scaling activities.
Cancelling
- An ongoing instance refresh is being cancelled.
Cancelled
- The instance refresh is cancelled.
RollbackInProgress
- An instance refresh is being rolled back.
RollbackFailed
- The rollback failed to complete. You can troubleshoot using the status reason and the scaling activities.
RollbackSuccessful
- The rollback completed successfully.
The current status for the instance refresh operation:
Pending
- The request was created, but the instance refresh has not started.
InProgress
- An instance refresh is in progress.
Successful
- An instance refresh completed successfully.
Failed
- An instance refresh failed to complete. You can troubleshoot using the status reason and the scaling activities.
Cancelling
- An ongoing instance refresh is being cancelled.
Cancelled
- The instance refresh is cancelled.
RollbackInProgress
- An instance refresh is being rolled back.
RollbackFailed
- The rollback failed to complete. You can troubleshoot using the status reason and the scaling activities.
RollbackSuccessful
- The rollback completed successfully.
Baking
- Waiting the specified bake time after an instance refresh has finished updating instances.
Specifies the maximum percentage of the group that can be in service and healthy, or pending, to support your workload when replacing instances. The value is expressed as a percentage of the desired capacity of the Auto Scaling group. Value range is 100 to 200.
If you specify MaxHealthyPercentage
, you must also specify MinHealthyPercentage
, and the difference between them cannot be greater than 100. A larger range increases the number of instances that can be replaced at the same time.
If you do not specify this property, the default is 100 percent, or the percentage set in the instance maintenance policy for the Auto Scaling group, if defined.
" + }, + "BakeTime":{ + "shape":"BakeTime", + "documentation":"The amount of time, in seconds, to wait at the end of an instance refresh before the instance refresh is considered complete.
" } }, "documentation":"Describes the preferences for an instance refresh.
" @@ -5262,7 +5272,7 @@ }, "Preferences":{ "shape":"RefreshPreferences", - "documentation":"Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby
state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
CloudWatch alarms
Skip matching
Sets your preferences for the instance refresh so that it performs as expected when you start it. Includes the instance warmup time, the minimum and maximum healthy percentages, and the behaviors that you want Amazon EC2 Auto Scaling to use if instances that are in Standby
state or protected from scale in are found. You can also choose to enable additional features, such as the following:
Auto rollback
Checkpoints
CloudWatch alarms
Skip matching
Bake time
A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.
" + "documentation":"A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs.
This parameter has as limit of 1024 characters.
" } }, "documentation":"Contains the parameters for CancelJob
.
The Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue uses a fair share scheduling policy. If this parameter isn't specified, the job queue uses a first in, first out (FIFO) scheduling policy. After a job queue is created, you can replace but can't remove the fair share scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name
. An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy
.
The Amazon Resource Name (ARN) of the fair share scheduling policy. Job queues that don't have a scheduling policy are scheduled in a first-in, first-out (FIFO) model. After a job queue has a scheduling policy, it can be replaced but can't be removed.
The format is aws:Partition:batch:Region:Account:scheduling-policy/Name
.
An example is aws:aws:batch:us-west-2:123456789012:scheduling-policy/MySchedulingPolicy
.
A job queue without a scheduling policy is scheduled as a FIFO job queue and can't have a scheduling policy added. Jobs queues with a scheduling policy can have a maximum of 500 active fair share identifiers. When the limit has been reached, submissions of any jobs that add a new fair share identifier fail.
" }, "priority":{ "shape":"Integer", @@ -1235,7 +1235,7 @@ }, "jobStateTimeLimitActions":{ "shape":"JobStateTimeLimitActions", - "documentation":"The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds
has passed.
The set of actions that Batch performs on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds
has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)
Contains the parameters for CreateJobQueue
.
An object that contains the properties for the Amazon ECS task definition of a job.
This object is currently limited to one element.
An object that contains the properties for the Amazon ECS task definition of a job.
This object is currently limited to one task element. However, the task element can run up to 10 containers.
An object that contains the properties for the Amazon ECS resources of a job.
" @@ -1749,6 +1749,10 @@ "shape":"String", "documentation":"The name of a container.
" }, + "containerID":{ + "shape":"String", + "documentation":"The ID for the container.
" + }, "exitCode":{ "shape":"Integer", "documentation":"The exit code returned for the job attempt. A non-zero exit code is considered failed.
" @@ -1783,6 +1787,10 @@ "shape":"String", "documentation":"The name of the pod for this job attempt.
" }, + "podNamespace":{ + "shape":"String", + "documentation":"The namespace of the Amazon EKS cluster that the pod exists in.
" + }, "nodeName":{ "shape":"String", "documentation":"The name of the node for this job attempt.
" @@ -2109,11 +2117,11 @@ }, "containers":{ "shape":"EksContainers", - "documentation":"The properties of the container that's used on the Amazon EKS pod.
" + "documentation":"The properties of the container that's used on the Amazon EKS pod.
This object is limited to 10 elements.
These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.
This object is limited to 10 elements
These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.
This object is limited to 10 elements.
The overrides for the conatainers defined in the Amazon EKS pod. These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.
This object is limited to 10 elements
The overrides for the initContainers
defined in the Amazon EKS pod. These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.
A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.
" + "documentation":"A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. It is also recorded in the Batch activity logs.
This parameter has as limit of 1024 characters.
" } }, "documentation":"Contains the parameters for TerminateJob
.
The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds
has passed.
The set of actions that Batch perform on jobs that remain at the head of the job queue in the specified state longer than specified times. Batch will perform each action after maxTimeSeconds
has passed. (Note: The minimum value for maxTimeSeconds is 600 (10 minutes) and its maximum value is 86,400 (24 hours).)
Contains the parameters for UpdateJobQueue
.
Registers the specified targets with the specified target group.
If the target is an EC2 instance, it must be in the running
state when you register it.
By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.
With a Network Load Balancer, you cannot register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.
" + "documentation":"Registers the specified targets with the specified target group.
If the target is an EC2 instance, it must be in the running
state when you register it.
By default, the load balancer routes requests to registered targets using the protocol and port for the target group. Alternatively, you can override the port for a target when you register it. You can register each EC2 instance or IP address with the same target group multiple times using different ports.
With a Network Load Balancer, you can't register instances by instance ID if they have the following instance types: C1, CC1, CC2, CG1, CG2, CR1, CS1, G1, G2, HI1, HS1, M1, M2, M3, and T1. You can register instances of these types by IP address.
" }, "RemoveListenerCertificates":{ "name":"RemoveListenerCertificates", @@ -1287,6 +1287,10 @@ "LoadBalancerAddresses":{ "shape":"LoadBalancerAddresses", "documentation":"[Network Load Balancers] If you need static IP addresses for your load balancer, you can specify one Elastic IP address per Availability Zone when you create an internal-facing load balancer. For internal load balancers, you can specify a private IP address from the IPv4 range of the subnet.
" + }, + "SourceNatIpv6Prefixes":{ + "shape":"SourceNatIpv6Prefixes", + "documentation":"[Network Load Balancers with UDP listeners] The IPv6 prefixes to use for source NAT. For each subnet, specify an IPv6 prefix (/80 netmask) from the subnet CIDR block or auto_assigned
to use an IPv6 prefix selected at random from the subnet CIDR block.
Information about an Availability Zone.
" @@ -1388,11 +1392,11 @@ }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can’t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.
" + "documentation":"The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can’t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.
" }, "Port":{ "shape":"Port", - "documentation":"The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.
" + "documentation":"The port on which the load balancer is listening. You can't specify a port for a Gateway Load Balancer.
" }, "SslPolicy":{ "shape":"SslPolicyName", @@ -1439,11 +1443,11 @@ }, "Subnets":{ "shape":"Subnets", - "documentation":"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" + "documentation":"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both. To specify an Elastic IP address, specify subnet mappings instead of subnets.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You cannot specify Elastic IP addresses for your subnets.
" + "documentation":"The IDs of the subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings, but not both.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones. You can't specify Elastic IP addresses for your subnets.
" }, "SecurityGroups":{ "shape":"SecurityGroups", @@ -1451,7 +1455,7 @@ }, "Scheme":{ "shape":"LoadBalancerSchemeEnum", - "documentation":"The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.
The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.
The default is an Internet-facing load balancer.
You cannot specify a scheme for a Gateway Load Balancer.
" + "documentation":"The nodes of an Internet-facing load balancer have public IP addresses. The DNS name of an Internet-facing load balancer is publicly resolvable to the public IP addresses of the nodes. Therefore, Internet-facing load balancers can route requests from clients over the internet.
The nodes of an internal load balancer have only private IP addresses. The DNS name of an internal load balancer is publicly resolvable to the private IP addresses of the nodes. Therefore, internal load balancers can route requests only from clients with access to the VPC for the load balancer.
The default is an Internet-facing load balancer.
You can't specify a scheme for a Gateway Load Balancer.
" }, "Tags":{ "shape":"TagList", @@ -1463,11 +1467,15 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"Note: Internal load balancers must use the ipv4
IP address type.
[Application Load Balancers] The IP address type. The possible values are ipv4
(for only IPv4 addresses), dualstack
(for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4
(for IPv6 only public addresses, with private IPv4 and IPv6 addresses).
[Network Load Balancers] The IP address type. The possible values are ipv4
(for only IPv4 addresses) and dualstack
(for IPv4 and IPv6 addresses). You can’t specify dualstack
for a load balancer with a UDP or TCP_UDP listener.
[Gateway Load Balancers] The IP address type. The possible values are ipv4
(for only IPv4 addresses) and dualstack
(for IPv4 and IPv6 addresses).
The IP address type. Internal load balancers must use ipv4
.
[Application Load Balancers] The possible values are ipv4
(IPv4 addresses), dualstack
(IPv4 and IPv6 addresses), and dualstack-without-public-ipv4
(public IPv6 addresses and private IPv4 and IPv6 addresses).
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4
(IPv4 addresses) and dualstack
(IPv4 and IPv6 addresses).
[Application Load Balancers on Outposts] The ID of the customer-owned address pool (CoIP pool).
" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack
. The default value is off
.
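A hedged boto3 sketch of the dual-stack UDP path these additions enable: an internal dual-stack Network Load Balancer created with the new EnablePrefixForIpv6SourceNat setting, fronting a UDP listener. The subnet, VPC, names, and port are placeholders, and this is a sketch of the intended usage rather than a definitive recipe.

.. code-block:: python

    import boto3

    elbv2 = boto3.client("elbv2")

    # Hedged sketch: dual-stack internal NLB with IPv6 prefix source NAT enabled,
    # which is what allows the UDP listener below on a dual-stack load balancer.
    lb = elbv2.create_load_balancer(
        Name="dualstack-udp-nlb",
        Type="network",
        Scheme="internal",
        IpAddressType="dualstack",
        EnablePrefixForIpv6SourceNat="on",
        Subnets=["subnet-0123456789abcdef0"],
    )["LoadBalancers"][0]

    tg = elbv2.create_target_group(
        Name="udp-targets",
        Protocol="UDP",
        Port=5060,
        VpcId="vpc-0123456789abcdef0",
        TargetType="ip",
        IpAddressType="ipv6",
    )["TargetGroups"][0]

    elbv2.create_listener(
        LoadBalancerArn=lb["LoadBalancerArn"],
        Protocol="UDP",
        Port=5060,
        DefaultActions=[{"Type": "forward", "TargetGroupArn": tg["TargetGroupArn"]}],
    )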
Indicates whether health checks are enabled. If the target type is lambda
, health checks are disabled by default but can be enabled. If the target type is instance
, ip
, or alb
, health checks are always enabled and cannot be disabled.
Indicates whether health checks are enabled. If the target type is lambda
, health checks are disabled by default but can be enabled. If the target type is instance
, ip
, or alb
, health checks are always enabled and can't be disabled.
The type of IP address used for this target group. The possible values are ipv4
and ipv6
. This is an optional parameter. If not specified, the IP address type defaults to ipv4
.
The IP address type. The default value is ipv4
.
The name of the trust store.
This name must be unique per region and cannot be changed after creation.
" + "documentation":"The name of the trust store.
This name must be unique per region and can't be changed after creation.
" }, "CaCertificatesBundleS3Bucket":{ "shape":"S3Bucket", @@ -1654,7 +1662,7 @@ "type":"structure", "members":{ }, - "documentation":"The specified association cannot be within the same account.
", + "documentation":"The specified association can't be within the same account.
", "error":{ "code":"DeleteAssociationSameAccount", "httpStatusCode":400, @@ -2309,6 +2317,13 @@ }, "exception":true }, + "EnablePrefixForIpv6SourceNatEnum":{ + "type":"string", + "enum":[ + "on", + "off" + ] + }, "EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic":{"type":"string"}, "EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum":{ "type":"string", @@ -2775,7 +2790,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"[Application Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4
(for only IPv4 addresses), dualstack
(for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4
(for IPv6 only public addresses, with private IPv4 and IPv6 addresses).
[Network Load Balancers and Gateway Load Balancers] The type of IP addresses used for public or private connections by the subnets attached to your load balancer. The possible values are ipv4
(for only IPv4 addresses) and dualstack
(for IPv4 and IPv6 addresses).
The type of IP addresses used for public or private connections by the subnets attached to your load balancer.
[Application Load Balancers] The possible values are ipv4
(IPv4 addresses), dualstack
(IPv4 and IPv6 addresses), and dualstack-without-public-ipv4
(public IPv6 addresses and private IPv4 and IPv6 addresses).
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4
(IPv4 addresses) and dualstack
(IPv4 and IPv6 addresses).
Indicates whether to evaluate inbound security group rules for traffic sent to a Network Load Balancer through Amazon Web Services PrivateLink.
" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack
. The default value is off
.
Information about a load balancer.
" @@ -2824,7 +2843,7 @@ "members":{ "Key":{ "shape":"LoadBalancerAttributeKey", - "documentation":"The name of the attribute.
The following attributes are supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The possible values are true
and false
. The default for Network Load Balancers and Gateway Load Balancers is false
. The default for Application Load Balancers is true
, and cannot be changed.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
ipv6.deny_all_igw_traffic
- Blocks internet gateway (IGW) access to the load balancer. It is set to false
for internet-facing load balancers and true
for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
client_keep_alive.seconds
- The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.
connection_logs.s3.enabled
- Indicates whether connection logs are enabled. The value is true
or false
. The default is false
.
connection_logs.s3.bucket
- The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
connection_logs.s3.prefix
- The prefix for the location in the S3 bucket for the connection logs.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.
routing.http.preserve_host_header.enabled
- Indicates whether the Application Load Balancer should preserve the Host
header in the HTTP request and send it to the target without any change. The possible values are true
and false
. The default is false
.
routing.http.x_amzn_tls_version_and_cipher_suite.enabled
- Indicates whether the two headers (x-amzn-tls-version
and x-amzn-tls-cipher-suite
), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version
header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite
header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true
and false
. The default is false
.
routing.http.xff_client_port.enabled
- Indicates whether the X-Forwarded-For
header should preserve the source port that the client used to connect to the load balancer. The possible values are true
and false
. The default is false
.
routing.http.xff_header_processing.mode
- Enables you to modify, preserve, or remove the X-Forwarded-For
header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append
, preserve
, and remove
. The default is append
.
If the value is append
, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For
header in the HTTP request before it sends it to targets.
If the value is preserve
the Application Load Balancer preserves the X-Forwarded-For
header in the HTTP request, and sends it to targets without any change.
If the value is remove
, the Application Load Balancer removes the X-Forwarded-For
header in the HTTP request before it sends it to targets.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The possible values are true
and false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true
and false
. The default is false
.
The following attributes are supported by only Network Load Balancers:
dns_record.client_routing_policy
- Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity
with 100 percent zonal affinity, partial_availability_zone_affinity
with 85 percent zonal affinity, and any_availability_zone
with 0 percent zonal affinity.
zonal_shift.config.enabled
- Indicates whether zonal shift is enabled. The possible values are true
and false
. The default is false
.
The name of the attribute.
The following attributes are supported by all load balancers:
deletion_protection.enabled
- Indicates whether deletion protection is enabled. The value is true
or false
. The default is false
.
load_balancing.cross_zone.enabled
- Indicates whether cross-zone load balancing is enabled. The possible values are true
and false
. The default for Network Load Balancers and Gateway Load Balancers is false
. The default for Application Load Balancers is true
, and can't be changed.
The following attributes are supported by both Application Load Balancers and Network Load Balancers:
access_logs.s3.enabled
- Indicates whether access logs are enabled. The value is true
or false
. The default is false
.
access_logs.s3.bucket
- The name of the S3 bucket for the access logs. This attribute is required if access logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
access_logs.s3.prefix
- The prefix for the location in the S3 bucket for the access logs.
ipv6.deny_all_igw_traffic
- Blocks internet gateway (IGW) access to the load balancer. It is set to false
for internet-facing load balancers and true
for internal load balancers, preventing unintended access to your internal load balancer through an internet gateway.
The following attributes are supported by only Application Load Balancers:
idle_timeout.timeout_seconds
- The idle timeout value, in seconds. The valid range is 1-4000 seconds. The default is 60 seconds.
client_keep_alive.seconds
- The client keep alive value, in seconds. The valid range is 60-604800 seconds. The default is 3600 seconds.
connection_logs.s3.enabled
- Indicates whether connection logs are enabled. The value is true
or false
. The default is false
.
connection_logs.s3.bucket
- The name of the S3 bucket for the connection logs. This attribute is required if connection logs are enabled. The bucket must exist in the same region as the load balancer and have a bucket policy that grants Elastic Load Balancing permissions to write to the bucket.
connection_logs.s3.prefix
- The prefix for the location in the S3 bucket for the connection logs.
routing.http.desync_mitigation_mode
- Determines how the load balancer handles requests that might pose a security risk to your application. The possible values are monitor
, defensive
, and strictest
. The default is defensive
.
routing.http.drop_invalid_header_fields.enabled
- Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true
) or routed to targets (false
). The default is false
.
routing.http.preserve_host_header.enabled
- Indicates whether the Application Load Balancer should preserve the Host
header in the HTTP request and send it to the target without any change. The possible values are true
and false
. The default is false
.
routing.http.x_amzn_tls_version_and_cipher_suite.enabled
- Indicates whether the two headers (x-amzn-tls-version
and x-amzn-tls-cipher-suite
), which contain information about the negotiated TLS version and cipher suite, are added to the client request before sending it to the target. The x-amzn-tls-version
header has information about the TLS protocol version negotiated with the client, and the x-amzn-tls-cipher-suite
header has information about the cipher suite negotiated with the client. Both headers are in OpenSSL format. The possible values for the attribute are true
and false
. The default is false
.
routing.http.xff_client_port.enabled
- Indicates whether the X-Forwarded-For
header should preserve the source port that the client used to connect to the load balancer. The possible values are true
and false
. The default is false
.
routing.http.xff_header_processing.mode
- Enables you to modify, preserve, or remove the X-Forwarded-For
header in the HTTP request before the Application Load Balancer sends the request to the target. The possible values are append
, preserve
, and remove
. The default is append
.
If the value is append
, the Application Load Balancer adds the client IP address (of the last hop) to the X-Forwarded-For
header in the HTTP request before it sends it to targets.
If the value is preserve
the Application Load Balancer preserves the X-Forwarded-For
header in the HTTP request, and sends it to targets without any change.
If the value is remove
, the Application Load Balancer removes the X-Forwarded-For
header in the HTTP request before it sends it to targets.
routing.http2.enabled
- Indicates whether HTTP/2 is enabled. The possible values are true
and false
. The default is true
. Elastic Load Balancing requires that message header names contain only alphanumeric characters and hyphens.
waf.fail_open.enabled
- Indicates whether to allow a WAF-enabled load balancer to route requests to targets if it is unable to forward the request to Amazon Web Services WAF. The possible values are true
and false
. The default is false
.
The following attributes are supported by only Network Load Balancers:
dns_record.client_routing_policy
- Indicates how traffic is distributed among the load balancer Availability Zones. The possible values are availability_zone_affinity
with 100 percent zonal affinity, partial_availability_zone_affinity
with 85 percent zonal affinity, and any_availability_zone
with 0 percent zonal affinity.
zonal_shift.config.enabled
- Indicates whether zonal shift is enabled. The possible values are true
and false
. The default is false
.
The port for connections from clients to the load balancer. You cannot specify a port for a Gateway Load Balancer.
" + "documentation":"The port for connections from clients to the load balancer. You can't specify a port for a Gateway Load Balancer.
" }, "Protocol":{ "shape":"ProtocolEnum", - "documentation":"The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.
" + "documentation":"The protocol for connections from clients to the load balancer. Application Load Balancers support the HTTP and HTTPS protocols. Network Load Balancers support the TCP, TLS, UDP, and TCP_UDP protocols. You can’t change the protocol to UDP or TCP_UDP if dual-stack mode is enabled. You can't specify a protocol for a Gateway Load Balancer.
" }, "SslPolicy":{ "shape":"SslPolicyName", @@ -3300,7 +3319,7 @@ "members":{ "Protocol":{ "shape":"RedirectActionProtocol", - "documentation":"The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You cannot redirect HTTPS to HTTP.
" + "documentation":"The protocol. You can specify HTTP, HTTPS, or #{protocol}. You can redirect HTTP to HTTP, HTTP to HTTPS, and HTTPS to HTTPS. You can't redirect HTTPS to HTTP.
" }, "Port":{ "shape":"RedirectActionPort", @@ -3595,7 +3614,7 @@ "documentation":"Information for a source IP condition. Specify only when Field
is source-ip
.
Information about a condition for a rule.
Each rule can optionally include up to one of each of the following conditions: http-request-method
, host-header
, path-pattern
, and source-ip
. Each rule can also optionally include one or more of each of the following conditions: http-header
and query-string
. Note that the value for a condition cannot be empty.
For more information, see Quotas for your Application Load Balancers.
" + "documentation":"Information about a condition for a rule.
Each rule can optionally include up to one of each of the following conditions: http-request-method
, host-header
, path-pattern
, and source-ip
. Each rule can also optionally include one or more of each of the following conditions: http-header
and query-string
. Note that the value for a condition can't be empty.
For more information, see Quotas for your Application Load Balancers.
" }, "RuleConditionList":{ "type":"list", @@ -3673,7 +3692,7 @@ }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"Note: Internal load balancers must use the ipv4
IP address type.
[Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).
Note: Application Load Balancer authentication only supports IPv4 addresses when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer cannot complete the authentication process, resulting in HTTP 500 errors.
[Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.
[Gateway Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).
The IP address type. Internal load balancers must use ipv4.
[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).
Application Load Balancer authentication supports IPv4 addresses only when connecting to an Identity Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer can't complete the authentication process, resulting in HTTP 500 errors.
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).
The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" + "documentation":"The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.
[Application Load Balancers] You must specify subnets from at least two Availability Zones.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers and Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" }, "SubnetMappings":{ "shape":"SubnetMappings", - "documentation":"The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You cannot specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancer, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" + "documentation":"The IDs of the public subnets. You can specify only one subnet per Availability Zone. You must specify either subnets or subnet mappings.
[Application Load Balancers] You must specify subnets from at least two Availability Zones. You can't specify Elastic IP addresses for your subnets.
[Application Load Balancers on Outposts] You must specify one Outpost subnet.
[Application Load Balancers on Local Zones] You can specify subnets from one or more Local Zones.
[Network Load Balancers] You can specify subnets from one or more Availability Zones. You can specify one Elastic IP address per subnet if you need static IP addresses for your internet-facing load balancer. For internal load balancers, you can specify one private IP address per subnet from the IPv4 range of the subnet. For internet-facing load balancers, you can specify one IPv6 address per subnet.
[Gateway Load Balancers] You can specify subnets from one or more Availability Zones.
" }, "IpAddressType":{ "shape":"IpAddressType", - "documentation":"[Application Load Balancers] The IP address type. The possible values are ipv4
(for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (for IPv6 only public addresses, with private IPv4 and IPv6 addresses).
[Network Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener.
[Gateway Load Balancers] The type of IP addresses used by the subnets for your load balancer. The possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses).
The IP address type.
[Application Load Balancers] The possible values are ipv4 (IPv4 addresses), dualstack (IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 (public IPv6 addresses and private IPv4 and IPv6 addresses).
[Network Load Balancers and Gateway Load Balancers] The possible values are ipv4 (IPv4 addresses) and dualstack (IPv4 and IPv6 addresses).
[Network Load Balancers with UDP listeners] Indicates whether to use an IPv6 prefix from each subnet for source NAT. The IP address type must be dualstack. The default value is off.
[Application Load Balancers] The IP address type.
[Network Load Balancers] The IP address type.
[Gateway Load Balancers] The IP address type.
" + "documentation":"The IP address type.
" + }, + "EnablePrefixForIpv6SourceNat":{ + "shape":"EnablePrefixForIpv6SourceNatEnum", + "documentation":"[Network Load Balancers] Indicates whether to use an IPv6 prefix from each subnet for source NAT.
" } } }, @@ -3784,6 +3811,11 @@ }, "documentation":"Information about a source IP condition.
You can use this condition to route based on the IP address of the source that connects to the load balancer. If a client is behind a proxy, this is the IP address of the proxy not the IP address of the client.
" }, + "SourceNatIpv6Prefix":{"type":"string"}, + "SourceNatIpv6Prefixes":{ + "type":"list", + "member":{"shape":"SourceNatIpv6Prefix"} + }, "SslPolicies":{ "type":"list", "member":{"shape":"SslPolicy"} @@ -3842,6 +3874,10 @@ "IPv6Address":{ "shape":"IPv6Address", "documentation":"[Network Load Balancers] The IPv6 address.
" + }, + "SourceNatIpv6Prefix":{ + "shape":"SourceNatIpv6Prefix", + "documentation":"[Network Load Balancers with UDP listeners] The IPv6 prefix to use for source NAT. Specify an IPv6 prefix (/80 netmask) from the subnet CIDR block or auto_assigned
to use an IPv6 prefix selected at random from the subnet CIDR block.
Information about a subnet mapping.
" @@ -4034,7 +4070,7 @@ }, "IpAddressType":{ "shape":"TargetGroupIpAddressTypeEnum", - "documentation":"The type of IP address used for this target group. The possible values are ipv4
and ipv6. This is an optional parameter. If not specified, the IP address type defaults to ipv4.
The IP address type. The default value is ipv4.
Information about a target group.
" @@ -4061,7 +4097,7 @@ "members":{ "Key":{ "shape":"TargetGroupAttributeKey", - "documentation":"The name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation cannot be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
target_health_state.unhealthy.draining_interval_seconds
- The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining
to unhealthy
. The range is 0-360000 seconds. The default value is 0 seconds.
Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled
is false
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) cannot be set independently. The value you set for both attributes must be the same.
The name of the attribute.
The following attributes are supported by all load balancers:
deregistration_delay.timeout_seconds
- The amount of time, in seconds, for Elastic Load Balancing to wait before changing the state of a deregistering target from draining
to unused
. The range is 0-3600 seconds. The default value is 300 seconds. If the target is a Lambda function, this attribute is not supported.
stickiness.enabled
- Indicates whether target stickiness is enabled. The value is true
or false
. The default is false
.
stickiness.type
- Indicates the type of stickiness. The possible values are:
lb_cookie
and app_cookie
for Application Load Balancers.
source_ip
for Network Load Balancers.
source_ip_dest_ip
and source_ip_dest_ip_proto
for Gateway Load Balancers.
The following attributes are supported by Application Load Balancers and Network Load Balancers:
load_balancing.cross_zone.enabled
- Indicates whether cross zone load balancing is enabled. The value is true
, false
or use_load_balancer_configuration
. The default is use_load_balancer_configuration
.
target_group_health.dns_failover.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to the maximum number of targets. The default is off
.
target_group_health.dns_failover.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are off
or an integer from 1 to 100. The default is off
.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.count
- The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are 1 to the maximum number of targets. The default is 1.
target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage
- The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are off
or an integer from 1 to 100. The default is off
.
The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address:
load_balancing.algorithm.type
- The load balancing algorithm determines how the load balancer selects targets when routing requests. The value is round_robin
, least_outstanding_requests
, or weighted_random
. The default is round_robin
.
load_balancing.algorithm.anomaly_mitigation
- Only available when load_balancing.algorithm.type
is weighted_random
. Indicates whether anomaly mitigation is enabled. The value is on
or off
. The default is off
.
slow_start.duration_seconds
- The time period, in seconds, during which a newly registered target receives an increasing share of the traffic to the target group. After this time period ends, the target receives its full share of traffic. The range is 30-900 seconds (15 minutes). The default is 0 seconds (disabled).
stickiness.app_cookie.cookie_name
- Indicates the name of the application-based cookie. Names that start with the following prefixes are not allowed: AWSALB
, AWSALBAPP
, and AWSALBTG
; they're reserved for use by the load balancer.
stickiness.app_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the application-based cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
stickiness.lb_cookie.duration_seconds
- The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function:
lambda.multi_value_headers.enabled
- Indicates whether the request and response headers that are exchanged between the load balancer and the Lambda function include arrays of values or strings. The value is true
or false
. The default is false
. If the value is false
and the request contains a duplicate header field name or query parameter key, the load balancer uses the last value sent by the client.
The following attributes are supported only by Network Load Balancers:
deregistration_delay.connection_termination.enabled
- Indicates whether the load balancer terminates connections at the end of the deregistration timeout. The value is true
or false
. For new UDP/TCP_UDP target groups the default is true
. Otherwise, the default is false
.
preserve_client_ip.enabled
- Indicates whether client IP preservation is enabled. The value is true
or false
. The default is disabled if the target group type is IP address and the target group protocol is TCP or TLS. Otherwise, the default is enabled. Client IP preservation can't be disabled for UDP and TCP_UDP target groups.
proxy_protocol_v2.enabled
- Indicates whether Proxy Protocol version 2 is enabled. The value is true
or false
. The default is false
.
target_health_state.unhealthy.connection_termination.enabled
- Indicates whether the load balancer terminates connections to unhealthy targets. The value is true
or false
. The default is true
.
target_health_state.unhealthy.draining_interval_seconds
- The amount of time for Elastic Load Balancing to wait before changing the state of an unhealthy target from unhealthy.draining
to unhealthy
. The range is 0-360000 seconds. The default value is 0 seconds.
Note: This attribute can only be configured when target_health_state.unhealthy.connection_termination.enabled
is false
.
The following attributes are supported only by Gateway Load Balancers:
target_failover.on_deregistration
- Indicates how the Gateway Load Balancer handles existing flows when a target is deregistered. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
target_failover.on_unhealthy
- Indicates how the Gateway Load Balancer handles existing flows when a target is unhealthy. The possible values are rebalance
and no_rebalance
. The default is no_rebalance
. The two attributes (target_failover.on_deregistration
and target_failover.on_unhealthy
) can't be set independently. The value you set for both attributes must be the same.
Creates a classifier in the user's account. This can be a GrokClassifier
, an XMLClassifier, a JsonClassifier, or a CsvClassifier, depending on which field of the request is present.
Creates settings for a column statistics task.
" + }, "CreateConnection":{ "name":"CreateConnection", "http":{ @@ -868,6 +887,21 @@ ], "documentation":"Retrieves table statistics of columns.
The Identity and Access Management (IAM) permission required for this operation is DeleteTable.
Deletes settings for a column statistics task.
" + }, "DeleteConnection":{ "name":"DeleteConnection", "http":{ @@ -1393,6 +1427,21 @@ ], "documentation":"Retrieves information about all runs associated with the specified table.
" }, + "GetColumnStatisticsTaskSettings":{ + "name":"GetColumnStatisticsTaskSettings", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetColumnStatisticsTaskSettingsRequest"}, + "output":{"shape":"GetColumnStatisticsTaskSettingsResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"Gets settings for a column statistics task.
" + }, "GetConnection":{ "name":"GetConnection", "http":{ @@ -3004,6 +3053,22 @@ ], "documentation":"Starts a column statistics task run, for a specified table and columns.
" }, + "StartColumnStatisticsTaskRunSchedule":{ + "name":"StartColumnStatisticsTaskRunSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartColumnStatisticsTaskRunScheduleRequest"}, + "output":{"shape":"StartColumnStatisticsTaskRunScheduleResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"Starts a column statistics task run schedule.
" + }, "StartCrawler":{ "name":"StartCrawler", "http":{ @@ -3209,6 +3274,21 @@ ], "documentation":"Stops a task run for the specified table.
" }, + "StopColumnStatisticsTaskRunSchedule":{ + "name":"StopColumnStatisticsTaskRunSchedule", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopColumnStatisticsTaskRunScheduleRequest"}, + "output":{"shape":"StopColumnStatisticsTaskRunScheduleResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"Stops a column statistics task run schedule.
" + }, "StopCrawler":{ "name":"StopCrawler", "http":{ @@ -3414,6 +3494,23 @@ ], "documentation":"Creates or updates table statistics of columns.
The Identity and Access Management (IAM) permission required for this operation is UpdateTable.
Updates settings for a column statistics task.
" + }, "UpdateConnection":{ "name":"UpdateConnection", "http":{ @@ -6369,6 +6466,10 @@ "shape":"NameString", "documentation":"The type of workers being used for generating stats. The default is g.1x
.
The type of column statistics computation.
" + }, "Status":{ "shape":"ColumnStatisticsState", "documentation":"The status of the task run.
" @@ -6421,6 +6522,44 @@ "type":"list", "member":{"shape":"ColumnStatisticsTaskRun"} }, + "ColumnStatisticsTaskSettings":{ + "type":"structure", + "members":{ + "DatabaseName":{ + "shape":"DatabaseName", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"TableName", + "documentation":"The name of the table for which to generate column statistics.
" + }, + "Schedule":{ + "shape":"Schedule", + "documentation":"A schedule for running the column statistics, specified in CRON syntax.
" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"A list of column names for which to run statistics.
" + }, + "CatalogID":{ + "shape":"CatalogIdString", + "documentation":"The ID of the Data Catalog in which the database resides.
" + }, + "Role":{ + "shape":"Role", + "documentation":"The role used for running the column statistics.
" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"The percentage of data to sample.
" + }, + "SecurityConfiguration":{ + "shape":"CrawlerSecurityConfiguration", + "documentation":"Name of the security configuration that is used to encrypt CloudWatch logs.
" + } + }, + "documentation":"The settings for a column statistics task.
" + }, "ColumnStatisticsTaskStoppingException":{ "type":"structure", "members":{ @@ -6507,6 +6646,13 @@ "bzip2" ] }, + "ComputationType":{ + "type":"string", + "enum":[ + "FULL", + "INCREMENTAL" + ] + }, "ConcurrentModificationException":{ "type":"structure", "members":{ @@ -7385,6 +7531,57 @@ "members":{ } }, + "CreateColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "Role" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to generate column statistics.
" + }, + "Role":{ + "shape":"NameString", + "documentation":"The role used for running the column statistics.
" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"A schedule for running the column statistics, specified in CRON syntax.
" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"A list of column names for which to run statistics.
" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"The percentage of data to sample.
" + }, + "CatalogID":{ + "shape":"NameString", + "documentation":"The ID of the Data Catalog in which the database resides.
" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"Name of the security configuration that is used to encrypt CloudWatch logs.
" + }, + "Tags":{ + "shape":"TagsMap", + "documentation":"A map of tags.
" + } + } + }, + "CreateColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "CreateConnectionRequest":{ "type":"structure", "required":["ConnectionInput"], @@ -9723,6 +9920,28 @@ "members":{ } }, + "DeleteColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to delete column statistics.
" + } + } + }, + "DeleteColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteConnectionNameList":{ "type":"list", "member":{"shape":"NameString"}, @@ -11649,6 +11868,32 @@ } } }, + "GetColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to retrieve column statistics.
" + } + } + }, + "GetColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + "ColumnStatisticsTaskSettings":{ + "shape":"ColumnStatisticsTaskSettings", + "documentation":"A ColumnStatisticsTaskSettings
object representing the settings for the column statistics task.
The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to start a column statistic task run schedule.
" + } + } + }, + "StartColumnStatisticsTaskRunScheduleResponse":{ + "type":"structure", + "members":{ + } + }, "StartCrawlerRequest":{ "type":"structure", "required":["Name"], @@ -21324,6 +21591,28 @@ "members":{ } }, + "StopColumnStatisticsTaskRunScheduleRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to stop a column statistic task run schedule.
" + } + } + }, + "StopColumnStatisticsTaskRunScheduleResponse":{ + "type":"structure", + "members":{ + } + }, "StopCrawlerRequest":{ "type":"structure", "required":["Name"], @@ -22801,6 +23090,52 @@ "max":25, "min":0 }, + "UpdateColumnStatisticsTaskSettingsRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "DatabaseName":{ + "shape":"NameString", + "documentation":"The name of the database where the table resides.
" + }, + "TableName":{ + "shape":"NameString", + "documentation":"The name of the table for which to generate column statistics.
" + }, + "Role":{ + "shape":"NameString", + "documentation":"The role used for running the column statistics.
" + }, + "Schedule":{ + "shape":"CronExpression", + "documentation":"A schedule for running the column statistics, specified in CRON syntax.
" + }, + "ColumnNameList":{ + "shape":"ColumnNameList", + "documentation":"A list of column names for which to run statistics.
" + }, + "SampleSize":{ + "shape":"SampleSizePercentage", + "documentation":"The percentage of data to sample.
" + }, + "CatalogID":{ + "shape":"NameString", + "documentation":"The ID of the Data Catalog in which the database resides.
" + }, + "SecurityConfiguration":{ + "shape":"NameString", + "documentation":"Name of the security configuration that is used to encrypt CloudWatch logs.
" + } + } + }, + "UpdateColumnStatisticsTaskSettingsResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateConnectionRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 7307b931fe..7310848cdf 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -54,6 +54,19 @@ ], "documentation":"Associates a trial component with a trial. A trial component can be associated with multiple trials. To disassociate a trial component from a trial, call the DisassociateTrialComponent API.
" }, + "BatchDeleteClusterNodes":{ + "name":"BatchDeleteClusterNodes", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteClusterNodesRequest"}, + "output":{"shape":"BatchDeleteClusterNodesResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"Deletes specific nodes within a SageMaker HyperPod cluster. BatchDeleteClusterNodes
accepts a cluster name and a list of node IDs.
To safeguard your work, back up your data to Amazon S3 or an FSx for Lustre file system before invoking the API on a worker node group. This will help prevent any potential data loss from the instance root volume. For more information about backup, see Use the backup script provided by SageMaker HyperPod.
If you want to invoke this API on an existing cluster, you'll first need to patch the cluster by running the UpdateClusterSoftware API. For more information about patching a cluster, see Update the SageMaker HyperPod platform software of a cluster.
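A brief sketch of the call described above, using placeholder cluster and node identifiers; it is not part of the diff, and the response handling mirrors the Successful/Failed lists defined later in this service model.

.. code-block:: python

    # Sketch: delete two HyperPod worker nodes and report per-node failures.
    import botocore.session

    session = botocore.session.get_session()
    sagemaker = session.create_client('sagemaker', region_name='us-west-2')

    response = sagemaker.batch_delete_cluster_nodes(
        ClusterName='demo-hyperpod-cluster',  # placeholder cluster name
        NodeIds=['i-0123456789abcdef0', 'i-0fedcba9876543210'],  # placeholder node IDs
    )

    print('Deleted:', response.get('Successful', []))
    for error in response.get('Failed', []):
        # Code is one of NodeIdNotFound, InvalidNodeStatus, or NodeIdInUse.
        print(error['NodeId'], error['Code'], error['Message'])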
Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.
" + "documentation":"Updates the platform software of a SageMaker HyperPod cluster for security patching. To learn how to use this API, see Update the SageMaker HyperPod platform software of a cluster.
The UpgradeClusterSoftware API call may impact your SageMaker HyperPod cluster uptime and availability. Plan accordingly to mitigate potential disruptions to your workloads.
Configuration to control how SageMaker captures inference data for batch transform jobs.
" }, + "BatchDeleteClusterNodesError":{ + "type":"structure", + "required":[ + "Code", + "Message", + "NodeId" + ], + "members":{ + "Code":{ + "shape":"BatchDeleteClusterNodesErrorCode", + "documentation":"The error code associated with the error encountered when deleting a node.
The code provides information about the specific issue encountered, such as the node not being found, the node's status being invalid for deletion, or the node ID being in use by another process.
" + }, + "Message":{ + "shape":"String", + "documentation":"A message describing the error encountered when deleting a node.
" + }, + "NodeId":{ + "shape":"ClusterNodeId", + "documentation":"The ID of the node that encountered an error during the deletion process.
" + } + }, + "documentation":"Represents an error encountered when deleting a node from a SageMaker HyperPod cluster.
" + }, + "BatchDeleteClusterNodesErrorCode":{ + "type":"string", + "enum":[ + "NodeIdNotFound", + "InvalidNodeStatus", + "NodeIdInUse" + ] + }, + "BatchDeleteClusterNodesErrorList":{ + "type":"list", + "member":{"shape":"BatchDeleteClusterNodesError"}, + "max":99, + "min":1 + }, + "BatchDeleteClusterNodesRequest":{ + "type":"structure", + "required":[ + "ClusterName", + "NodeIds" + ], + "members":{ + "ClusterName":{ + "shape":"ClusterNameOrArn", + "documentation":"The name of the SageMaker HyperPod cluster from which to delete the specified nodes.
" + }, + "NodeIds":{ + "shape":"ClusterNodeIds", + "documentation":"A list of node IDs to be deleted from the specified cluster.
For SageMaker HyperPod clusters using the Slurm workload manager, you cannot remove instances that are configured as Slurm controller nodes.
A list of errors encountered when deleting the specified nodes.
" + }, + "Successful":{ + "shape":"ClusterNodeIds", + "documentation":"A list of node IDs that were successfully deleted from the specified cluster.
" + } + } + }, "BatchDescribeModelPackageError":{ "type":"structure", "required":[ @@ -7439,7 +7519,26 @@ "ml.t3.medium", "ml.t3.large", "ml.t3.xlarge", - "ml.t3.2xlarge" + "ml.t3.2xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.16xlarge", + "ml.g6.12xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.gr6.4xlarge", + "ml.gr6.8xlarge", + "ml.g6e.xlarge", + "ml.g6e.2xlarge", + "ml.g6e.4xlarge", + "ml.g6e.8xlarge", + "ml.g6e.16xlarge", + "ml.g6e.12xlarge", + "ml.g6e.24xlarge", + "ml.g6e.48xlarge", + "ml.p5e.48xlarge" ] }, "ClusterLifeCycleConfig":{ @@ -7533,6 +7632,12 @@ "min":1, "pattern":"^i-[a-f0-9]{8}(?:[a-f0-9]{9})?$" }, + "ClusterNodeIds":{ + "type":"list", + "member":{"shape":"ClusterNodeId"}, + "max":99, + "min":1 + }, "ClusterNodeRecovery":{ "type":"string", "enum":[ @@ -36525,6 +36630,22 @@ "ml.g5.12xlarge", "ml.g5.24xlarge", "ml.g5.48xlarge", + "ml.g6.xlarge", + "ml.g6.2xlarge", + "ml.g6.4xlarge", + "ml.g6.8xlarge", + "ml.g6.16xlarge", + "ml.g6.12xlarge", + "ml.g6.24xlarge", + "ml.g6.48xlarge", + "ml.g6e.xlarge", + "ml.g6e.2xlarge", + "ml.g6e.4xlarge", + "ml.g6e.8xlarge", + "ml.g6e.16xlarge", + "ml.g6e.12xlarge", + "ml.g6e.24xlarge", + "ml.g6e.48xlarge", "ml.trn1.2xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge", @@ -36862,6 +36983,10 @@ "shape":"TrainingJobStatus", "documentation":"The status of the training job.
" }, + "SecondaryStatus":{ + "shape":"SecondaryStatus", + "documentation":"The secondary status of the training job.
" + }, "WarmPoolStatus":{ "shape":"WarmPoolStatus", "documentation":"The status of the warm pool associated with the training job.
" diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index da55b75617..70bb197f11 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -6605,6 +6605,10 @@ "shape":"AmazonResourceName", "documentation":"The Amazon Resource Name (ARN) of the template.
" }, + "TemplateContent":{ + "shape":"EmailTemplateContent", + "documentation":"The content of the template.
Amazon SES supports only simple substitutions when you send email using the SendEmail or SendBulkEmail operations and you provide the full template content in the request.
An object that defines the values to use for message variables in the template. This object is a set of key-value pairs. Each key defines a message variable in the template. The corresponding value defines the value to use for that variable.
" @@ -6614,7 +6618,7 @@ "documentation":"The list of message headers that will be added to the email message.
" } }, - "documentation":"An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to define, save, and reuse in email messages that you send.
" + "documentation":"An object that defines the email template to use for an email message, and the values to use for any message variables in that template. An email template is a type of message template that contains content that you want to reuse in email messages that you send. You can specifiy the email template by providing the name or ARN of an email template previously saved in your Amazon SES account or by providing the full template content.
" }, "TemplateContent":{ "type":"string", diff --git a/botocore/signers.py b/botocore/signers.py index 89319af10b..14692d16f4 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -576,11 +576,14 @@ def generate_presigned_post( :type conditions: list :param conditions: A list of conditions to include in the policy. Each element can be either a list or a structure. For example: - [ - {"acl": "public-read"}, - {"bucket": "mybucket"}, - ["starts-with", "$key", "mykey"] - ] + + .. code:: python + + [ + {"acl": "public-read"}, + {"bucket": "amzn-s3-demo-bucket"}, + ["starts-with", "$key", "mykey"] + ] :type expires_in: int :param expires_in: The number of seconds the presigned post is valid @@ -595,12 +598,17 @@ def generate_presigned_post( the form fields and respective values to use when submitting the post. For example: - {'url': 'https://mybucket.s3.amazonaws.com - 'fields': {'acl': 'public-read', + .. code:: python + + { + 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com', + 'fields': { + 'acl': 'public-read', 'key': 'mykey', 'signature': 'mysignature', - 'policy': 'mybase64 encoded policy'} - } + 'policy': 'mybase64 encoded policy' + } + } """ if fields is None: fields = {} @@ -751,11 +759,13 @@ def generate_presigned_post( :param Conditions: A list of conditions to include in the policy. Each element can be either a list or a structure. For example: - [ - {"acl": "public-read"}, - ["content-length-range", 2, 5], - ["starts-with", "$success_action_redirect", ""] - ] + .. code:: python + + [ + {"acl": "public-read"}, + ["content-length-range", 2, 5], + ["starts-with", "$success_action_redirect", ""] + ] Conditions that are included may pertain to acl, content-length-range, Cache-Control, Content-Type, @@ -764,7 +774,7 @@ def generate_presigned_post( and/or x-amz-meta-. Note that if you include a condition, you must specify - the a valid value in the fields dictionary as well. A value will + a valid value in the fields dictionary as well. A value will not be added automatically to the fields dictionary based on the conditions. @@ -778,12 +788,17 @@ def generate_presigned_post( the form fields and respective values to use when submitting the post. For example: - {'url': 'https://mybucket.s3.amazonaws.com - 'fields': {'acl': 'public-read', + .. 
code:: python + + { + 'url': 'https://amzn-s3-demo-bucket.s3.amazonaws.com', + 'fields': { + 'acl': 'public-read', 'key': 'mykey', 'signature': 'mysignature', - 'policy': 'mybase64 encoded policy'} - } + 'policy': 'mybase64 encoded policy' + } + } """ bucket = Bucket key = Key diff --git a/docs/source/client_upgrades.rst b/docs/source/client_upgrades.rst index 74adbe22ea..781df3a221 100644 --- a/docs/source/client_upgrades.rst +++ b/docs/source/client_upgrades.rst @@ -21,7 +21,7 @@ Below is an example of the old interface: s3 = session.get_service('s3') endpoint = s3.get_endpoint('us-west-2') list_objects = s3.get_operation('ListObjects') - http, response = list_objects.call(endpoint, Bucket='mybucket') + http, response = list_objects.call(endpoint, Bucket='amzn-s3-demo-bucket') if http.status_code == 200: print("Contents: %s" % response['Contents]) else: @@ -36,7 +36,7 @@ Here's an example of the newer (preferred) client interface: import botocore.session session = botocore.session.get_session() s3 = session.create_client('s3', 'us-west-2') - response = s3.list_objects(Bucket='mybucket') + response = s3.list_objects(Bucket='amzn-s3-demo-bucket') print("Contents: %s" % response['Contents']) @@ -132,22 +132,22 @@ Use a single client to make multiple API calls. endpoint = service.get_endpoint('us-west-2') operation = service.get_operation('ListObjects') head_object = service.get_operation('HeadObject') - parsed = operation.call(endpoint, Bucket='mybucket')[1] + parsed = operation.call(endpoint, Bucket='amzn-s3-demo-bucket')[1] for obj in parsed['Contents']: name = obj['Key'] # Use existing connection be passing in the same endpoint. - print(head_object.call(endpoint, Bucket='mybucket', Key=name)) + print(head_object.call(endpoint, Bucket='amzn-s3-demo-bucket', Key=name)) **New** .. code-block:: python s3 = session.get_client('s3', 'us-west-2') - for obj in s3.list_objects(Bucket='mybucket')['Contents']: + for obj in s3.list_objects(Bucket='amzn-s3-demo-bucket')['Contents']: name = obj['Key'] # Using the same client will reuse any existing HTTP # connections the client was using. - print(s3.head_object(Bucket='mybucket', Key=name)) + print(s3.head_object(Bucket='amzn-s3-demo-bucket', Key=name)) Operation and Method Names diff --git a/docs/source/conf.py b/docs/source/conf.py index b709ae8733..68a2cf516e 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.35.' # The full version, including alpha/beta/rc tags. -release = '1.35.52' +release = '1.35.53' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/topics/paginators.rst b/docs/source/topics/paginators.rst index fba76b713a..edab8fe68d 100644 --- a/docs/source/topics/paginators.rst +++ b/docs/source/topics/paginators.rst @@ -33,7 +33,7 @@ underlying API operation. The ``paginate`` method then returns an iterable paginator = client.get_paginator('list_objects') # Create a PageIterator from the Paginator - page_iterator = paginator.paginate(Bucket='my-bucket') + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket') for page in page_iterator: print(page['Contents']) @@ -48,7 +48,7 @@ the pages of API operation results. 
The ``paginate`` method accepts a pagination:: paginator = client.get_paginator('list_objects') - page_iterator = paginator.paginate(Bucket='my-bucket', + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket', PaginationConfig={'MaxItems': 10}) ``MaxItems`` @@ -83,7 +83,7 @@ to the client:: session = botocore.session.get_session() client = session.create_client('s3', region_name='us-west-2') paginator = client.get_paginator('list_objects') - operation_parameters = {'Bucket': 'my-bucket', + operation_parameters = {'Bucket': 'amzn-s3-demo-bucket', 'Prefix': 'foo/baz'} page_iterator = paginator.paginate(**operation_parameters) for page in page_iterator: @@ -101,7 +101,7 @@ JMESPath expressions that are applied to each page of results through the .. code-block:: python paginator = client.get_paginator('list_objects') - page_iterator = paginator.paginate(Bucket='my-bucket') + page_iterator = paginator.paginate(Bucket='amzn-s3-demo-bucket') filtered_iterator = page_iterator.search("Contents[?Size > `100`][]") for key_data in filtered_iterator: print(key_data)