Issue argoproj#45: get aws partition based on region
Harry Zhang committed Aug 28, 2017
1 parent 1855c77 commit 693f450
Showing 9 changed files with 100 additions and 69 deletions.
4 changes: 2 additions & 2 deletions common/python/ax/cloud/aws/__init__.py
@@ -8,9 +8,9 @@
from .instance_profile import InstanceProfile
from .aws_s3 import AXS3Bucket, BUCKET_CLEAN_KEYWORD
from .security import SecurityToken
from .util import default_aws_retry
from .util import default_aws_retry, get_aws_partition_from_region
from .ec2 import EC2InstanceState, EC2, EC2IPPermission
from .autoscaling import ASGInstanceLifeCycle, ASG
from .launch_config import LaunchConfig
from .ebs import RawEBSVolume
from .consts import AWS_DEFAULT_PROFILE
from .consts import AWS_DEFAULT_PROFILE, AWS_ALL_RESOURCES
9 changes: 9 additions & 0 deletions common/python/ax/cloud/aws/consts.py
@@ -6,3 +6,12 @@


AWS_DEFAULT_PROFILE = "default"


AWS_ALL_RESOURCES = "*"


class AWSPartitions:
PARTITION_AWS = "aws"
PARTITION_US_GOV = "aws-us-gov"
PARTITION_CHINA = "aws-cn"
16 changes: 16 additions & 0 deletions common/python/ax/cloud/aws/util.py
@@ -8,6 +8,7 @@
import boto3
from botocore.exceptions import ClientError, EndpointConnectionError, ConnectionClosedError, BotoCoreError

from .consts import AWSPartitions

def default_aws_retry(exception):
# Boto has 3 types of errors (see botocore/exceptions.py)
@@ -69,3 +70,18 @@ def tag_dict_to_aws_filter(tags):
)
return filters


def get_aws_partition_from_region(region_name):
"""
Given a region name, return the partition name. The partition is used to form Amazon Resource Names (ARNs).
See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
:param region_name: AWS region name, e.g. "us-west-2" or "cn-north-1"
:return: one of the AWSPartitions partition names
"""
region_string = region_name.lower()
if region_string.startswith("cn-"):
return AWSPartitions.PARTITION_CHINA
elif region_string.startswith("us-gov"):
return AWSPartitions.PARTITION_US_GOV
else:
return AWSPartitions.PARTITION_AWS
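
For context, a minimal sketch of how the new helper behaves for the three partition families. The import path follows the re-export added in __init__.py above; the region names are only illustrative inputs:

from ax.cloud.aws import get_aws_partition_from_region

# Standard commercial regions fall into the default "aws" partition.
assert get_aws_partition_from_region("us-west-2") == "aws"
# China regions are prefixed with "cn-" and map to "aws-cn".
assert get_aws_partition_from_region("cn-north-1") == "aws-cn"
# GovCloud regions start with "us-gov" (case-insensitive) and map to "aws-us-gov".
assert get_aws_partition_from_region("US-GOV-WEST-1") == "aws-us-gov"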
4 changes: 2 additions & 2 deletions platform/cluster/aws/util.sh
@@ -757,11 +757,11 @@ function delete-tag {

# Creates the IAM roles (if they do not already exist)
function create-iam-profiles {
/ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE
/ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION}
}

function delete-iam-profiles {
/ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE
/ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION}
}

# Wait for instance to be in specified state
3 changes: 2 additions & 1 deletion platform/source/lib/ax/platform/ax_master_manager.py
@@ -626,7 +626,8 @@ def upgrade(self):
self.master_instance = self.ec2.Instance(instance_id)
logger.info("Running master %s.", instance_id)
self.aws_image = ami_id
self.instance_profile = AXClusterInstanceProfile(self.cluster_name_id, aws_profile=self.profile).get_master_arn()
self.instance_profile = AXClusterInstanceProfile(self.cluster_name_id, region_name=self.region,
aws_profile=self.profile).get_master_arn()
self.populate_attributes()
master_tag_updated = self.ensure_master_tags()
# TODO: Possible race here.
5 changes: 3 additions & 2 deletions platform/source/lib/ax/platform/ax_minion_upgrade.py
@@ -94,8 +94,9 @@ def _update_launch_config(self, old_name, new_name, retain_spot_price=False):

# Replace ImageId and everything listed in default_kube_up_env.
config["ImageId"] = ami_id
config["IamInstanceProfile"] = AXClusterInstanceProfile(self._cluster_name_id,
aws_profile=self._profile).get_minion_instance_profile_name()
config["IamInstanceProfile"] = AXClusterInstanceProfile(
self._cluster_name_id, region_name=self._region, aws_profile=self._profile
).get_minion_instance_profile_name()
user_data = zlib.decompressobj(32 + zlib.MAX_WBITS).decompress(user_data)
user_data = kube_env_update(user_data, updates)
comp = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
49 changes: 11 additions & 38 deletions platform/source/lib/ax/platform/cluster_buckets.py
@@ -9,6 +9,7 @@

from ax.aws.profiles import AWSAccountInfo
from ax.cloud import Cloud
from ax.cloud.aws import get_aws_partition_from_region
from ax.meta import AXClusterId, AXClusterConfigPath, AXClusterDataPath, AXSupportConfigPath, AXUpgradeConfigPath

from ax.platform.exceptions import AXPlatformException
@@ -29,43 +30,14 @@
}
}

upgrade_bucket_policy_template = """
SUPPORT_BUCKET_POLICY_TEMPLATE = """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{id}:root"
},
"Action": [
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:ListBucketMultipartUploads",
"s3:ListBucketVersions"
],
"Resource": "arn:aws:s3:::{s3}"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{id}:root"
},
"Action": "s3:*",
"Resource": "arn:aws:s3:::{s3}/*"
}
]
}
"""

support_bucket_policy_template = """
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{id}:root"
"AWS": "arn:{partition}:iam::{account}:root"
},
"Action": [
"s3:ListBucket",
@@ -74,12 +46,12 @@
"s3:ListBucketVersions",
"s3:GetBucketAcl"
],
"Resource": "arn:aws:s3:::{s3}"
"Resource": "arn:{partition}:s3:::{bucket_name}"
},
{
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::{id}:root"
"AWS": "arn:{partition}:iam::{account}:root"
},
"Action": [
"s3:GetObject",
@@ -88,7 +60,7 @@
"s3:AbortMultipartUpload",
"s3:ListMultipartUploadParts"
],
"Resource": "arn:aws:s3:::{s3}/*"
"Resource": "arn:{partition}:s3:::{bucket_name}/*"
}
]
}
@@ -185,6 +157,7 @@ def __init__(self, name_id, aws_profile, aws_region):
self._name_id = name_id
self._aws_profile = aws_profile
self._aws_region = aws_region
self._aws_partition = get_aws_partition_from_region(self._aws_region)

def update(self, iam):
"""
@@ -204,7 +177,7 @@ def update(self, iam):
if not support_bucket.get_policy():
logger.info("Argo support bucket policy does not exist, creating new one...")
if not support_bucket.put_policy(
policy=self._generate_bucket_policy_string(template=support_bucket_policy_template,
policy=self._generate_bucket_policy_string(template=SUPPORT_BUCKET_POLICY_TEMPLATE,
bucket_name=support_bucket.get_bucket_name(),
iam=iam)
):
@@ -217,7 +190,7 @@
if not upgrade_bucket.get_policy():
logger.info("Argo upgrade bucket policy does not exist, creating new one...")
if not upgrade_bucket.put_policy(
policy=self._generate_bucket_policy_string(template=support_bucket_policy_template,
policy=self._generate_bucket_policy_string(template=SUPPORT_BUCKET_POLICY_TEMPLATE,
bucket_name=upgrade_bucket.get_bucket_name(),
iam=iam)
):
@@ -241,6 +214,6 @@ def _generate_bucket_policy_string(self, template, bucket_name, iam):
aws_cid = AWSAccountInfo(aws_profile=self._aws_profile).get_account_id_from_iam(iam)
policy = json.loads(template)
for s in policy["Statement"]:
s["Principal"]["AWS"] = s["Principal"]["AWS"].format(id=aws_cid)
s["Resource"] = s["Resource"].format(s3=bucket_name)
s["Principal"]["AWS"] = s["Principal"]["AWS"].format(partition=self._aws_partition, account=aws_cid)
s["Resource"] = s["Resource"].format(partition=self._aws_partition, bucket_name=bucket_name)
return json.dumps(policy)
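
A minimal standalone sketch of the substitution that _generate_bucket_policy_string now performs. The helper name render_policy, the account id, and the bucket name below are illustrative placeholders; the real method resolves the account id through AWSAccountInfo:

import json

from ax.cloud.aws import get_aws_partition_from_region

def render_policy(template, region, account_id, bucket_name):
    # Parse the JSON template first, then format only the string fields that
    # carry {partition}/{account}/{bucket_name} placeholders, as above.
    partition = get_aws_partition_from_region(region)
    policy = json.loads(template)
    for statement in policy["Statement"]:
        statement["Principal"]["AWS"] = statement["Principal"]["AWS"].format(
            partition=partition, account=account_id)
        statement["Resource"] = statement["Resource"].format(
            partition=partition, bucket_name=bucket_name)
    return json.dumps(policy)

# e.g. render_policy(SUPPORT_BUCKET_POLICY_TEMPLATE, "cn-north-1", "123456789012", "my-support-bucket")
# yields Resource entries such as "arn:aws-cn:s3:::my-support-bucket".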
72 changes: 52 additions & 20 deletions platform/source/lib/ax/platform/cluster_instance_profile.py
@@ -5,16 +5,18 @@
#
# Module to manage instance profiles for AX cluster.

import copy
import logging

from ax.aws.profiles import AWSAccountInfo
from ax.cloud.aws import get_aws_partition_from_region, AWS_ALL_RESOURCES
from ax.cloud.aws.instance_profile import InstanceProfile

logger = logging.getLogger(__name__)


# Default instance profile statement for master.
master_policy = {
MASTER_POLICY_TEMPLATE = {
"Version": "2012-10-17",
"Statement": [
{
@@ -35,7 +37,7 @@
{
"Effect": "Allow",
"Action": "s3:*",
"Resource": "arn:aws:s3:::applatix-*"
"Resource": "arn:{partition}:s3:::applatix-*"
},
{
"Action": [
@@ -51,20 +53,28 @@
"Resource": "*",
"Effect": "Allow"
},
{
"Action": "iam:PassRole",
"Resource": [
"arn:{partition}:iam::{account}:role/{master_name}",
"arn:{partition}:iam::{account}:role/{minion_name}"
],
"Effect": "Allow"
}
]
}

# Default instance profile statement for minions.
minion_policy = {
MINION_POLICY_TEMPLATE = {
"Version": "2012-10-17",
"Statement": [
{
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::applatix-*",
"arn:aws:s3:::*axawss3-test*",
"arn:aws:s3:::ax-public",
"arn:aws:s3:::ax-public/*"
"arn:{partition}:s3:::applatix-*",
"arn:{partition}:s3:::*axawss3-test*",
"arn:{partition}:s3:::ax-public",
"arn:{partition}:s3:::ax-public/*"
],
"Effect": "Allow"
},
@@ -145,33 +155,33 @@
],
"Resource": "*",
"Effect": "Allow"
},
{
"Action": "iam:PassRole",
"Resource": [
"arn:{partition}:iam::{account}:role/{master_name}",
"arn:{partition}:iam::{account}:role/{minion_name}"
],
"Effect": "Allow"
}
]
}


class AXClusterInstanceProfile(object):
def __init__(self, name_id, aws_profile=None):
def __init__(self, name_id, region_name, aws_profile=None):
self._name_id = name_id
self._master_name = name_id + "-master"
self._minion_name = name_id + "-minion"
self._aws_partition = get_aws_partition_from_region(region_name)
self._master_profile = InstanceProfile(self._master_name, aws_profile=aws_profile)
self._minion_profile = InstanceProfile(self._minion_name, aws_profile=aws_profile)

# Create pass_role statement specific to this cluster.
self._account = AWSAccountInfo(aws_profile=aws_profile).get_account_id()
pass_role = {
"Action": "iam:PassRole",
"Resource": [
"arn:aws:iam::{}:role/{}".format(self._account, self._master_name),
"arn:aws:iam::{}:role/{}".format(self._account, self._minion_name),
],
"Effect": "Allow"
}
self._master_policy = master_policy
self._master_policy["Statement"] += [pass_role]
self._minion_policy = minion_policy
self._minion_policy["Statement"] += [pass_role]

self._master_policy = self._format_policy_resources(MASTER_POLICY_TEMPLATE)
self._minion_policy = self._format_policy_resources(MINION_POLICY_TEMPLATE)

def update(self):
"""
@@ -202,3 +212,25 @@ def get_minion_instance_profile_name(self):
No need to lookup from AWS as name is sufficient.
"""
return self._minion_name

def _format_policy_resources(self, policy):
"""
Dynamically insert cluster-specific information into the policy: AWS partition, AWS account id,
master role name and minion role name.
:param policy: policy template dict whose Resource strings may contain {partition}/{account}/{master_name}/{minion_name} placeholders
:return: a deep copy of the policy with those placeholders resolved
"""
p = copy.deepcopy(policy)
for statement in p.get("Statement", []):
resource = statement.get("Resource", AWS_ALL_RESOURCES)
if resource != AWS_ALL_RESOURCES:
new_resource = []
if isinstance(resource, list):
for r in resource:
new_resource.append(r.format(partition=self._aws_partition, account=self._account,
master_name=self._master_name, minion_name=self._minion_name))
else:
new_resource.append(resource.format(partition=self._aws_partition, account=self._account,
master_name=self._master_name, minion_name=self._minion_name))
statement["Resource"] = new_resource
return p
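
For illustration, roughly what _format_policy_resources produces for one of the new PassRole statements. The partition, account id, and cluster names below are made-up placeholders:

partition = "aws-us-gov"          # e.g. get_aws_partition_from_region("us-gov-west-1")
account = "123456789012"
master_name = "demo-cluster-master"
minion_name = "demo-cluster-minion"

resource = "arn:{partition}:iam::{account}:role/{master_name}"
print(resource.format(partition=partition, account=account,
                      master_name=master_name, minion_name=minion_name))
# -> arn:aws-us-gov:iam::123456789012:role/demo-cluster-master
#
# Statements whose Resource is "*" (AWS_ALL_RESOURCES) are left untouched,
# and a plain-string Resource is normalized into a single-element list.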
7 changes: 3 additions & 4 deletions platform/source/tools/ax-upgrade-misc.py
@@ -46,13 +46,12 @@
args = ax_parser.parse_args()
if args.ensure_aws_iam or args.delete_aws_iam:
from ax.platform.cluster_instance_profile import AXClusterInstanceProfile
aws_profile = args.aws_profile
assert aws_profile, "Missing AWS profile to ensure aws iam"
assert args.cluster_name_id, "Missing cluster name id to ensure aws iam"
assert args.aws_region, "Missing AWS region to ensure aws iam"
if args.ensure_aws_iam:
AXClusterInstanceProfile(args.cluster_name_id, aws_profile=aws_profile).update()
AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).update()
elif args.delete_aws_iam:
AXClusterInstanceProfile(args.cluster_name_id, aws_profile=aws_profile).delete()
AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).delete()

if args.ensure_aws_s3:
from ax.platform.cluster_buckets import AXClusterBuckets
