Issue argoproj#17: take None as a valid cloud profile
Harry Zhang authored and wokeGit committed Nov 22, 2017
1 parent 22db619 commit 06a85e0
Showing 9 changed files with 47 additions and 26 deletions.
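The pattern across all nine files is the same: a missing cloud profile stays None (or leaves AWS_DEFAULT_PROFILE unset) instead of being forced to a hard-coded default, so boto3 and the AWS CLI fall back to their normal credential resolution (environment variables, the [default] shared-credentials profile, or an instance role). A minimal sketch of that fallback, independent of this repository's code; make_session is an illustrative helper, not a function from the diff:

import boto3

def make_session(aws_profile=None):
    # None means "let boto3 pick credentials": environment variables, the
    # [default] profile in ~/.aws/credentials, or the EC2 instance role.
    if aws_profile:
        return boto3.Session(profile_name=aws_profile)
    return boto3.Session()

# make_session() on an EC2 node with an instance role needs no profile at all,
# while make_session("dev") pins the named profile ("dev" is illustrative).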
@@ -182,8 +182,6 @@ def _clean_up_kubernetes_cluster(self):

         if self._cfg.cloud_profile:
             env["AWS_DEFAULT_PROFILE"] = self._cfg.cloud_profile
-        else:
-            env["AWS_DEFAULT_PROFILE"] = AWS_DEFAULT_PROFILE

         logger.info("\n\n%sCalling kube-down ...%s\n", COLOR_GREEN, COLOR_NORM)
         AXKubeUpDown(cluster_name_id=self._name_id, env=env, aws_profile=self._cfg.cloud_profile).down()
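The upgrader and kube_env_config changes below repeat this conditional: the environment handed to the kube scripts carries AWS_DEFAULT_PROFILE only when the user actually supplied a profile. A hedged sketch of the pattern, with run_kube_script as an illustrative helper rather than the repository's AXKubeUpDown:

import os
import subprocess

def run_kube_script(script_path, cloud_profile=None):
    # Inherit the caller's environment and add the profile only if one was
    # explicitly configured; otherwise the child process uses whatever
    # credentials are already active (for example, an instance role).
    env = dict(os.environ)
    if cloud_profile:
        env["AWS_DEFAULT_PROFILE"] = cloud_profile
    subprocess.check_call([script_path], env=env)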
common/python/ax/cluster_management/app/cluster_upgrader.py (2 changes: 0 additions & 2 deletions)
@@ -209,8 +209,6 @@ def _upgrade_kube(self):

         if self._cfg.cloud_profile:
             env["ARGO_AWS_PROFILE"] = self._cfg.cloud_profile
-        else:
-            env["ARGO_AWS_PROFILE"] = AWS_DEFAULT_PROFILE

         logger.info("Upgrading Kubernetes with environments %s", pformat(env))
         env.update(os.environ)
common/python/ax/cluster_management/app/options/common.py (14 changes: 11 additions & 3 deletions)
@@ -8,9 +8,13 @@
 import os
 from future.utils import with_metaclass

-from ax.cloud.aws import AWS_DEFAULT_PROFILE
 from ax.platform.component_config import AXPlatformConfigDefaults, SoftwareInfo

+# We should set the AWS profile to None if the user does not provide one.
+# Because in Python None is a different type from str, we first represent the
+# missing profile as the string "None" and finally convert it back to None.
+AWS_NO_PROFILE = "None"
+


 def typed_raw_input_with_default(prompt, default, type_converter):
     real_prompt = prompt + " (Default: {}): ".format(default)
@@ -20,7 +24,7 @@ def typed_raw_input_with_default(prompt, default, type_converter):

 class ClusterOperationDefaults:
     CLOUD_PROVIDER = "aws"
-    CLOUD_PROFILE = AWS_DEFAULT_PROFILE
+    CLOUD_PROFILE = AWS_NO_PROFILE
     PLATFORM_SERVICE_MANIFEST_ROOT = AXPlatformConfigDefaults.DefaultManifestRoot
     PLATFORM_BOOTSTRAP_CONFIG_FILE = AXPlatformConfigDefaults.DefaultPlatformConfigFile

@@ -71,7 +75,7 @@ def default_or_wizard(self):
         if self.cloud_profile is None:
             self.cloud_profile = typed_raw_input_with_default(
                 prompt="Please enter your cloud provider profile. If you don't provide one, we are going to use the default you configured on host.",
-                default=AWS_DEFAULT_PROFILE,
+                default=AWS_NO_PROFILE,
                 type_converter=str
             )

@@ -82,6 +86,10 @@ def default_or_wizard(self):
         confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate:"
         raw_input(confirmation)

+        # TODO: revise this once we bring GCP into picture
+        if self.cloud_profile == AWS_NO_PROFILE:
+            self.cloud_profile = None
+


 def validate_software_info(software_info):
     assert isinstance(software_info, SoftwareInfo)
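Because the interactive wizard works entirely in strings, "no profile" is represented by the AWS_NO_PROFILE sentinel ("None") while prompting and converted back to a real None afterwards, so downstream code keeps using simple truthiness checks. A simplified sketch of that round trip; prompt_with_default and ask_cloud_profile are illustrative stand-ins, not the module's functions:

AWS_NO_PROFILE = "None"  # string sentinel shown as the prompt default

def prompt_with_default(prompt, default):
    # raw_input-style helper: an empty answer keeps the default string.
    answer = input("{} (Default: {}): ".format(prompt, default))
    return answer or default

def ask_cloud_profile(current=None):
    if current is None:
        current = prompt_with_default("Please enter your cloud provider profile", AWS_NO_PROFILE)
    # Convert the sentinel back to a real None so callers can write
    # "if cloud_profile:" instead of comparing against the string "None".
    return None if current == AWS_NO_PROFILE else current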
@@ -11,11 +11,11 @@
 from netaddr import IPAddress

 from ax.cloud import Cloud
-from ax.cloud.aws import EC2, AWS_DEFAULT_PROFILE
+from ax.cloud.aws import EC2
 from ax.platform.cluster_config import AXClusterSize, AXClusterType, SpotInstanceOption
 from ax.platform.component_config import SoftwareInfo
 from .common import add_common_flags, add_software_info_flags, validate_software_info, \
-    ClusterManagementOperationConfigBase, typed_raw_input_with_default
+    ClusterManagementOperationConfigBase, typed_raw_input_with_default, AWS_NO_PROFILE


 logger = logging.getLogger(__name__)
@@ -26,7 +26,7 @@ class ClusterInstallDefaults:
     CLUSTER_SIZE = "small"
     CLUSTER_TYPE = "standard"
     CLOUD_REGION = "us-west-2"
-    CLOUD_PROFILE = AWS_DEFAULT_PROFILE
+    CLOUD_PROFILE = AWS_NO_PROFILE
     CLOUD_PLACEMENT = "us-west-2a"
     VPC_CIDR_BASE = "172.20"
     SUBNET_MASK_SIZE = 22
@@ -121,7 +121,7 @@ def default_or_wizard(self):
         if self.cloud_profile is None:
             self.cloud_profile = typed_raw_input_with_default(
                 prompt="Please enter your cloud provider profile. If you don't provide one, we are going to use the default you configured on host.",
-                default=AWS_DEFAULT_PROFILE,
+                default=AWS_NO_PROFILE,
                 type_converter=str
             )

@@ -192,9 +192,13 @@ def default_or_wizard(self):
         confirmation += "Trusted CIDRs: {}\n".format(self.trusted_cidrs)
         confirmation += "Spot Instance Option: {}\n".format(self.spot_instances_option)
         confirmation += "User On-Demand Nodes: {}\n".format(self.user_on_demand_nodes)
-        confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate the program if these configurations are not what you want:\n"
+        confirmation += "\n\nPlease press ENTER to continue or press Ctrl-C to terminate the program if these configurations are not what you want:"
         raw_input(confirmation)

+        # TODO: revise this once we bring GCP into picture
+        if self.cloud_profile == AWS_NO_PROFILE:
+            self.cloud_profile = None
+


     def validate(self):
platform/cluster/aws/util.sh (12 changes: 10 additions & 2 deletions)
@@ -747,11 +747,19 @@ function delete-tag {

 # Creates the IAM roles (if they do not already exist)
 function create-iam-profiles {
-    /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION}
+    local aws_profile_arg=""
+    if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then
+        aws_profile_arg="--aws-profile ${AWS_DEFAULT_PROFILE}"
+    fi
+    /ax/bin/ax-upgrade-misc --ensure-aws-iam --cluster-name-id $CLUSTER_ID --aws-region ${AWS_REGION} ${aws_profile_arg}
 }

 function delete-iam-profiles {
-    /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-profile $AWS_DEFAULT_PROFILE --aws-region ${AWS_REGION}
+    local aws_profile_arg=""
+    if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then
+        aws_profile_arg="--aws-profile ${AWS_DEFAULT_PROFILE}"
+    fi
+    /ax/bin/ax-upgrade-misc --delete-aws-iam --cluster-name-id $CLUSTER_ID --aws-region ${AWS_REGION} ${aws_profile_arg}
 }

 # Wait for instance to be in specified state
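The ${AWS_DEFAULT_PROFILE+x} expansion is non-empty only when the variable is set, so the --aws-profile flag is appended only in that case and ax-upgrade-misc otherwise runs with the default credential chain. The same idea expressed in Python; build_upgrade_misc_cmd and the example cluster id are illustrative, not taken from the repository:

import os

def build_upgrade_misc_cmd(cluster_id, region, action="--ensure-aws-iam"):
    cmd = ["/ax/bin/ax-upgrade-misc", action,
           "--cluster-name-id", cluster_id,
           "--aws-region", region]
    profile = os.environ.get("AWS_DEFAULT_PROFILE")
    if profile:
        # Append the flag only when a profile is actually exported.
        cmd += ["--aws-profile", profile]
    return cmd

# build_upgrade_misc_cmd("demo-cluster-0123", "us-west-2") omits --aws-profile
# entirely when AWS_DEFAULT_PROFILE is not set.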
platform/source/lib/ax/platform/kube_env_config.py (2 changes: 0 additions & 2 deletions)
@@ -201,8 +201,6 @@ def prepare_kube_install_config(name_id, aws_profile, cluster_info, cluster_conf

     if aws_profile:
         env["AWS_DEFAULT_PROFILE"] = aws_profile
-    else:
-        env["AWS_DEFAULT_PROFILE"] = AWS_DEFAULT_PROFILE

     optional_env = {
         # Start off directly with all spot instances only for dev clusters.
platform/source/tools/ax-upgrade-misc.py (7 changes: 2 additions & 5 deletions)
@@ -47,7 +47,6 @@
 if args.ensure_aws_iam or args.delete_aws_iam:
     from ax.platform.cluster_instance_profile import AXClusterInstanceProfile
     assert args.cluster_name_id, "Missing cluster name id to ensure aws iam"
-    assert args.aws_region, "Missing AWS region to ensure aws iam"
     if args.ensure_aws_iam:
         AXClusterInstanceProfile(args.cluster_name_id, args.aws_region, aws_profile=args.aws_profile).update()
     elif args.delete_aws_iam:
@@ -58,10 +57,8 @@
     name_id = args.cluster_name_id
     aws_profile = args.aws_profile
     aws_region = args.aws_region
-    assert name_id and aws_profile and aws_region, \
-        "Missing parameters to ensure s3. name_id: {}, aws_profile: {}, aws_region: {}".format(name_id,
-                                                                                               aws_profile,
-                                                                                               aws_region)
+    assert name_id and aws_region, \
+        "Missing parameters to ensure s3. name_id: {}, aws_region: {}".format(name_id, aws_region)

     AXClusterBuckets(name_id, aws_profile, aws_region).update()

platform/source/tools/master_manager.py (2 changes: 1 addition & 1 deletion)
@@ -17,7 +17,7 @@ def run():
     parser.add_argument("cluster_name_id", help="Name of the cluster")
     parser.add_argument("command", help="Command, server or upgrade")
     parser.add_argument("--region", help="Region name")
-    parser.add_argument("--profile", help="Profile name")
+    parser.add_argument("--profile", default=None, help="Profile name")
     parser.add_argument('--version', action='version', version="%(prog)s {}".format(__version__))
     usr_args = parser.parse_args()

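For context, argparse already defaults optional arguments to None when no default is given, so default=None here makes the fallback explicit rather than changing behavior; the parsed value can then flow straight into code that treats None as "no profile". A minimal standalone check:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--profile", default=None, help="Profile name")
args = parser.parse_args([])   # simulate running without --profile
assert args.profile is None    # same result with or without default=None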
platform/source/tools/upgrade-kubernetes.sh (18 changes: 14 additions & 4 deletions)
@@ -39,7 +39,7 @@ ensure-aws-envs () {
         echo "Setting AWS profile to ${ARGO_AWS_PROFILE}"
         export AWS_DEFAULT_PROFILE="${ARGO_AWS_PROFILE}"
     else
-        export AWS_DEFAULT_PROFILE="default"
+        echo "Not setting AWS profile"
     fi
     echo "Setting AWS region to ${ARGO_AWS_REGION}"
     aws configure set region ${ARGO_AWS_REGION}
@@ -209,14 +209,18 @@ get-instance-counts() {

 upgrade-launch-config() {
     echo "Upgrading launch configurations ..."
+    local aws_profile_arg=""
+    if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then
+        aws_profile_arg="--profile ${AWS_DEFAULT_PROFILE}"
+    fi
     /ax/bin/minion_upgrade --new-kube-version ${NEW_KUBE_VERSION} \
                            --new-kube-server-hash ${NEW_KUBE_SERVER_SHA1} \
                            --new-cluster-install-version ${NEW_CLUSTER_INSTALL_VERSION} \
                            --new-kube-salt-hash ${NEW_KUBE_SALT_SHA1} \
-                           --profile ${AWS_DEFAULT_PROFILE} \
                            --region ${ARGO_AWS_REGION} \
                            --ax-vol-disk-type ${AX_VOL_DISK_TYPE} \
-                           --cluster-name-id ${CLUSTER_NAME_ID}
+                           --cluster-name-id ${CLUSTER_NAME_ID} \
+                           ${aws_profile_arg}
 }

@@ -226,7 +230,13 @@ upgrade-master () {
     echo
     echo "=== Step 2. Configure Kubernetes master."
     echo
-    /ax/bin/master_manager ${CLUSTER_NAME_ID} upgrade --region ${ARGO_AWS_REGION} --profile ${AWS_DEFAULT_PROFILE}
+
+    local aws_profile_arg=""
+    if [[ ! -z ${AWS_DEFAULT_PROFILE+x} ]]; then
+        aws_profile_arg="--profile ${AWS_DEFAULT_PROFILE}"
+    fi
+
+    /ax/bin/master_manager ${CLUSTER_NAME_ID} upgrade --region ${ARGO_AWS_REGION} ${aws_profile_arg}
     rm -f ~/.ssh/known_hosts
 }

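With no profile exported, the upgrade scripts run as whatever identity the default credential chain resolves to (typically the node's instance role inside a cluster). A quick, repository-independent way to confirm which identity is in effect; whoami is an illustrative name:

import boto3

def whoami(aws_profile=None):
    # profile_name=None reports the identity chosen by the default chain
    # (environment variables, the [default] profile, or an instance role).
    session = boto3.Session(profile_name=aws_profile)
    identity = session.client("sts").get_caller_identity()
    return identity["Account"], identity["Arn"]

# print(whoami())        # default credential chain
# print(whoami("dev"))   # explicit named profile ("dev" is illustrative)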
