Skip onboarding prechecks for AKS-HCI (Azure#6778)
9lash authored Sep 20, 2023
1 parent ded42d4 commit 263e2df
Showing 3 changed files with 54 additions and 31 deletions.
4 changes: 4 additions & 0 deletions src/connectedk8s/HISTORY.rst
@@ -3,6 +3,10 @@
Release History
===============

1.4.1
++++++
* Skip Onboarding prechecks for AKS-HCI.

1.4.0
++++++
* Added support for reading ARM metadata 2022-09-01.
79 changes: 49 additions & 30 deletions src/connectedk8s/azext_connectedk8s/custom.py
@@ -155,38 +155,54 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat
is_arm64_cluster = check_arm64_node(node_api_response)

required_node_exists = check_linux_node(node_api_response)
# Pre onboarding checks

# check if this is AKS_HCI
aks_hci = False
if distribution == 'aks_workload' and infrastructure == 'azure_stack_hci':
aks_hci = True

# Install kubectl and helm
try:
kubectl_client_location = install_kubectl_client()
helm_client_location = install_helm_client()
diagnostic_checks = "Failed"
batchv1_api_instance = kube_client.BatchV1Api()
storage_space_available = True

current_time = time.ctime(time.time())
time_stamp = ""
for elements in current_time:
if(elements == ' '):
time_stamp += '-'
continue
elif(elements == ':'):
time_stamp += '.'
continue
time_stamp += elements
time_stamp = cluster_name + '-' + time_stamp

# Generate the diagnostic folder in a given location
filepath_with_timestamp, diagnostic_folder_status = utils.create_folder_diagnosticlogs(time_stamp, consts.Pre_Onboarding_Check_Logs)

if(diagnostic_folder_status is not True):
storage_space_available = False

# Performing cluster-diagnostic-checks
diagnostic_checks, storage_space_available = precheckutils.fetch_diagnostic_checks_results(api_instance, batchv1_api_instance, helm_client_location, kubectl_client_location, kube_config, kube_context, location, http_proxy, https_proxy, no_proxy, proxy_cert, azure_cloud, filepath_with_timestamp, storage_space_available)
precheckutils.fetching_cli_output_logs(filepath_with_timestamp, storage_space_available, 1)
except Exception as e:
raise CLIInternalError("An exception has occurred while trying to perform kubectl or helm install : {}".format(str(e)))
# Handling the user manual interrupt
except KeyboardInterrupt:
raise ManualInterrupt('Process terminated externally.')

if storage_space_available is False:
logger.warning("There is no storage space available on your device and hence not saving cluster diagnostic check logs on your device")
# Pre onboarding checks
try:
# if aks_hci skip, otherwise continue to perform pre-onboarding check
if not aks_hci:
diagnostic_checks = "Failed"
batchv1_api_instance = kube_client.BatchV1Api()
storage_space_available = True

current_time = time.ctime(time.time())
time_stamp = ""
for elements in current_time:
if(elements == ' '):
time_stamp += '-'
continue
elif(elements == ':'):
time_stamp += '.'
continue
time_stamp += elements
time_stamp = cluster_name + '-' + time_stamp

# Generate the diagnostic folder in a given location
filepath_with_timestamp, diagnostic_folder_status = utils.create_folder_diagnosticlogs(time_stamp, consts.Pre_Onboarding_Check_Logs)

if(diagnostic_folder_status is not True):
storage_space_available = False

# Performing cluster-diagnostic-checks
diagnostic_checks, storage_space_available = precheckutils.fetch_diagnostic_checks_results(api_instance, batchv1_api_instance, helm_client_location, kubectl_client_location, kube_config, kube_context, location, http_proxy, https_proxy, no_proxy, proxy_cert, azure_cloud, filepath_with_timestamp, storage_space_available)
precheckutils.fetching_cli_output_logs(filepath_with_timestamp, storage_space_available, 1)

if storage_space_available is False:
logger.warning("There is no storage space available on your device and hence not saving cluster diagnostic check logs on your device")

except Exception as e:
telemetry.set_exception(exception="An exception has occurred while trying to execute pre-onboarding diagnostic checks : {}".format(str(e)),
@@ -202,7 +218,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat
raise ManualInterrupt('Process terminated externally.')

# If the checks didn't pass then stop the onboarding
if diagnostic_checks != consts.Diagnostic_Check_Passed:
if diagnostic_checks != consts.Diagnostic_Check_Passed and aks_hci is False:
if storage_space_available:
logger.warning("The pre-check result logs logs have been saved at this path:" + filepath_with_timestamp + " .\nThese logs can be attached while filing a support ticket for further assistance.\n")
if(diagnostic_checks == consts.Diagnostic_Check_Incomplete):
@@ -212,7 +228,7 @@ def create_connectedk8s(cmd, client, resource_group_name, cluster_name, correlat
telemetry.set_exception(exception='Cluster Diagnostic Prechecks Failed', fault_type=consts.Cluster_Diagnostic_Prechecks_Failed, summary="Cluster Diagnostic Prechecks Failed in the cluster")
raise ValidationError("One or more pre-onboarding diagnostic checks failed and hence not proceeding with cluster onboarding. Please resolve them and try onboarding again.")

print("The required pre-checks for onboarding have succeeded.")
if aks_hci is False:
print("The required pre-checks for onboarding have succeeded.")
else:
print("Skipped onboarding pre-checks for AKS-HCI. Continuing...")

if not required_node_exists:
telemetry.set_user_fault()
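In effect, the custom.py change above gates the entire pre-onboarding diagnostic block on a single AKS-HCI flag derived from the distribution and infrastructure values supplied by the caller. A minimal, self-contained sketch of that decision follows; the helper name is hypothetical and simply mirrors the condition added in the diff, and the inputs are assumed to correspond to the existing --distribution and --infrastructure options of az connectedk8s connect.

def should_skip_prechecks(distribution, infrastructure):
    # Hypothetical helper mirroring the check added in custom.py: the
    # pre-onboarding diagnostic checks are skipped only when the target
    # cluster is AKS on Azure Stack HCI (AKS-HCI).
    return distribution == 'aks_workload' and infrastructure == 'azure_stack_hci'

# Example inputs as they might arrive from the CLI:
print(should_skip_prechecks('aks_workload', 'azure_stack_hci'))  # True  -> checks skipped
print(should_skip_prechecks('aks', 'azure_stack_hci'))           # False -> checks run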
2 changes: 1 addition & 1 deletion src/connectedk8s/setup.py
@@ -17,7 +17,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.

VERSION = '1.4.0'
VERSION = '1.4.1'

# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
