From e4c2aa3e35d1ae90bc1d693256325ac1351ac029 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 13 Sep 2018 15:44:17 -0600 Subject: [PATCH 01/62] Update badges --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bed8c01..fc80bec 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![TravisCI](https://api.travis-ci.org/resero-labs/docker-utils.svg?branch=master)](https://travis-ci.org/resero-labs/docker-utils) [![Coverage](https://codecov.io/gh/rappdw/docker-utils/branch/master/graph/badge.svg)](https://codecov.io/gh/resero-labs/docker-utils) [![PyPi](https://img.shields.io/pypi/v/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![PyPi](https://img.shields.io/pypi/wheel/dockerutils.svg)](https://pypi.org/project/dockerutils/) +[![TravisCI](https://api.travis-ci.org/resero-labs/docker-utils.svg?branch=master)](https://travis-ci.org/resero-labs/docker-utils) [![Coverage](https://codecov.io/gh/resero-labs/docker-utils/branch/master/graph/badge.svg)](https://codecov.io/gh/resero-labs/docker-utils) [![PyPi](https://img.shields.io/pypi/v/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![PyPi](https://img.shields.io/pypi/wheel/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![Python 3.7](https://img.shields.io/badge/python-3.7-blue.svg)](https://www.python.org/downloads/release/python-370/) [![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/) # Docker Utilities/Patterns for Python Projects From c7c3bf80710b3bbe9e9b4b79f7db870b18cf9262 Mon Sep 17 00:00:00 2001 From: veranth-pfpt <43183819+veranth-pfpt@users.noreply.github.com> Date: Thu, 20 Sep 2018 13:43:41 -0600 Subject: [PATCH 02/62] Fix cli prompt (#13) * Fix cli prompt update when docking; add compatibility with virtual environment prompt updates * Fix command line prompt updating for dock/castoff; add simple test script --- bin/test_shell | 118 
+++++++++++++++++++++++++++++++++++++++++++++++++ scripts/dock | 76 ++++++++++++++++++++----------- 2 files changed, 168 insertions(+), 26 deletions(-) create mode 100755 bin/test_shell diff --git a/bin/test_shell b/bin/test_shell new file mode 100755 index 0000000..2f7d728 --- /dev/null +++ b/bin/test_shell @@ -0,0 +1,118 @@ +!#/bin/bash + +# To run tests, from project root directory: +# $ source bin/test_shell + +DOCK_INSTANCE=10.93.135.39 +ORIGINAL_PROMPT="$PS1" +echo "Testing dock/castoff with $DOCK_INSTANCE" + +source dock $DOCK_INSTANCE +deactivate +castoff +source activate + +source dock $DOCK_INSTANCE +deactivate +source activate +castoff + +source dock $DOCK_INSTANCE +deactivate +source activate +castoff + +source dock $DOCK_INSTANCE +castoff +deactivate +source activate + +source dock $DOCK_INSTANCE +castoff +source activate +deactivate + +source dock $DOCK_INSTANCE +source activate +deactivate +castoff + +source dock $DOCK_INSTANCE +source activate +castoff +deactivate + +deactivate +source dock $DOCK_INSTANCE +castoff +source activate + +deactivate +source dock $DOCK_INSTANCE +source activate +castoff + +deactivate +castoff +source dock $DOCK_INSTANCE +source activate + +deactivate +castoff +source activate +source dock $DOCK_INSTANCE + +deactivate +source activate +source dock $DOCK_INSTANCE +castoff + +deactivate +source activate +castoff +source dock $DOCK_INSTANCE + +source activate +source dock $DOCK_INSTANCE +deactivate +castoff + +source activate +source dock $DOCK_INSTANCE +castoff +deactivate + +source activate +deactivate +source dock $DOCK_INSTANCE +castoff + +source activate +deactivate +castoff +source dock $DOCK_INSTANCE + +source activate +castoff +source dock $DOCK_INSTANCE +deactivate + +source activate +castoff +deactivate +source dock $DOCK_INSTANCE + + +# Clean Up +deactivate +castoff +echo +echo "Original Prompt:" +echo "$ORIGINAL_PROMPT" +echo "Final Prompt:" +echo "$PS1" +if [ "$ORIGINAL_PROMPT" = "$PS1" ]; then + echo "Success. 
Prompt match." +else + echo "FAIL: Prompt mismatch." +fi diff --git a/scripts/dock b/scripts/dock index 467bb15..480fae2 100755 --- a/scripts/dock +++ b/scripts/dock @@ -16,7 +16,7 @@ done MONIKER=${1:-none} PORT=2377 -if [ ! -z "$_DOCK_MONIKER" ]; then +if [ -n "$_DOCK_MONIKER" ]; then echo "Remote docker is already configured for '$DOCK_MONIKER'. Try 'castoff' to disconnect." return 0 fi @@ -33,6 +33,8 @@ if [ $MONIKER = "none" ]; then echo "You can now run secure remote docker commands." echo "To undo this configuration:" echo " $ castoff" + echo + echo "If you do not want your command line prompt to be updated, set DOCKERUTILS_DISABLE_PROMPT=true" kill -INT $$ fi @@ -53,31 +55,65 @@ if [ $FOUND_MONIKER = false ]; then kill -INT $$ fi -echo "Docking to $DOCK_USER@$DOCK_IP ($DOCK_MONIKER)" -# echo "MONIKER:$MONIKER" -# echo "IP:$DOCK_IP" -# echo "HOSTNAME:$DOCK_HOSTNAME" +echo "Docking to $DOCK_USER@$DOCK_IP [Moniker: $DOCK_MONIKER]" if [ -z "$DOCK_IP" ]; then kill -INT $$ fi +# Update command line prompt to reflect docked condition. +# Python virtual environment prompt (or lack therof) should remain unchanged. +update_dock_prompt() { + _DOCK_MONIKER="[dock:$DOCK_MONIKER] " + export _DOCK_MONIKER + + if [ -n "$_OLD_VIRTUAL_PS1" ]; then + _UNDOCKED_PS1="$PS1" + _OLD_VIRTUAL_PS1="$_OLD_VIRTUAL_PS1${_DOCK_MONIKER}" + export _OLD_VIRTUAL_PS1 + else + _UNDOCKED_PS1="$PS1" + fi + + if [ "$0" == "zsh" ]; then + PS1=$PS1${_DOCK_MONIKER} + else + PS1=$PS1${_DOCK_MONIKER} + fi + + export PS1 + export _UNDOCKED_PS1 +} + + # Remove docked condition including command line prompt. # Python virtual environment prompt (or lack therof) should remain unchanged. -function castoff() { +castoff() { + _DOCK_MONIKER="[dock:$DOCK_MONIKER] " unset DOCKER_TLS_VERIFY unset DOCKER_CERT_PATH unset DOCKER_HOST unset DOCKER_IP - if [ ! 
-z "$_OLD_VIRTUAL_PS1" ]; then - _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1%${_DOCK_MONIKER}} - PS1=${PS1%${_DOCK_MONIKER}} + + if [ -n "$_OLD_VIRTUAL_PS1" ]; then + if [ "$0" == "zsh" ]; then + _OLD_VIRTUAL_PS1="${_OLD_VIRTUAL_PS1%$_DOCK_MONIKER}" + else + _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1%"$_DOCK_MONIKER"} + fi + export _OLD_VIRTUAL_PS1 + fi + + if [ "$0" == "zsh" ]; then + PS1=${PS1%$_DOCK_MONIKER} else - PS1=$_PS1_ORIGINAL + PS1=${PS1%"$_DOCK_MONIKER"} fi + export PS1 + unset _DOCK_MONIKER + unset _UNDOCKED_PS1 unset -f castoff - unset _PS1_ORIGINAL unset -f sync-up unset -f sync-down unset -f do-sync-up @@ -92,18 +128,6 @@ export DOCKER_CERT_PATH=${HOME}/.docker/${DOCK_IP} export DOCKER_HOST=tcp://${DOCK_IP}:2377 export DOCKER_IP=${DOCK_IP} -# Update command line prompt to reflect docked condition. -# Python virtual environment prompt (or lack therof) should remain unchanged. -_DOCK_MONIKER="[dock:$DOCK_MONIKER] " - -if [ ! -z "$_OLD_VIRTUAL_PS1" ]; then - _PS1_ORIGINAL=${_OLD_VIRTUAL_PS1} - export _PS1_ORIGINAL - _OLD_VIRTUAL_PS1="${_OLD_VIRTUAL_PS1}$_DOCK_MONIKER" -else - _PS1_ORIGINAL=$PS1 -fi - -PS1="$PS1$_DOCK_MONIKER" -export PS1 -export _DOCK_MONIKER +if [ -z $DOCKERUTILS_DISABLE_PROMPT ]; then + update_dock_prompt +fi \ No newline at end of file From 7dc2afd0a785c6efa576e676df4d9458ef70ca0e Mon Sep 17 00:00:00 2001 From: Brendan Veranth Date: Mon, 1 Oct 2018 16:35:19 -0600 Subject: [PATCH 03/62] Fix logic that checks for zsh --- scripts/dock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/dock b/scripts/dock index 480fae2..2d44398 100755 --- a/scripts/dock +++ b/scripts/dock @@ -75,7 +75,7 @@ update_dock_prompt() { _UNDOCKED_PS1="$PS1" fi - if [ "$0" == "zsh" ]; then + if [ "$SHELL" = "/bin/zsh" ]; then PS1=$PS1${_DOCK_MONIKER} else PS1=$PS1${_DOCK_MONIKER} @@ -96,7 +96,7 @@ castoff() { unset DOCKER_IP if [ -n "$_OLD_VIRTUAL_PS1" ]; then - if [ "$0" == "zsh" ]; then + if [ "$SHELL" = "/bin/zsh" ]; then 
_OLD_VIRTUAL_PS1="${_OLD_VIRTUAL_PS1%$_DOCK_MONIKER}" else _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1%"$_DOCK_MONIKER"} @@ -104,7 +104,7 @@ castoff() { export _OLD_VIRTUAL_PS1 fi - if [ "$0" == "zsh" ]; then + if [ "$SHELL" = "/bin/zsh" ]; then PS1=${PS1%$_DOCK_MONIKER} else PS1=${PS1%"$_DOCK_MONIKER"} From 12ed48994a8bed980019b6ab4b442cfc542a9fb7 Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Mon, 8 Oct 2018 12:13:04 -0600 Subject: [PATCH 04/62] Support env expansion and pre/post build scripts (#17) * Initial changes to dockerutils to support env expansion and also pre/post build scripts * Fixed a couple of little items in the build-image command * Added the changes requested on the PR --- .gitignore | 1 + README.md | 11 +++++++++-- scripts/build-image | 35 ++++++++++++++++++++++++++++++++--- scripts/publish-image | 2 ++ scripts/run-image | 2 ++ 5 files changed, 46 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index fc1f062..19ef7ae 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ build activate .coverage .pytest_cache +.venv diff --git a/README.md b/README.md index fc80bec..a7d8fc1 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ docker directory tree. Each of these sections may contain one of the following: * `pull_FROM_on_force` - defaults to False, if True, add --pull to build command when force building image (or base image) * `image_repo` - the repository to publish the image to * `publication_tag` - the tag for publication (full image name + tag) +* `pre_build_script` - A shell command or script to run before a docker build is issued +* `post_build_script` - A shell command or script to run after a docker build has been compeleted (successfully) ### Synthetic Images Additionally, "synthetic" images can be specified by adding a `run-image` section with a `synthetic_images` definition @@ -102,14 +104,19 @@ tag=experiment.2017.12.16 ... 
``` ### Volume Replacement Variables -The volume specification may contain replacement variable designations of the form `{var}`. The supported variables -include: + +The volume specification may contain either environment variables (`$name` and `${name}` formats) as well as specific +variable replacement designations of the form `{var}`. The supported variables include: * `project_root` - will be replaced with the root directory name of the project * `user` - will be replaced with the user name of the user running the command * `project` - replace with project namge ### Image Push Replacement Variables + +The `publication_tag` may contain either environment variables (`$name` and `${name}` formats) as well as specific +variable replacement designations of the form `{var}`. The supported variables include: + * `account` - AWS account designation * `region` - AWS region * `image` - Image name diff --git a/scripts/build-image b/scripts/build-image index a409986..b39773a 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -16,7 +16,26 @@ def is_multistage(mode): return 'as builder' in open(f'docker/{mode}/Dockerfile').read() -def build(image, image_name, image_tag, pull=False): +def run_pre_script(script: str, config: dict) -> int: + print(f'Running pre-build-script: "{script}"') + return subprocess.call(shlex.split(script), cwd=os.getcwd()) + + +def run_post_script(script: str, config: dict) -> int: + print(f'Running post-build-script: "{script}"') + return subprocess.call(shlex.split(script), cwd=os.getcwd()) + + +def build(image, image_name, image_tag, config={}, pull=False): + pre_script = config.get('pre_build_script', None) + post_script = config.get('post_build_script', None) + + if pre_script: + rc = run_pre_script(pre_script, config=config) + if rc != 0: + print(f'pre-build-script failed: {rc}') + return rc + rc = 0 pull_base = '' if pull: @@ -30,6 +49,14 @@ def build(image, image_name, image_tag, pull=False): if not rc: rc = 
image_operation(f'docker build {pull_base} --compress -t {image_name}:{image_tag} ' f'-f docker/{image}/Dockerfile .') + + if rc != 0: + print(f'docker build failed: {rc}') + return rc + + if post_script: + rc = run_post_script(post_script, config=config) + return rc @@ -92,11 +119,13 @@ if __name__ == '__main__': image_tag = args.image_tag pull_FROM_on_force = False + image_config = {} if image in config.sections(): - if 'pull_FROM_on_force' in config[image]: + image_config = config[image] + if 'pull_FROM_on_force' in image_config: pull_FROM_on_force = config[image]['pull_FROM_on_force'] - rc = fn(image, image_name, image_tag, args.pull_base or (args.force_build_base and pull_FROM_on_force)) + rc = fn(image, image_name, image_tag, config=image_config, pull=args.pull_base or (args.force_build_base and pull_FROM_on_force)) # because an image may not be present on the clean, ignore a non-zero return code if rc and not args.image == 'clean': sys.exit(rc) diff --git a/scripts/publish-image b/scripts/publish-image index 3dd51af..6f62b01 100755 --- a/scripts/publish-image +++ b/scripts/publish-image @@ -87,6 +87,7 @@ if __name__ == '__main__': tag=image_tag, user=getpass.getuser() ) + publication_tag = os.path.expandvars(publication_tag) ecr_client = boto3.client('ecr') auth_data = ecr_client.get_authorization_token() user, password = base64.b64decode(auth_data['authorizationData'][0]['authorizationToken']).decode().split( @@ -103,6 +104,7 @@ if __name__ == '__main__': tag=image_tag, user=getpass.getuser() ) + publication_tag = os.path.expandvars(publication_tag) # TODO: should we pick up user and password for docker.com? Maybe via credstash? 
rc = docker_login_dockerhub() diff --git a/scripts/run-image b/scripts/run-image index e166477..1886aa0 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -45,6 +45,8 @@ def run(mode, image_name, image_tag, **kwargs): user=user, project=os.path.split(get_root_dir())[1] ) + volumes = os.path.expandvars(volumes) + if kwargs['network']: kwargs['network'] = f"--network {kwargs['network']}" From 3e4e0b37559e40d50cbc15299ea4dfb849baeeb2 Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Mon, 8 Oct 2018 12:22:11 -0600 Subject: [PATCH 05/62] Adjusted the bin scripts to have some other support --- bin/publish | 10 ++++++++++ bin/setup-dev | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100755 bin/setup-dev diff --git a/bin/publish b/bin/publish index fad410a..8d81278 100755 --- a/bin/publish +++ b/bin/publish @@ -5,11 +5,21 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" cd $DIR cd .. +echo "Making sure we have the latest tags" +git pull --tags + if [ -z "$VIRTUAL_ENV" ]; then . activate fi +echo "Cleaning out old built wheels" +rm -f dist/* +echo "Installing/Verifying wheel and twine" pip install wheel twine + +echo "Creating the new wheel and storing in the dist/ directory" python setup.py bdist_wheel -d dist + +echo "Uploading to pypi (be sure to have entry in ~/.pypirc)" twine upload -r pypi dist/* diff --git a/bin/setup-dev b/bin/setup-dev new file mode 100755 index 0000000..3b4a22d --- /dev/null +++ b/bin/setup-dev @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +#vim :set filetype=sh: + +set -e + +cd "$(dirname $0)"; cd .. + +WORK_VENV=${WORK_VENV:-$PWD/.venv} +IN_VENV=${VIRTUAL_ENV:-} + +function display { + msg=$1 + echo -e "\e[32m${msg}\e[0m" +} + +if [ ! 
-z $IN_VENV ]; then + if [[ $WORK_VENV = $VIRTUAL_ENV ]]; then + display "You are already in your activated virtual environment" + else + display "Your current virtual environment does not match the project virtual environment" + display "\t$WORK_VENV != $VIRTUAL_ENV" + display "Either deactivate your current environment or set WORK_ENV to your current venv" + exit 1 + fi +else + if [ ! -d $WORK_VENV ]; then + display "No venv found at $WORK_VENV so creating" + python3 -m venv $WORK_VENV + if [ ! -e activate ]; then + echo -e "Creating an alias activate for virtual environment" + ln -s $WORK_VENV/bin/activate activate + fi + fi + + display "Activating virtual environment at $WORK_VENV" + source $WORK_VENV/bin/activate +fi + +display "Installing application (and dependencies)" +#echo -e "Installing application (and dependencies)" +pip install -e . + +if [ -z $IN_VENV ]; then + display "Environment is setup, activate using either of the following commands" + display "\tsource activate\n\tsource $WORK_VENV/bin/activate" +fi From ed5634b95c8d4cdac9094f43824dfb7973792316 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 9 Oct 2018 21:12:28 -0600 Subject: [PATCH 06/62] Fixes #11 (#18) * https://github.com/resero-labs/docker-utils/issues/11 Add additional scripts: * destroy-dock * start-dock * stop-dock * ssh-dock * Update destroy-dock to funcition similar to stop/start-dock --- README.md | 11 +++ .../notes.md | 16 ++++ scripts/create-dock | 11 +-- scripts/destroy-dock | 80 ++++++++++++++++++ scripts/ssh-dock | 32 ++++++++ scripts/start-dock | 68 ++++++++++++++++ scripts/stop-dock | 81 +++++++++++++++++++ setup.py | 5 ++ 8 files changed, 294 insertions(+), 10 deletions(-) create mode 100644 issues/15 - Docker not running after stop/notes.md create mode 100755 scripts/destroy-dock create mode 100755 scripts/ssh-dock create mode 100755 scripts/start-dock create mode 100755 scripts/stop-dock diff --git a/README.md b/README.md index a7d8fc1..fccde6a 100644 --- a/README.md 
+++ b/README.md @@ -55,6 +55,17 @@ if it exists, or [rappdw/docker-ds](https://github.com/rappdw/docker-ds) otherwi into the container for use in the Juypter notebook environment. There are a couple of environment variable to be aware of with this command: +`create-dock` will start an ec2 instance that can be used for remote docking. This instance is configured to provide +secure interaction with the docker server, as well as to support GPU utliziation (`-g` option with `run-image`) + +`stop-dock` will change the instances state of a remote dock to `stopped` + +`start-dock` will change the instance state of a remote dock to `running` + +`destroy-dock` will terminate a remote dock instance and delete any local configuration files + +`ssh-dock` opens a terminal on the remote dock with ssh + * DOCKER_DS_DONT_PULL - if set, the version of rappdw/docker-ds currently available will be used rather than pulling the latest version from docker hub. * DOCKER_DS_DIFFS - if set, diff --git a/issues/15 - Docker not running after stop/notes.md b/issues/15 - Docker not running after stop/notes.md new file mode 100644 index 0000000..ff80755 --- /dev/null +++ b/issues/15 - Docker not running after stop/notes.md @@ -0,0 +1,16 @@ + +[systemctl](https://www.digitalocean.com/community/tutorials/how-to-use-systemctl-to-manage-systemd-services-and-units) + +It looks like docker is already registered as a service that will auto restart + +``` +ubuntu@ip-10-93-135-30:~$ sudo systemctl enable docker +Synchronizing state of docker.service with SysV service script with /lib/systemd/systemd-sysv-install. +Executing: /lib/systemd/systemd-sysv-install enable docker +Failed to enable unit: File /etc/systemd/system/multi-user.target.wants/docker.service already exists. +``` + +Need to look at what exactly stop/start does + +According to [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html), the instance +performs a normal shutdown. 
\ No newline at end of file diff --git a/scripts/create-dock b/scripts/create-dock index bd479f5..a8af5e9 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -1,4 +1,4 @@ -#!/bin/bash -e +#!/usr/bin/env bash -e # Default Values INSTANCE_TYPE="m5.xlarge" @@ -18,16 +18,7 @@ check_aws_connectivity() { fi } -ensure_dependencies() { - if [ -z $(which register-dock) ]; then - echo -e "Unable to find required dependencies from docker-utils, something is amiss in your dockerutils installation. Consider updating:" - echo -e " pip install -U dockerutils" - exit 1 - fi -} - check_aws_connectivity -ensure_dependencies INSTANCE_NAME="${USERNAME}-dock" confirm_create() { diff --git a/scripts/destroy-dock b/scripts/destroy-dock new file mode 100755 index 0000000..1ab182e --- /dev/null +++ b/scripts/destroy-dock @@ -0,0 +1,80 @@ +#!/usr/bin/env bash -e + +# Default Values +MONIKER=${1:-"$DOCKER_IP"} +GREEN='\033[0;32m' +NO_COLOR='\033[0m' + +confirm_destroy() { + echo -e "Destroy dock with the following values?" + echo -e "Instance ID: ${GREEN}${2:-none}${NO_COLOR}" + echo -e "Config Dir: ${GREEN}${1:-none}${NO_COLOR}" + read -e -p "Type enter to Cancel, h for Help, y to Destroy: " RESPONSE + + if [ "$RESPONSE" == "h" ]; then print_help; fi + +} + +print_help() { + echo "Destroy dock - Help" + echo + echo "Description" + echo " This script uses the aws cli to terminate an existing ec2 dock instance." + echo " Either the dock ip or the moniker must be provided" + echo + echo "Usage" + echo " $ destroy-dock moniker|ip" + echo + + exit 0 +} + +# Parse command line arguments in any order +while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. 
+ case "${flag}" in + h) hflag='true';; + *) error "Unexpected option ${flag}" ;; + esac +done + +# Look up IP from moniker +FOUND_MONIKER=false +for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [[ $DOCK_MONIKER = $MONIKER || $DOCK_IP = $MONIKER ]]; then + FOUND_MONIKER=true + break + fi + fi +done + +if [ $FOUND_MONIKER = false ]; then + echo "Can't find dock configuration for $MONIKER" + exit -1 +fi + + +get_instance_id() { + aws ec2 describe-instances \ + --filters Name=private-ip-address,Values="$1" \ + --query 'Reservations[*].Instances[*].InstanceId' --output text +} + +# destroy dock +INSTANCE_ID=$(get_instance_id $DOCK_IP) + +# Confirmation +confirm_destroy "$f" "$INSTANCE_ID" +if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then + echo "Canceled" + exit 0 +fi + +if [ -n "$f" ]; then + rm -rf $f +fi + +if [ -n "$INSTANCE_ID" ]; then + aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" +fi \ No newline at end of file diff --git a/scripts/ssh-dock b/scripts/ssh-dock new file mode 100755 index 0000000..655335c --- /dev/null +++ b/scripts/ssh-dock @@ -0,0 +1,32 @@ +#!/usr/bin/env bash -e + +if [[ -z "$DOCKER_IP" && -z "$1" ]]; then + echo "You must either be docked, or provide a argument specifying the 'moniker' of the dock you want to ssh to" + exit -1 +fi + +if [ -n "$1" ]; then + # Look up IP from moniker + FOUND_MONIKER=false + for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [ $DOCK_MONIKER = $1 ]; then + FOUND_MONIKER=true + break + fi + fi + done + + if [ $FOUND_MONIKER = false ]; then + echo "Can't find dock configuration for $1" + exit -1 + fi + DOCKER_IP=$DOCK_IP +fi + +echo "Opening ssh connection to ${DOCKER_IP}" +ssh ${DOCKER_IP} + + + diff --git a/scripts/start-dock b/scripts/start-dock new file mode 100755 
index 0000000..573cdf0 --- /dev/null +++ b/scripts/start-dock @@ -0,0 +1,68 @@ +#!/usr/bin/env bash -e + +# Default Values +MONIKER=${1:-"$DOCKER_IP"} +GREEN='\033[0;32m' +NO_COLOR='\033[0m' + +print_help() { + echo "Start dock - Help" + echo + echo "Description" + echo " This script uses the aws cli to start an ec2 dock instance that has been stopped." + echo " Either the dock ip or the moniker must be provided" + echo + echo "Usage" + echo " $ start-dock moniker|ip" + echo + + exit 0 +} + +if [[ -z "$MONIKER" ]]; then + print_help + exit -1 +fi + + +# Parse command line arguments in any order +while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. + case "${flag}" in + h) hflag='true';; + *) error "Unexpected option ${flag}" ;; + esac +done + +# Look up IP from moniker +FOUND_MONIKER=false +for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [[ $DOCK_MONIKER = $MONIKER || $DOCK_IP = $MONIKER ]]; then + FOUND_MONIKER=true + break + fi + fi +done + +if [ $FOUND_MONIKER = false ]; then + echo "Can't find dock configuration for $MONIKER" + exit -1 +fi + + +get_instance_id() { + aws ec2 describe-instances \ + --filters Name=private-ip-address,Values="$1" \ + --query 'Reservations[*].Instances[*].InstanceId' --output text +} + +# start dock +INSTANCE_ID=$(get_instance_id $DOCK_IP) + +if [ -n "$INSTANCE_ID" ]; then + echo "Starting instance..." + aws ec2 start-instances --instance-ids "${INSTANCE_ID}" + echo "Waiting for instance to start..." 
+ aws ec2 wait system-status-ok --instance-ids $INSTANCE_ID +fi \ No newline at end of file diff --git a/scripts/stop-dock b/scripts/stop-dock new file mode 100755 index 0000000..26bb10e --- /dev/null +++ b/scripts/stop-dock @@ -0,0 +1,81 @@ +#!/usr/bin/env bash -e + +# Default Values +MONIKER=${1:-"$DOCKER_IP"} +GREEN='\033[0;32m' +NO_COLOR='\033[0m' + +confirm_stop() { + if [ -z "$1" ]; then + echo -e "Stop dock?" + read -e -p "Type enter to Cancel, h for Help, y to Create: " RESPONSE + fi + + if [ "$RESPONSE" == "h" ]; then print_help; fi +} + +print_help() { + echo "Stop dock - Help" + echo + echo "Description" + echo " This script uses the aws cli to stop an existing ec2 dock instance." + echo " Either the dock ip or the moniker must be provided" + echo + echo "Usage" + echo " $ stop-dock moniker|ip" + echo + + exit 0 +} + +if [[ -z "$MONIKER" ]]; then + print_help + exit -1 +fi + + +# Parse command line arguments in any order +while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. 
+ case "${flag}" in + h) hflag='true';; + *) error "Unexpected option ${flag}" ;; + esac +done + +# Look up IP from moniker +FOUND_MONIKER=false +for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [[ $DOCK_MONIKER = $MONIKER || $DOCK_IP = $MONIKER ]]; then + FOUND_MONIKER=true + break + fi + fi +done + +if [ $FOUND_MONIKER = false ]; then + echo "Can't find dock configuration for $MONIKER" + exit -1 +fi + + +get_instance_id() { + aws ec2 describe-instances \ + --filters Name=private-ip-address,Values="$1" \ + --query 'Reservations[*].Instances[*].InstanceId' --output text +} + +# stop dock +INSTANCE_ID=$(get_instance_id $DOCK_IP) + +# Confirmation +confirm_stop +if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then + echo "Canceled" + exit 0 +fi + +if [ -n "$INSTANCE_ID" ]; then + aws ec2 stop-instances --instance-ids "${INSTANCE_ID}" +fi \ No newline at end of file diff --git a/setup.py b/setup.py index 8071e6f..9af83e3 100644 --- a/setup.py +++ b/setup.py @@ -27,6 +27,7 @@ scripts=[ 'scripts/create-dock', 'scripts/build-image', + 'scripts/destroy-dock', 'scripts/dock', 'scripts/dock-sync', 'scripts/genversion', @@ -34,6 +35,9 @@ 'scripts/register-dock', 'scripts/run-image', 'scripts/run-notebook', + 'scripts/ssh-dock', + 'scripts/start-dock', + 'scripts/stop-dock', 'scripts/transfer-image' ], include_package_data=True, @@ -49,6 +53,7 @@ 'Programming Language :: Python :: 3.7', ), install_requires=[ + 'aws', 'boto3' ], extras_require={ From 689961d5cae4f0a996a56de463793175b0582a3d Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 10 Oct 2018 09:55:27 -0600 Subject: [PATCH 07/62] Fix issue with docker not restarting after instance stop/start (#19) * Fix bug in setup.py (not sure where this came from) * Rework packer script for clarity and for bug identified as issue 15 Modify utility scripts to `--output text` instead of json --- .../notes.md | 
58 ++++++++++++++++++- packer/configure-docker-v1.sh | 6 ++ packer/docker-setup-v1.sh | 10 ---- packer/resero-labs-nvidia-docker.packer | 34 +++++------ packer/setup-v1.sh | 23 ++++++++ scripts/destroy-dock | 2 +- scripts/start-dock | 4 +- scripts/stop-dock | 2 +- setup.py | 6 +- 9 files changed, 107 insertions(+), 38 deletions(-) create mode 100755 packer/configure-docker-v1.sh delete mode 100755 packer/docker-setup-v1.sh create mode 100755 packer/setup-v1.sh diff --git a/issues/15 - Docker not running after stop/notes.md b/issues/15 - Docker not running after stop/notes.md index ff80755..040a546 100644 --- a/issues/15 - Docker not running after stop/notes.md +++ b/issues/15 - Docker not running after stop/notes.md @@ -13,4 +13,60 @@ Failed to enable unit: File /etc/systemd/system/multi-user.target.wants/docker.s Need to look at what exactly stop/start does According to [AWS Docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html), the instance -performs a normal shutdown. \ No newline at end of file +performs a normal shutdown. + +Steps to Diagnose + +1) create-dock -m test +2) ssh-dock test +3) verify docker is running +4) `date >timestamp.log` +5) `sudo journalctl -u docker.service >docker.log` +6) exit +7) stop-dock test +8) start-dock test +9) ssh-dock test +10) verify docker is not running +11) `date >timestamp.restart.log` +12) `sudo journalctl -u docker.service >docker.reboot.log` + + +# On Jeremy's system +``` +root@ip-10-93-135-93:/home/ubuntu# systemctl status docker +● docker.service - Docker Application Container Engine + Loaded: loaded (/lib/systemd/system/docker.service; disabled; vendor preset: enabled) + Active: inactive (dead) + Docs: https://docs.docker.com + + +``` +from journalctl -u docker +``` +Oct 10 13:23:35 ip-10-93-135-93 systemd[1]: Started Docker Application Container Engine. 
+Oct 10 13:23:36 ip-10-93-135-93 dockerd[16334]: http: TLS handshake error from 10.92.8.39:52312: remote error: tls: bad certificate +Oct 10 13:23:36 ip-10-93-135-93 dockerd[16334]: http: TLS handshake error from 10.92.8.39:52313: remote error: tls: bad certificate +Oct 10 13:35:11 ip-10-93-135-93 systemd[1]: Stopping Docker Application Container Engine... +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.152596710Z" level=info msg="Processing signal 'terminated'" +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.154355239Z" level=info msg="stopping event stream following graceful shutdown" error=" +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.154381095Z" level=info msg="stopping healthcheck following graceful shutdown" module=l +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.154438958Z" level=info msg="stopping event stream following graceful shutdown" error=" +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.154853556Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc4204532c0, +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.154876801Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc4204532c0, +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.155033817Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc42003a330, +Oct 10 13:35:11 ip-10-93-135-93 dockerd[16334]: time="2018-10-10T13:35:11.155051869Z" level=info msg="pickfirstBalancer: HandleSubConnStateChange: 0xc42003a330, +Oct 10 13:35:12 ip-10-93-135-93 systemd[1]: Stopped Docker Application Container Engine. 
+``` +Current time is: Wed Oct 10 13:45:33 UTC 2018 + +## Resolution + +The problem was in the `packer/configure-docker-v1.sh` script which had the line: + +``` +sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /etc/systemd/system/multi-user.target.wants/docker.service +``` + +`/etc/systemd/system/multi-user.target.wants/docker.service` is a symlink to `/lib/systemd/system/docker.service`. +Running sed against it, turned it into a file. This caused the service to not be recognized as enabled +by `systemd`. \ No newline at end of file diff --git a/packer/configure-docker-v1.sh b/packer/configure-docker-v1.sh new file mode 100755 index 0000000..6db232c --- /dev/null +++ b/packer/configure-docker-v1.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +sudo usermod -aG docker ubuntu +sudo systemctl stop docker +sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /lib/systemd/system/docker.service +sudo systemctl daemon-reload +sudo systemctl start docker diff --git a/packer/docker-setup-v1.sh b/packer/docker-setup-v1.sh deleted file mode 100755 index e73e57e..0000000 --- a/packer/docker-setup-v1.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -## Here we are installing docker, just to test -# sudo curl -fsSL get.docker.com -o /tmp/get-docker.sh -# sudo sh /tmp/get-docker.sh -sudo usermod -aG docker ubuntu -sudo systemctl stop docker -sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /etc/systemd/system/multi-user.target.wants/docker.service -sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /lib/systemd/system/docker.service -sudo systemctl daemon-reload -sudo systemctl start docker diff --git a/packer/resero-labs-nvidia-docker.packer b/packer/resero-labs-nvidia-docker.packer index 68943e5..88e34bb 100644 --- a/packer/resero-labs-nvidia-docker.packer +++ b/packer/resero-labs-nvidia-docker.packer @@ -33,40 +33,34 @@ ], "post-processors": [], "provisioners": [ + { + "type": "file", + "source": "setup-v1.sh", + "destination": "/home/ubuntu/setup-v1.sh" + }, { "type": "shell", "inline": [ - "sleep 
30", - "sudo apt-get update", - "sudo apt-get install -y gcc make", - "wget -P /tmp http://us.download.nvidia.com/tesla/396.44/NVIDIA-Linux-x86_64-396.44.run", - "chmod +x /tmp/NVIDIA-Linux-x86_64-396.44.run", - "sudo /tmp/NVIDIA-Linux-x86_64-396.44.run -silent", - "sudo apt-get update", - "sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common", - "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -", - "curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -", - "distribution=$(. /etc/os-release;echo $ID$VERSION_ID)", - "sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable'", - "curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list", - "sudo apt-get update", - "sudo apt-get install -y docker-ce=18.06.0~ce~3-0~ubuntu", - "sudo apt-get install -y nvidia-docker2" + "sudo /home/ubuntu/setup-v1.sh", + "rm /home/ubuntu/setup-v1.sh" ] }, { "type": "file", - "source": "docker-setup-v1.sh", - "destination": "~/docker-setup-v1.sh" + "source": "configure-docker-v1.sh", + "destination": "/home/ubuntu/configure-docker-v1.sh" }, { "type": "shell", - "inline": ["sudo ~/docker-setup-v1.sh"] + "inline": [ + "sudo /home/ubuntu/configure-docker-v1.sh", + "rm /home/ubuntu/configure-docker-v1.sh" + ] }, { "type": "file", "source": "etc-rc.local", - "destination": "~/rc.local" + "destination": "/home/ubuntu/rc.local" }, { "type": "shell", diff --git a/packer/setup-v1.sh b/packer/setup-v1.sh new file mode 100755 index 0000000..cf6b774 --- /dev/null +++ b/packer/setup-v1.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# wait just a bit to allow everything to settle down +sleep 30 + +# update apt and install dependencies +sudo apt-get update +sudo apt-get install -y gcc make apt-transport-https ca-certificates curl software-properties-common + +# get the latest nvidia drivers and 
install them +wget -P /tmp http://us.download.nvidia.com/tesla/396.44/NVIDIA-Linux-x86_64-396.44.run +chmod +x /tmp/NVIDIA-Linux-x86_64-396.44.run +sudo /tmp/NVIDIA-Linux-x86_64-396.44.run -silent + +# now get docker and nvidia-docker +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) +sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable' +curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list +sudo apt-get update +sudo apt-get install -y docker-ce=18.06.0~ce~3-0~ubuntu +sudo apt-get install -y nvidia-docker2 diff --git a/scripts/destroy-dock b/scripts/destroy-dock index 1ab182e..a4a1e4b 100755 --- a/scripts/destroy-dock +++ b/scripts/destroy-dock @@ -76,5 +76,5 @@ if [ -n "$f" ]; then fi if [ -n "$INSTANCE_ID" ]; then - aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" + aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" --output text fi \ No newline at end of file diff --git a/scripts/start-dock b/scripts/start-dock index 573cdf0..8983500 100755 --- a/scripts/start-dock +++ b/scripts/start-dock @@ -62,7 +62,7 @@ INSTANCE_ID=$(get_instance_id $DOCK_IP) if [ -n "$INSTANCE_ID" ]; then echo "Starting instance..." - aws ec2 start-instances --instance-ids "${INSTANCE_ID}" + aws ec2 start-instances --instance-ids "${INSTANCE_ID}" --output text echo "Waiting for instance to start..." 
- aws ec2 wait system-status-ok --instance-ids $INSTANCE_ID + aws ec2 wait system-status-ok --instance-ids $INSTANCE_ID --output text fi \ No newline at end of file diff --git a/scripts/stop-dock b/scripts/stop-dock index 26bb10e..45b7580 100755 --- a/scripts/stop-dock +++ b/scripts/stop-dock @@ -77,5 +77,5 @@ if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then fi if [ -n "$INSTANCE_ID" ]; then - aws ec2 stop-instances --instance-ids "${INSTANCE_ID}" + aws ec2 stop-instances --instance-ids "${INSTANCE_ID}" --output text fi \ No newline at end of file diff --git a/setup.py b/setup.py index 9af83e3..15e0b74 100644 --- a/setup.py +++ b/setup.py @@ -44,16 +44,16 @@ packages=find_packages(exclude=['tests*']), license="MIT License", python_requires='>=3.6', - classifiers=( + classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', - ), + ], install_requires=[ - 'aws', + 'awscli', 'boto3' ], extras_require={ From 618d0fcf5c1fc02b5c7798d51bb5cc425688bfcf Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Thu, 11 Oct 2018 21:20:42 -0600 Subject: [PATCH 08/62] Fixed a few of bugs in sync (#20) * not handling generated version files correctly on sync up * not ignoring generated (proxy) version files on sync down * remove --delete from sync down * clean up code a bit * handle mis-specified setup.cfg error * create /data/workspaces directory in base AMI --- dockerutils/gen_version.py | 3 + packer/resero-labs-nvidia-docker.packer | 2 +- scripts/dock-sync | 96 ++++++++++++++++--------- 3 files changed, 66 insertions(+), 35 deletions(-) diff --git a/dockerutils/gen_version.py b/dockerutils/gen_version.py index 802b105..6619595 100644 --- a/dockerutils/gen_version.py +++ b/dockerutils/gen_version.py @@ -41,6 +41,9 @@ def get_versions(): return json.loads(version_json) 
""".format(json.dumps(version, sort_keys=True, indent=4, separators=(',', ': ')))) + except AssertionError as e: + if str(e) != 'please set versioneer.versionfile_source': + raise(e) except Exception as e: if not str(type(e)) in ["", ""]: raise(e) diff --git a/packer/resero-labs-nvidia-docker.packer b/packer/resero-labs-nvidia-docker.packer index 88e34bb..e7d8096 100644 --- a/packer/resero-labs-nvidia-docker.packer +++ b/packer/resero-labs-nvidia-docker.packer @@ -72,7 +72,7 @@ { "type": "shell", "inline": [ - "sudo mkdir /data", + "sudo mkdir -p /data/workspaces", "sudo chown -R ubuntu /data" ] } diff --git a/scripts/dock-sync b/scripts/dock-sync index 3813d3b..ad55e44 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -1,41 +1,69 @@ #!/usr/bin/env bash +# the need for a seperate sync up and down stems from the filters +# on sync up we need to use the `:-` filter syntax +# on sync down we need to use the `.-` filter syntax +# also on sync down we don't want to use --delete and we add a third parameter that is an additional ignore file + +_REMOTE_SHELL_COMMAND="ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" + function do-sync-up() { + echo "Syncing $1 to remote dock ($2)" if [ -e .gitignore ]; then - if [ -e .dockerignore ]; then - rsync -azq --delete --filter=":- .gitignore" --filter=":- .dockerignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - else - rsync -azq --delete --filter=":- .gitignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - fi - else - if [ -e .dockerignore ]; then - rsync -azq --delete --filter=":- .dockerignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - else - rsync -azq --delete -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - fi + local HAS_GITIGNORE=1 + fi + if 
[ -e .dockerignore ]; then + local HAS_DOCKERIGNORE=1 fi + rsync -azq --delete \ + ${HAS_GITIGNORE:+--filter=":- .gitignore"} \ + ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ + -e $_REMOTE_SHELL_COMMAND $1 $2 + echo "Sync complete" } function do-sync-down() { + echo "Syncing from remote dock ($1)" if [ -e .gitignore ]; then - if [ -e .dockerignore ]; then - rsync -azq --delete --filter=".- .gitignore" --filter=".- .dockerignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - else - rsync -azq --delete --filter=".- .gitignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - fi - else - if [ -e .dockerignore ]; then - rsync -azq --delete --filter=".- .dockerignore" -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - else - rsync -azq --delete -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress $1 $2 - fi + local HAS_GITIGNORE=1 + fi + if [ -e .dockerignore ]; then + local HAS_DOCKERIGNORE=1 + fi + set -x + rsync -azq \ + ${HAS_GITIGNORE:+--filter=".- .gitignore"} \ + ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ + ${3:+--exclude=$3} \ + -e $_REMOTE_SHELL_COMMAND $1 $2 + set +x + echo "Sync complete" +} + +function trim() { + local var="$*" + # remove leading whitespace characters + var="${var#"${var%%[![:space:]]*}"}" + # remove trailing whitespace characters + var="${var%"${var##*[![:space:]]}"}" + echo -n "$var" +} + +function get_version_file() { + # first get the directory name for the repo + local CODE_DIR=${PWD##*/} + + # now find out where the version file is + local LINE=$(grep versionfile_source $PWD/setup.cfg) + local FILE=$(trim ${LINE#*=}) + + if [[ -f "$PWD/_version.py.bld" && -f "$PWD/$FILE" ]]; then + echo -n $CODE_DIR/$FILE fi } function sync-up() { local user=$(whoami) - ssh ubuntu@$DOCKER_IP 'sudo chown -R ubuntu 
/data/workspaces || true' - ssh ubuntu@$DOCKER_IP 'sudo chown -R ubuntu /data/workspaces/'$USER' || true' ssh ubuntu@$DOCKER_IP 'mkdir -p /data/workspaces/'$USER'/code' do-sync-up $PWD ubuntu@$DOCKER_IP:/data/workspaces/$USER/code @@ -46,21 +74,21 @@ function sync-up() { # generate the _version.py.bld genversion - # first get the directory name for the repo - local CODE_DIR=${PWD##*/} + # get the version file + local VER_FILE=$(get_version_file) - # now find out where the version file is - local LINE=$(grep versionfile_source $PWD/setup.cfg) - local SPLIT=(${LINE//=/ }) - local FILE=${SPLIT[1]} - - # finally sync _version.py.bld into the version file - do-sync-up $PWD/_version.py.bld ubuntu@$DOCKER_IP:/data/workspaces/$USER/code/$CODE_DIR/$FILE + if [ -n "$VER_FILE" ]; then + # finally sync _version.py.bld into the version file + do-sync-up $PWD/_version.py.bld ubuntu@$DOCKER_IP:/data/workspaces/$USER/code/$VER_FILE + fi fi return 0 } function sync-down() { - do-sync-down "ubuntu@$DOCKER_IP:/data/workspaces/$(whoami)/code/${PWD##*/}/*" . + # if we have a proxy version file, ignore it on the sync down + local VER_FILE=$(get_version_file) + VER_FILE=${VER_FILE#*/} + do-sync-down "ubuntu@$DOCKER_IP:/data/workspaces/$(whoami)/code/${PWD##*/}/*" . 
${VER_FILE:+$VER_FILE} return 0 } From 3a6da73757d2254ef4c602a85a9b2c285cb90c33 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Thu, 11 Oct 2018 21:23:00 -0600 Subject: [PATCH 09/62] Allow specification of env variables on the command line (#21) --- scripts/run-image | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/run-image b/scripts/run-image index 1886aa0..044d204 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -19,9 +19,13 @@ from dockerutils import * _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ '{volumes} {ports} {image_name}:{image_tag} {cmd}' -def fetch_env_variables(config, image): +def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. env_vars = {} + if args_env: + for arg_env in args_env: + env_var = arg_env.split('=') + env_vars[env_var[0]] = env_var[1] if 'env_var_names' in config.sections(): endpoint = f"http://{os.environ.get(config['env_var_names']['remote_ip'])}:" \ f"{os.environ.get(config['env_var_names']['remote_port'], '5000')}/config" @@ -88,6 +92,8 @@ if __name__ == '__main__': help="Start the container with gpu support") parser.add_argument("-p", "--pre", default=False, action='store_true', help="use pre 1.25 API") + parser.add_argument("-e", "--env", action='append', + help="environment variables to pass to running container, e.g. 
foo=bar") args = parser.parse_args() if args.use_gpu: @@ -100,7 +106,7 @@ if __name__ == '__main__': init = '--init' run_config = { - 'environment': fetch_env_variables(config, args.image), + 'environment': fetch_env_variables(config, args.image, args.env), 'keep_container': args.keep or '--rm', 'interactive': '-d' if args.keep else '-it', 'gpu': gpu, From 106b632a46585107e2b81d1623f3b04a2d9219fc Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Mon, 15 Oct 2018 15:40:50 -0600 Subject: [PATCH 10/62] Refactor prompt augmentation (#23) * Refactor prompt augmentation Add comments, explanation on dock and dock-sync (no longer has shebang) Add ssh commands to register-dock to suppress spurious warning messages * As per suggestion, export _DOCK_MONIKER regardless of prompt mofdification env var --- scripts/dock | 128 +++++++++++++++++++----------------------- scripts/dock-sync | 10 ++-- scripts/register-dock | 67 +++++++++++----------- 3 files changed, 98 insertions(+), 107 deletions(-) diff --git a/scripts/dock b/scripts/dock index 2d44398..5e9ee04 100755 --- a/scripts/dock +++ b/scripts/dock @@ -1,17 +1,19 @@ -#!/bin/bash -i - -# The 'moniker' parameter is optional. If moniker is not specified, -# moniker will be set to the second argument (hostname | IP). - -FORCE_REBUILD=false -while getopts "f:" OPTION -do - case $OPTION in - --help | -h) - shift - ;; - esac -done +#@IgnoreInspection BashAddShebang +# This file must be used with "source bin/activate" *from bash or zsh* +# you cannot run it directly + +# As this script is sourced, the functions will be available and executed in the underlying shell. 
+# because parameter expansion works slightly differently between bash and zsh, we need to ensure +# that this works under either shell + +if [[ -z "$BASH" && -z "$ZSH_NAME" && -z "$DOCKERUTILS_DISABLE_PROMPT" ]]; then + # If we are going to modify the prompt, then we need to be running + # either bash or zsh + echo "" + echo "You must either be running bash or zsh" + echo "" + kill -INT $$ +fi MONIKER=${1:-none} PORT=2377 @@ -22,7 +24,7 @@ if [ -n "$_DOCK_MONIKER" ]; then fi if [ $MONIKER = "none" ]; then - echo "This script is used in conjunction with 'register-dock'" + echo "This script is used to setup a dock with a remote instance" echo echo "USAGE" echo " $ source dock " @@ -61,64 +63,41 @@ if [ -z "$DOCK_IP" ]; then kill -INT $$ fi -# Update command line prompt to reflect docked condition. -# Python virtual environment prompt (or lack therof) should remain unchanged. -update_dock_prompt() { - _DOCK_MONIKER="[dock:$DOCK_MONIKER] " - export _DOCK_MONIKER - - if [ -n "$_OLD_VIRTUAL_PS1" ]; then - _UNDOCKED_PS1="$PS1" - _OLD_VIRTUAL_PS1="$_OLD_VIRTUAL_PS1${_DOCK_MONIKER}" - export _OLD_VIRTUAL_PS1 - else - _UNDOCKED_PS1="$PS1" - fi - - if [ "$SHELL" = "/bin/zsh" ]; then - PS1=$PS1${_DOCK_MONIKER} - else - PS1=$PS1${_DOCK_MONIKER} - fi - - export PS1 - export _UNDOCKED_PS1 -} - # Remove docked condition including command line prompt. # Python virtual environment prompt (or lack therof) should remain unchanged. 
castoff() { - _DOCK_MONIKER="[dock:$DOCK_MONIKER] " - unset DOCKER_TLS_VERIFY - unset DOCKER_CERT_PATH - unset DOCKER_HOST - unset DOCKER_IP - - if [ -n "$_OLD_VIRTUAL_PS1" ]; then - if [ "$SHELL" = "/bin/zsh" ]; then - _OLD_VIRTUAL_PS1="${_OLD_VIRTUAL_PS1%$_DOCK_MONIKER}" - else - _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1%"$_DOCK_MONIKER"} + _DOCK_MONIKER="\[dock:$DOCK_MONIKER\] " + unset DOCKER_TLS_VERIFY + unset DOCKER_CERT_PATH + unset DOCKER_HOST + unset DOCKER_IP + + if [ -n "$BASH" ]; then + if [ -n "$_OLD_VIRTUAL_PS1" ]; then + _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1//${_DOCK_MONIKER}/} + export _OLD_VIRTUAL_PS1 + fi + PS1=${PS1//${_DOCK_MONIKER}/} + elif [ -n "$ZSH_NAME" ]; then + if [ -n "$_OLD_VIRTUAL_PS1" ]; then + _OLD_VIRTUAL_PS1=${_OLD_VIRTUAL_PS1//${~_DOCK_MONIKER}/} + export _OLD_VIRTUAL_PS1 + fi + PS1=${PS1//${~_DOCK_MONIKER}/} fi - export _OLD_VIRTUAL_PS1 - fi - - if [ "$SHELL" = "/bin/zsh" ]; then - PS1=${PS1%$_DOCK_MONIKER} - else - PS1=${PS1%"$_DOCK_MONIKER"} - fi - export PS1 - - unset _DOCK_MONIKER - unset _UNDOCKED_PS1 - unset -f castoff - unset -f sync-up - unset -f sync-down - unset -f do-sync-up - unset -f do-sync-down - echo "Castoff complete" + + export PS1 + + unset _DOCK_MONIKER + unset -f castoff + unset -f sync-up + unset -f sync-down + unset -f do-sync-up + unset -f do-sync-down + unset -f trim + unset -f get_version_file + echo "Castoff complete" } . dock-sync @@ -127,7 +106,18 @@ export DOCKER_TLS_VERIFY=1 export DOCKER_CERT_PATH=${HOME}/.docker/${DOCK_IP} export DOCKER_HOST=tcp://${DOCK_IP}:2377 export DOCKER_IP=${DOCK_IP} +export _DOCK_MONIKER="[dock:$DOCK_MONIKER] " if [ -z $DOCKERUTILS_DISABLE_PROMPT ]; then - update_dock_prompt + # Update command line prompt to reflect docked condition. + # Python virtual environment prompt (or lack therof) should remain unchanged. 
+ + if [ -n "$_OLD_VIRTUAL_PS1" ]; then + _OLD_VIRTUAL_PS1="$_OLD_VIRTUAL_PS1${_DOCK_MONIKER}" + export _OLD_VIRTUAL_PS1 + fi + + PS1=$PS1${_DOCK_MONIKER} + + export PS1 fi \ No newline at end of file diff --git a/scripts/dock-sync b/scripts/dock-sync index ad55e44..ec5570f 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -1,12 +1,12 @@ -#!/usr/bin/env bash +#@IgnoreInspection BashAddShebang +# This file must be used with "source bin/activate" *from bash or zsh* +# you cannot run it directly # the need for a seperate sync up and down stems from the filters # on sync up we need to use the `:-` filter syntax # on sync down we need to use the `.-` filter syntax # also on sync down we don't want to use --delete and we add a third parameter that is an additional ignore file -_REMOTE_SHELL_COMMAND="ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" - function do-sync-up() { echo "Syncing $1 to remote dock ($2)" if [ -e .gitignore ]; then @@ -18,7 +18,7 @@ function do-sync-up() { rsync -azq --delete \ ${HAS_GITIGNORE:+--filter=":- .gitignore"} \ ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ - -e $_REMOTE_SHELL_COMMAND $1 $2 + -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 echo "Sync complete" } @@ -35,7 +35,7 @@ function do-sync-down() { ${HAS_GITIGNORE:+--filter=".- .gitignore"} \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ - -e $_REMOTE_SHELL_COMMAND $1 $2 + -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 set +x echo "Sync complete" } diff --git a/scripts/register-dock b/scripts/register-dock index deb59b7..fb82a89 100755 --- a/scripts/register-dock +++ b/scripts/register-dock @@ -20,6 +20,7 @@ DOCK_HOSTNAME=${2:-none} IP=${2:-none} MONIKER=${3:-$2} PORT=2377 +SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" if [ $USER == "none" ] || [ $DOCK_HOSTNAME == "none" ]; 
then echo "This script will secure the docker daemon socket on a remote server and download the" @@ -68,7 +69,7 @@ echo "IP:$IP" echo "Connecting to ${IP}..." # If force rebuild flag is not set, then check for already existing key on server and download it. -if [ $FORCE_REBUILD == false ] && ssh $USER@$IP "[ -f $REMOTE_CONFIG_DIR/ca.pem ]"; then +if [ $FORCE_REBUILD == false ] && ssh ${SSH_OPTIONS} $USER@$IP "[ -f $REMOTE_CONFIG_DIR/ca.pem ]"; then if [ -f ~/.docker/${IP}/ca.pem ]; then mv -f ~/.docker/${IP}/ca.pem ~/.docker/${IP}/ca.pem.back mv -f ~/.docker/${IP}/key.pem ~/.docker/${IP}/key.pem.back @@ -82,9 +83,9 @@ if [ $FORCE_REBUILD == false ] && ssh $USER@$IP "[ -f $REMOTE_CONFIG_DIR/ca.pem rm -f ~/.docker/${IP}/cert.pem # Copy new files from remote host - scp $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && - scp $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && - scp $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && + scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && + scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && + scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && export DOCKER_TLS_VERIFY=1 && export DOCKER_CERT_PATH=~/.docker/$IP && export DOCKER_HOST=tcp://$IP:$PORT && @@ -93,39 +94,39 @@ if [ $FORCE_REBUILD == false ] && ssh $USER@$IP "[ -f $REMOTE_CONFIG_DIR/ca.pem exit 0 fi -if ssh $USER@$IP "[ -d $REMOTE_CONFIG_DIR ]"; then +if ssh ${SSH_OPTIONS} $USER@$IP "[ -d $REMOTE_CONFIG_DIR ]"; then echo "Backing up remote keys $REMOTE_CONFIG_DIR" - ssh $USER@$IP "sudo rm -rf $REMOTE_CONFIG_DIR-back" && - ssh $USER@$IP "sudo cp -rf $REMOTE_CONFIG_DIR $REMOTE_CONFIG_DIR-back" && - ssh $USER@$IP "sudo rm -rf $REMOTE_CONFIG_DIR" + ssh ${SSH_OPTIONS} $USER@$IP "sudo rm -rf $REMOTE_CONFIG_DIR-back" && + ssh ${SSH_OPTIONS} $USER@$IP "sudo cp -rf $REMOTE_CONFIG_DIR $REMOTE_CONFIG_DIR-back" && + ssh ${SSH_OPTIONS} $USER@$IP 
"sudo rm -rf $REMOTE_CONFIG_DIR" fi # echo "Creating CA..." -ssh $USER@$IP "sudo mkdir -p $REMOTE_CONFIG_DIR" && -ssh $USER@$IP "sudo openssl genrsa -aes256 -passout pass:$TEMP_PASS -out $REMOTE_CONFIG_DIR/ca-key.pem 4096" && -ssh $USER@$IP "sudo openssl req -new -x509 -days 365 -key $REMOTE_CONFIG_DIR/ca-key.pem -sha256 \ +ssh ${SSH_OPTIONS} $USER@$IP "sudo mkdir -p $REMOTE_CONFIG_DIR" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -aes256 -passout pass:$TEMP_PASS -out $REMOTE_CONFIG_DIR/ca-key.pem 4096" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -new -x509 -days 365 -key $REMOTE_CONFIG_DIR/ca-key.pem -sha256 \ -out $REMOTE_CONFIG_DIR/ca.pem -passin pass:$TEMP_PASS \ -subj '/C=US/ST=Utah/L=Draper/O=Proofpoint/OU=Resero/CN=proofpoint.com'" && sleep 1 && -ssh $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/server-key.pem 4096" +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/server-key.pem 4096" # echo "Creating new docker keys" -ssh $USER@$IP "sudo openssl req -subj '/CN=$HOSTNAME' -sha256 -new -key $REMOTE_CONFIG_DIR/server-key.pem \ +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -subj '/CN=$HOSTNAME' -sha256 -new -key $REMOTE_CONFIG_DIR/server-key.pem \ -out $REMOTE_CONFIG_DIR/server.csr" && -ssh $USER@$IP "echo subjectAltName = DNS:\$HOSTNAME,IP:$IP > /tmp/extfile.cnf" && -ssh $USER@$IP "sudo mv /tmp/extfile.cnf $REMOTE_CONFIG_DIR/extfile.cnf" && -ssh $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/server.csr \ +ssh ${SSH_OPTIONS} $USER@$IP "echo subjectAltName = DNS:\$HOSTNAME,IP:$IP > /tmp/extfile.cnf" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo mv /tmp/extfile.cnf $REMOTE_CONFIG_DIR/extfile.cnf" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/server.csr \ -CA $REMOTE_CONFIG_DIR/ca.pem -CAkey $REMOTE_CONFIG_DIR/ca-key.pem -CAcreateserial \ -out $REMOTE_CONFIG_DIR/server-cert.pem -extfile $REMOTE_CONFIG_DIR/extfile.cnf -passin 
pass:$TEMP_PASS" && -ssh $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/key.pem 4096" && -ssh $USER@$IP "sudo openssl req -subj '/CN=client' -new -key $REMOTE_CONFIG_DIR/key.pem -out $REMOTE_CONFIG_DIR/client.csr" && -ssh $USER@$IP "sudo echo extendedKeyUsage = clientAuth > $REMOTE_CONFIG_DIR/extfile.cnf" && -ssh $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/client.csr \ +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/key.pem 4096" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -subj '/CN=client' -new -key $REMOTE_CONFIG_DIR/key.pem -out $REMOTE_CONFIG_DIR/client.csr" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo echo extendedKeyUsage = clientAuth > $REMOTE_CONFIG_DIR/extfile.cnf" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/client.csr \ -CA $REMOTE_CONFIG_DIR/ca.pem -CAkey $REMOTE_CONFIG_DIR/ca-key.pem -CAcreateserial \ -out $REMOTE_CONFIG_DIR/cert.pem -extfile $REMOTE_CONFIG_DIR/extfile.cnf -passin pass:$TEMP_PASS" && -ssh $USER@$IP "sudo rm -v $REMOTE_CONFIG_DIR/client.csr $REMOTE_CONFIG_DIR/server.csr" && -ssh $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca-key.pem $REMOTE_CONFIG_DIR/key.pem $REMOTE_CONFIG_DIR/server-key.pem" && -ssh $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca.pem $REMOTE_CONFIG_DIR/server-cert.pem $REMOTE_CONFIG_DIR/cert.pem" +ssh ${SSH_OPTIONS} $USER@$IP "sudo rm -v $REMOTE_CONFIG_DIR/client.csr $REMOTE_CONFIG_DIR/server.csr" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca-key.pem $REMOTE_CONFIG_DIR/key.pem $REMOTE_CONFIG_DIR/server-key.pem" && +ssh ${SSH_OPTIONS} $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca.pem $REMOTE_CONFIG_DIR/server-cert.pem $REMOTE_CONFIG_DIR/cert.pem" ### copy CA to client. 
mkdir -p ~/.docker/${IP} && @@ -140,20 +141,20 @@ if [ -f ~/.docker/${IP}/ca.pem ]; then mv -f ~/.docker/${IP}/cert.pem ~/.docker/${IP}/cert.pem.back fi -scp $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && -scp $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && -scp $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && +scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && +scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && +scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && # TODO: Rather than overwriting the configuration (generally a poor practice) # TODO: look at merging the configuration we are interested in, into the configuration # TODO: that is already present -ssh $USER@$IP 'sudo rm -f /etc/docker/daemon.json' && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo rm -f /etc/docker/daemon.json' && DOCKER_CFG_FILE=$(python3 -c "import pkg_resources; print(pkg_resources.resource_filename('dockerutils', 'docker-server-daemon.json'))") -scp $DOCKER_CFG_FILE $USER@$IP:/tmp/daemon.json && -ssh $USER@$IP 'sudo mkdir -p /etc/docker/' && -ssh $USER@$IP 'sudo mv /tmp/daemon.json /etc/docker/daemon.json' && -ssh $USER@$IP 'sudo systemctl stop docker' && -ssh $USER@$IP 'sudo systemctl daemon-reload' && -ssh $USER@$IP 'sudo systemctl start docker' && +scp ${SSH_OPTIONS} $DOCKER_CFG_FILE $USER@$IP:/tmp/daemon.json && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo mkdir -p /etc/docker/' && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo mv /tmp/daemon.json /etc/docker/daemon.json' && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl stop docker' && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl daemon-reload' && +ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl start docker' && export DOCKER_TLS_VERIFY=1 && export DOCKER_CERT_PATH=~/.docker/$IP && export DOCKER_HOST=tcp://$IP:$PORT && From 703dd16bb5d0b7764cfb4950b44fe127173d0894 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Tue, 23 Oct 
2018 10:52:32 -0600 Subject: [PATCH 11/62] Only honor .dockerignore --- scripts/dock-sync | 8 -------- 1 file changed, 8 deletions(-) diff --git a/scripts/dock-sync b/scripts/dock-sync index ec5570f..1a55052 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -9,14 +9,10 @@ function do-sync-up() { echo "Syncing $1 to remote dock ($2)" - if [ -e .gitignore ]; then - local HAS_GITIGNORE=1 - fi if [ -e .dockerignore ]; then local HAS_DOCKERIGNORE=1 fi rsync -azq --delete \ - ${HAS_GITIGNORE:+--filter=":- .gitignore"} \ ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 echo "Sync complete" @@ -24,15 +20,11 @@ function do-sync-up() { function do-sync-down() { echo "Syncing from remote dock ($1)" - if [ -e .gitignore ]; then - local HAS_GITIGNORE=1 - fi if [ -e .dockerignore ]; then local HAS_DOCKERIGNORE=1 fi set -x rsync -azq \ - ${HAS_GITIGNORE:+--filter=".- .gitignore"} \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 From 4d4e88b367ee847dc11d8104bccda0ab83dd7a74 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 23 Oct 2018 11:23:01 -0600 Subject: [PATCH 12/62] Remove `set -x` (left over from debugging) (#26) --- scripts/dock-sync | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts/dock-sync b/scripts/dock-sync index 1a55052..b609e12 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -23,12 +23,10 @@ function do-sync-down() { if [ -e .dockerignore ]; then local HAS_DOCKERIGNORE=1 fi - set -x rsync -azq \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 - set +x echo "Sync complete" } From 0c639a99972c93d1dc30cfda21bf68b061df26ca Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Thu, 1 Nov 2018 13:42:40 -0600 Subject: 
[PATCH 13/62] Generalize configuration and don't create instance if ssh key isn't available (#28) --- scripts/create-dock | 51 ++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/scripts/create-dock b/scripts/create-dock index a8af5e9..25b0529 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -2,11 +2,11 @@ # Default Values INSTANCE_TYPE="m5.xlarge" -SUBNET_ID="subnet-0cf3c97675ecb8e82" MONIKER= GREEN='\033[0;32m' +RED='\033[0;31m' NO_COLOR='\033[0m' check_aws_connectivity() { @@ -41,6 +41,15 @@ print_help() { echo " If no options are passed into the script, it will prompt with defaults." echo " The register-dock script is run automatically after the instance is ready." echo + echo " A number of values used in creating the ec2 instance (security-group-ids, etc.) can be over-ridden" + echo " by specifying a ~/.docker/dock.cfg file with the following values:" + echo + echo ' key_name="..."' + echo ' security_group_ids="..."' + echo ' iam_instance_profile="..."' + echo ' subnet_id="..."' + echo ' tag_specifications="..."' + echo echo "Usage" echo " $ create-dock [options]" echo @@ -54,7 +63,6 @@ print_help() { echo -e "\n -i instance-type\n The default instance type is ${INSTANCE_TYPE} (no GPU).\n Other options include p2.xlarge (GPU), m5.2xlarge, etc." echo -e "\n -a ami-id\n (Optional) The ami to use for the instance." - echo -e "\n -s subnet-id\n (Optional) The subnet-id to use for the instance." echo -e "\n -h help\n This help" echo echo "Examples" @@ -78,15 +86,13 @@ get_private_ip() { nflag='' # dock name flag iflag='' # instance type aflag='' # ami flag -sflag='' # subnet flag mflag='' # moniker flag -while getopts 'hn:i:a:s:m:' flag; do # if a character is followed by a colon, that argument is expected to have an argument. +while getopts 'hn:i:a:m:' flag; do # if a character is followed by a colon, that argument is expected to have an argument. 
case "${flag}" in h) hflag='true';; n) nflag='true'; INSTANCE_NAME="${OPTARG}" ;; i) iflag='true'; INSTANCE_TYPE="${OPTARG}" ;; a) aflag='true'; AMI_ID="${OPTARG}" ;; - s) sflag='true'; SUBNET_ID="${OPTARG}" ;; m) mflag='true'; MONIKER="${OPTARG}" ;; *) error "Unexpected option ${flag}" ;; esac @@ -115,19 +121,36 @@ fi # Create dock +if [ -e "$HOME/.docker/dock.cfg" ]; then + source "$HOME/.docker/dock.cfg" +else + key_name="resero-staging" + security_group_ids='"sg-213eb35a" "sg-3bde0341" "sg-b93e0dc2" "sg-1bd90461"' + iam_instance_profile="lanista-app" + subnet_id="subnet-b8b440de" + tag_specifications='"ResourceType=instance,Tags=[{Key=Name,Value=${INSTANCE_NAME}},\ + {Key=os,Value=Ubuntu},{Key=os_version,Value=16.04.3},{Key=app_type,Value=model},\ + {Key=business_unit,Value=\"Archiving & Governance\"},{Key=component,Value=\"ec2 instance\"},\ + {Key=product,Value=\"Resero Development\"},{Key=support_level,Value=dev},\ + {Key=created_by,Value=${USERNAME}}]"' +fi +if [ ! -f "$HOME/.ssh/$key_name" ]; then + echo + echo + echo -e "${RED}It appears the the key required to access the EC2 instance doesn't exist ($HOME/.ssh/$key_name).${NO_COLOR}" + echo + echo "Please ensure that the correct ssh-key is configured in ~/.docker/dock.cfg" + exit 1 +fi INSTANCE_ID=$(aws ec2 run-instances \ - --subnet-id ${SUBNET_ID} \ + --subnet-id ${subnet_id} \ --image-id $AMI_ID \ --instance-type "${INSTANCE_TYPE}" \ --block-device-mappings "DeviceName='/dev/sda1',Ebs={VolumeSize=100,VolumeType='gp2'}" \ - --tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=${INSTANCE_NAME}},\ - {Key=os,Value=Ubuntu},{Key=os_version,Value=16.04.3},{Key=app_type,Value=model},\ - {Key=business_unit,Value=\"Archiving & Governance\"},{Key=component,Value=\"ec2 instance\"},\ - {Key=product,Value=\"Resero Development\"},{Key=support_level,Value=dev},\ - {Key=created_by,Value=${USERNAME}}]" \ - --iam-instance-profile Name=lanista-app \ - --key-name resero-staging \ - --security-group-ids 
"sg-b93e0dc2" "sg-1bd90461" "sg-213eb35a" "sg-a5ac61de" \ + --tag-specifications "${tag_specifications}" \ + --iam-instance-profile Name="${iam_instance_profile}"\ + --key-name "${key_name}" \ + --security-group-ids "${security_sgroup_ids}" \ | grep InstanceId | awk -F '"' '{print $4}' \ ) From 975af361a9283190840ecc556150789392c4ac91 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Mon, 3 Dec 2018 15:23:30 -0700 Subject: [PATCH 14/62] fixes #32 - explicitely use `set -e` (#33) --- scripts/create-dock | 3 ++- scripts/destroy-dock | 3 ++- scripts/run-notebook | 2 +- scripts/ssh-dock | 3 ++- scripts/start-dock | 3 ++- scripts/stop-dock | 3 ++- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/scripts/create-dock b/scripts/create-dock index 25b0529..1549173 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -1,4 +1,5 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e # Default Values INSTANCE_TYPE="m5.xlarge" diff --git a/scripts/destroy-dock b/scripts/destroy-dock index a4a1e4b..77475f5 100755 --- a/scripts/destroy-dock +++ b/scripts/destroy-dock @@ -1,4 +1,5 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e # Default Values MONIKER=${1:-"$DOCKER_IP"} diff --git a/scripts/run-notebook b/scripts/run-notebook index 90aa77d..25ec9d6 100755 --- a/scripts/run-notebook +++ b/scripts/run-notebook @@ -18,7 +18,7 @@ if [ -d "./docker/notebook" ]; then run-image notebook else sync-up - run-image dock-notebook + run-image notebook sync-down fi else diff --git a/scripts/ssh-dock b/scripts/ssh-dock index 655335c..82942dd 100755 --- a/scripts/ssh-dock +++ b/scripts/ssh-dock @@ -1,4 +1,5 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e if [[ -z "$DOCKER_IP" && -z "$1" ]]; then echo "You must either be docked, or provide a argument specifying the 'moniker' of the dock you want to ssh to" diff --git a/scripts/start-dock b/scripts/start-dock index 8983500..fbf020c 100755 --- a/scripts/start-dock +++ b/scripts/start-dock @@ -1,4 +1,5 @@ 
-#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e # Default Values MONIKER=${1:-"$DOCKER_IP"} diff --git a/scripts/stop-dock b/scripts/stop-dock index 45b7580..10b2bd4 100755 --- a/scripts/stop-dock +++ b/scripts/stop-dock @@ -1,4 +1,5 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e # Default Values MONIKER=${1:-"$DOCKER_IP"} From ccd5a1422724d4a9a01738000fa02a376a51fd56 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Mon, 3 Dec 2018 15:23:43 -0700 Subject: [PATCH 15/62] Add user to ssh-dock command (#34) --- scripts/ssh-dock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/ssh-dock b/scripts/ssh-dock index 82942dd..96f487e 100755 --- a/scripts/ssh-dock +++ b/scripts/ssh-dock @@ -27,7 +27,7 @@ if [ -n "$1" ]; then fi echo "Opening ssh connection to ${DOCKER_IP}" -ssh ${DOCKER_IP} +ssh ubuntu@${DOCKER_IP} From 94e7a7b021a1dfa0881caccf7e467ebd05b22564 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 5 Dec 2018 11:50:25 -0700 Subject: [PATCH 16/62] Initial support for "configuration only" images. 
(Requires empty directory as placeholder for image) (#35) --- dockerutils/image_conventions.py | 5 ++++- scripts/build-image | 16 +++++++++++----- scripts/dock-sync | 17 +++++++++++------ 3 files changed, 26 insertions(+), 12 deletions(-) diff --git a/dockerutils/image_conventions.py b/dockerutils/image_conventions.py index 24d83cf..0296c98 100644 --- a/dockerutils/image_conventions.py +++ b/dockerutils/image_conventions.py @@ -38,7 +38,10 @@ def get_image_designation(image, config=None): def get_image_name(config, image): if image in config.sections(): if 'name' in config[image]: - return f'{get_default_project_name()}-{config[image]["name"]}' + if 'prefix' in config[image] and not config[image]['prefix'] in ['False', 'false', 'F', 'f']: + return f'{get_default_project_name()}-{config[image]["name"]}' + else: + return f'{config[image]["name"]}' return f'{get_default_project_name()}-{image}' diff --git a/scripts/build-image b/scripts/build-image index b39773a..6e55ab8 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -84,7 +84,12 @@ if __name__ == '__main__': config.optionxform = str config.read(os.path.join('docker', 'dockerutils.cfg')) - gen_version_file() + if os.path.isfile('setup.cfg'): + config = configparser.ConfigParser() + config.optionxform = str + config.read('setup.cfg') + if 'versioneer' in config: + gen_version_file() with pip_conf(root_dir): image_types = get_image_types() @@ -125,8 +130,9 @@ if __name__ == '__main__': if 'pull_FROM_on_force' in image_config: pull_FROM_on_force = config[image]['pull_FROM_on_force'] - rc = fn(image, image_name, image_tag, config=image_config, pull=args.pull_base or (args.force_build_base and pull_FROM_on_force)) - # because an image may not be present on the clean, ignore a non-zero return code - if rc and not args.image == 'clean': - sys.exit(rc) + if os.path.isfile(f'docker/{image}/Dockerfile'): + rc = fn(image, image_name, image_tag, config=image_config, pull=args.pull_base or (args.force_build_base and 
pull_FROM_on_force)) + # because an image may not be present on the clean, ignore a non-zero return code + if rc and not args.image == 'clean': + sys.exit(rc) sys.exit(0) diff --git a/scripts/dock-sync b/scripts/dock-sync index b609e12..606dcdd 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -65,11 +65,13 @@ function sync-up() { genversion # get the version file - local VER_FILE=$(get_version_file) + if [ -n "$PWD/setup.cfg" ]; then + local VER_FILE=$(get_version_file) - if [ -n "$VER_FILE" ]; then - # finally sync _version.py.bld into the version file - do-sync-up $PWD/_version.py.bld ubuntu@$DOCKER_IP:/data/workspaces/$USER/code/$VER_FILE + if [ -n "$VER_FILE" ]; then + # finally sync _version.py.bld into the version file + do-sync-up $PWD/_version.py.bld ubuntu@$DOCKER_IP:/data/workspaces/$USER/code/$VER_FILE + fi fi fi return 0 @@ -77,8 +79,11 @@ function sync-up() { function sync-down() { # if we have a proxy version file, ignore it on the sync down - local VER_FILE=$(get_version_file) - VER_FILE=${VER_FILE#*/} + local VER_FILE= + if [ -n "$PWD/setup.cfg" ]; then + VER_FILE=$(get_version_file) + VER_FILE=${VER_FILE#*/} + fi do-sync-down "ubuntu@$DOCKER_IP:/data/workspaces/$(whoami)/code/${PWD##*/}/*" . ${VER_FILE:+$VER_FILE} return 0 } From 99d035f422364673d7b18a0f36df46d75005e5a2 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 5 Dec 2018 12:57:08 -0700 Subject: [PATCH 17/62] Add prompt for desctructive rsync (#36) --- scripts/dock-sync | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/scripts/dock-sync b/scripts/dock-sync index 606dcdd..b62e638 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -12,7 +12,12 @@ function do-sync-up() { if [ -e .dockerignore ]; then local HAS_DOCKERIGNORE=1 fi - rsync -azq --delete \ + + read -e -p "Use 'delete' option with rsync (destructive)? 
Type enter to bypass, y to use 'delete': " RESPONSE + local DESTRUCTIVE="" + if [ "$RESPONSE" == "y" ]; then DESTRUCTIVE="--delete"; fi + + rsync -azq $DESTRUCTIVE \ ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 echo "Sync complete" @@ -23,7 +28,12 @@ function do-sync-down() { if [ -e .dockerignore ]; then local HAS_DOCKERIGNORE=1 fi - rsync -azq \ + + read -e -p "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " RESPONSE + local DESTRUCTIVE="" + if [ "$RESPONSE" == "y" ]; then DESTRUCTIVE="--delete"; fi + + rsync -azq $DESTRUCTIVE \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 From 72271219b02fe81a2dc6bb2a84968d603c999159 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 6 Dec 2018 11:43:00 -0700 Subject: [PATCH 18/62] Fix errors in quoting on run-instances command Add awscli to base image --- packer/setup-v1.sh | 9 ++++++++- scripts/create-dock | 19 +++++++++---------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/packer/setup-v1.sh b/packer/setup-v1.sh index cf6b774..fc5f63a 100755 --- a/packer/setup-v1.sh +++ b/packer/setup-v1.sh @@ -5,7 +5,14 @@ sleep 30 # update apt and install dependencies sudo apt-get update -sudo apt-get install -y gcc make apt-transport-https ca-certificates curl software-properties-common +sudo apt-get install -y \ + apt-transport-https \ + aws-cli \ + ca-certificates \ + curl \ + gcc \ + make \ + software-properties-common # get the latest nvidia drivers and install them wget -P /tmp http://us.download.nvidia.com/tesla/396.44/NVIDIA-Linux-x86_64-396.44.run diff --git a/scripts/create-dock b/scripts/create-dock index 1549173..700ba06 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -65,11 +65,11 @@ print_help() { echo -e "\n -i instance-type\n The 
default instance type is ${INSTANCE_TYPE} (no GPU).\n Other options include p2.xlarge (GPU), m5.2xlarge, etc." echo -e "\n -a ami-id\n (Optional) The ami to use for the instance." echo -e "\n -h help\n This help" - echo + echo echo "Examples" echo echo -e " $ create-dock\n Create default instance type ($INSTANCE_TYPE) named ${INSTANCE_NAME}.\n CLI prompt moniker when docked will be [dock:IP address]." - echo + echo echo -e " $ create-dock -i t2.micro -m my-dock\n Create t2.micro instance type named ${INSTANCE_NAME}.\n CLI prompt moniker when docked will be [dock:my-dock]." @@ -126,14 +126,13 @@ if [ -e "$HOME/.docker/dock.cfg" ]; then source "$HOME/.docker/dock.cfg" else key_name="resero-staging" - security_group_ids='"sg-213eb35a" "sg-3bde0341" "sg-b93e0dc2" "sg-1bd90461"' + security_group_ids="sg-213eb35a sg-3bde0341 sg-b93e0dc2 sg-1bd90461" iam_instance_profile="lanista-app" subnet_id="subnet-b8b440de" - tag_specifications='"ResourceType=instance,Tags=[{Key=Name,Value=${INSTANCE_NAME}},\ - {Key=os,Value=Ubuntu},{Key=os_version,Value=16.04.3},{Key=app_type,Value=model},\ - {Key=business_unit,Value=\"Archiving & Governance\"},{Key=component,Value=\"ec2 instance\"},\ - {Key=product,Value=\"Resero Development\"},{Key=support_level,Value=dev},\ - {Key=created_by,Value=${USERNAME}}]"' + tag_specifications="ResourceType=instance,Tags=[{Key=Name,Value=${INSTANCE_NAME}},\ + {Key=business_unit,Value='Archiving & Governance'},{Key=component,Value='ec2 instance'},\ + {Key=product,Value='Resero Development'},{Key=support_level,Value=dev},\ + {Key=created_by,Value=${USERNAME}}]" fi if [ ! 
-f "$HOME/.ssh/$key_name" ]; then echo @@ -149,9 +148,9 @@ INSTANCE_ID=$(aws ec2 run-instances \ --instance-type "${INSTANCE_TYPE}" \ --block-device-mappings "DeviceName='/dev/sda1',Ebs={VolumeSize=100,VolumeType='gp2'}" \ --tag-specifications "${tag_specifications}" \ - --iam-instance-profile Name="${iam_instance_profile}"\ + --iam-instance-profile Name="${iam_instance_profile}" \ --key-name "${key_name}" \ - --security-group-ids "${security_sgroup_ids}" \ + --security-group-ids "${security_group_ids}" \ | grep InstanceId | awk -F '"' '{print $4}' \ ) From a5440ec2e3cff4cd8fc31c2f692a01282200eeba Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 6 Dec 2018 12:05:37 -0700 Subject: [PATCH 19/62] fix destructive delete prompts --- scripts/dock-sync | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/scripts/dock-sync b/scripts/dock-sync index b62e638..61df9aa 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -13,11 +13,13 @@ function do-sync-up() { local HAS_DOCKERIGNORE=1 fi - read -e -p "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " RESPONSE + local RESPONSE local DESTRUCTIVE="" - if [ "$RESPONSE" == "y" ]; then DESTRUCTIVE="--delete"; fi + echo "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " + read RESPONSE + if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi - rsync -azq $DESTRUCTIVE \ + rsync -azq ${DESTRUCTIVE} \ ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 echo "Sync complete" @@ -29,11 +31,13 @@ function do-sync-down() { local HAS_DOCKERIGNORE=1 fi - read -e -p "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " RESPONSE + local RESPONSE local DESTRUCTIVE="" - if [ "$RESPONSE" == "y" ]; then DESTRUCTIVE="--delete"; fi + echo "Use 'delete' option with rsync (destructive)? 
Type enter to bypass, y to use 'delete': " + read RESPONSE + if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi - rsync -azq $DESTRUCTIVE \ + rsync -azq ${DESTRUCTIVE} \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 From dc248ba916d666e080875911357d9df2f144a3d9 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 6 Dec 2018 12:08:55 -0700 Subject: [PATCH 20/62] Add echo of rsync to sync-up/down --- scripts/dock-sync | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/dock-sync b/scripts/dock-sync index 61df9aa..7ba22e0 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -19,6 +19,7 @@ function do-sync-up() { read RESPONSE if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi + echo rsync -azq ${DESTRUCTIVE} ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 rsync -azq ${DESTRUCTIVE} \ ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} \ -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 @@ -37,6 +38,8 @@ function do-sync-down() { read RESPONSE if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi + echo rsync -azq ${DESTRUCTIVE} ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} ${3:+--exclude=$3} -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 + rsync -azq ${DESTRUCTIVE} \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ From 34cfe1dfeb14d57ce245f7b03dbc042aed7cd382 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Mon, 10 Dec 2018 13:03:25 -0700 Subject: [PATCH 21/62] Update documentation, add section for "configuration-only" images --- README.md | 31 +++++++++++++++++++++++-------- scripts/dock-sync | 1 - 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index fccde6a..177235f 100644 --- 
a/README.md +++ b/README.md @@ -17,12 +17,12 @@ for containers that are dependent solely on the project In this directory tree there should be one sub-directory for each unique docker container type that is desired. Each of these sub-directories would contain the `Dockerfile` that will be used to create the image as well as any source specific to that image. -2) Use versioneer for project versioning. +2) Use versioneer for project versioning (Optional). As part of the image build, a file, `_version.py.bld`, will be generated and placed at the project root. A `Dockerfile` can add that file to the image on creation to prevent the need for including the .git directory tree in the container context (usually quite expensive). -3) Create a docker/base directory to make use of built in external dependency isolation +3) Create a docker/base directory to make use of built in external dependency isolation (optional) This capability supports environments where a docker build isn't able to access external dependencies (Docker Hub, pypi, etc.), for instance a server in a "locked-down" environment. A base image can be defined to isolate any @@ -39,6 +39,7 @@ for containers that are dependent solely on the project ## Command-line Interface +### Image cli `build-image` takes the name of one of the sub-directories in the `docker` directory and builds the image defined therein. The image is named \-\:\ @@ -50,26 +51,34 @@ together with any of the configuration for that image defined in `dockerutils.cf `publish-image` takes the name of one of the sub-directories in the `docker` directory and pushes the image built by the docker file to the defined repository (AWS or Docker) +### Notebook cli `run-notebook` will start a docker container using either the notebook container found in the `docker/notebook` directory if it exists, or [rappdw/docker-ds](https://github.com/rappdw/docker-ds) otherwise. 
The current directory will be mounted into the container for use in the Juypter notebook environment. There are a couple of environment variable to be aware of with this command: +* DOCKER_DS_DONT_PULL - if set, the version of rappdw/docker-ds currently available will be used rather than pulling +the latest version from docker hub. +* DOCKER_DS_DIFFS - if set, + +### Dock cli + +A "dock" is a remote system that has a docker daemon running and configured in a secure fashion (generally an EC2 +instance). You can "dock" your terminal to a remote instance and any docker commands, including image and notebook cli +above will be run against the remote docker server. Once a "dock" is created, you can dock your terminal by issuing +the command `source dock ` + `create-dock` will start an ec2 instance that can be used for remote docking. This instance is configured to provide secure interaction with the docker server, as well as to support GPU utliziation (`-g` option with `run-image`) +`destroy-dock` will terminate a remote dock instance and delete any local configuration files + `stop-dock` will change the instances state of a remote dock to `stopped` `start-dock` will change the instance state of a remote dock to `running` -`destroy-dock` will terminate a remote dock instance and delete any local configuration files - `ssh-dock` opens a terminal on the remote dock with ssh -* DOCKER_DS_DONT_PULL - if set, the version of rappdw/docker-ds currently available will be used rather than pulling -the latest version from docker hub. -* DOCKER_DS_DIFFS - if set, - ## `dockerutils.cfg` Format Configuration in `docker/dockerutils.cfg` is used to configure behavior of the `dockerutils` scripts. @@ -105,6 +114,12 @@ name=dev ... ``` +### Configuration-only Images +If there is a docker container that does what you want already, you can create a configuration-only image by +specifying `name`, `tag` and `prefix=False` in the configuration section for the image. 
For example the base notebook +image `rappdw/docker-ds` is often sufficient for running a Jupyter notebook against your code, as it auto detects a +`setup.py` upon container start and installs the module into the notebook environment. + ### Image Tagging The default tag for any image created/run/etc. is the user name in the host environment when running the utility. This can be overriden by adding a `tag` value to the desired section. For example: diff --git a/scripts/dock-sync b/scripts/dock-sync index 7ba22e0..ad0e10b 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -39,7 +39,6 @@ function do-sync-down() { if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi echo rsync -azq ${DESTRUCTIVE} ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} ${3:+--exclude=$3} -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 - rsync -azq ${DESTRUCTIVE} \ ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} \ ${3:+--exclude=$3} \ From 07ccb5063d174678606f10a012e53c00c070524e Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 12 Dec 2018 16:41:45 -0700 Subject: [PATCH 22/62] Fix bug in configuration only images --- dockerutils/image_conventions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dockerutils/image_conventions.py b/dockerutils/image_conventions.py index 0296c98..d4f96dc 100644 --- a/dockerutils/image_conventions.py +++ b/dockerutils/image_conventions.py @@ -38,7 +38,8 @@ def get_image_designation(image, config=None): def get_image_name(config, image): if image in config.sections(): if 'name' in config[image]: - if 'prefix' in config[image] and not config[image]['prefix'] in ['False', 'false', 'F', 'f']: + if 'prefix' not in config[image] or \ + ('prefix' in config[image] and not config[image]['prefix'] in ['False', 'false', 'F', 'f']): return f'{get_default_project_name()}-{config[image]["name"]}' else: return f'{config[image]["name"]}' From 19157576dbe4a8710dc02ecfde27d25f3e349826 Mon Sep 17 
00:00:00 2001 From: Dan Rapp Date: Mon, 17 Dec 2018 16:34:10 -0700 Subject: [PATCH 23/62] fixes #37 - error in security group handling --- scripts/create-dock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/create-dock b/scripts/create-dock index 700ba06..c342bd7 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -150,7 +150,7 @@ INSTANCE_ID=$(aws ec2 run-instances \ --tag-specifications "${tag_specifications}" \ --iam-instance-profile Name="${iam_instance_profile}" \ --key-name "${key_name}" \ - --security-group-ids "${security_group_ids}" \ + --security-group-ids ${security_group_ids} \ | grep InstanceId | awk -F '"' '{print $4}' \ ) From 90113386485f81531cec06a61dd1a22f4e624131 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Fri, 21 Dec 2018 06:32:34 -0500 Subject: [PATCH 24/62] fixes #38 - inadvertently overwriting configuration with versioneer info if present --- scripts/build-image | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/build-image b/scripts/build-image index 6e55ab8..b98c62d 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -85,10 +85,10 @@ if __name__ == '__main__': config.read(os.path.join('docker', 'dockerutils.cfg')) if os.path.isfile('setup.cfg'): - config = configparser.ConfigParser() - config.optionxform = str - config.read('setup.cfg') - if 'versioneer' in config: + config_versioneer = configparser.ConfigParser() + config_versioneer.optionxform = str + config_versioneer.read('setup.cfg') + if 'versioneer' in config_versioneer: gen_version_file() with pip_conf(root_dir): From 6f5ffe1ea39f4d0ff9465c9738f08bfd3c1ec89b Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 3 Jan 2019 22:19:23 -0700 Subject: [PATCH 25/62] Establish conventions for promoting new AMI to "production" Update nvidia drivers to 410.79 Single quote cmd on run-image command line --- docs/packer.md | 14 ++++++++++---- packer/configure-docker-v1.sh | 2 +- packer/resero-labs-nvidia-docker.packer | 
4 ++-- packer/setup-v1.sh | 16 +++++++++------- scripts/run-image | 2 +- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/docs/packer.md b/docs/packer.md index f32baea..dee7c4e 100644 --- a/docs/packer.md +++ b/docs/packer.md @@ -1,6 +1,12 @@ # To build new packer image -```bash -$ cd packer -$ packer build resero-labs-nvidia-docker.packer -``` \ No newline at end of file +1. Update version in ami-name in `packer/resero-labs-nvidia-docker.packer` +2. Update nvidia driver versions in `packer/setup-v1.sh` +3. Run the following: + ```bash + $ cd packer + $ packer build resero-labs-nvidia-docker.packer + ``` +4. after testing, use the AWS console to change the image currently named "resero-labs-nvidia-docker" to +"resero-labs-nvidia-docker-", and change the name of the image just created, +"resero-labs-nvidia-docker-latest" to "resero-labs-nvidia-docker" \ No newline at end of file diff --git a/packer/configure-docker-v1.sh b/packer/configure-docker-v1.sh index 6db232c..62e3553 100755 --- a/packer/configure-docker-v1.sh +++ b/packer/configure-docker-v1.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash sudo usermod -aG docker ubuntu sudo systemctl stop docker -sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /lib/systemd/system/docker.service +sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service sudo systemctl daemon-reload sudo systemctl start docker diff --git a/packer/resero-labs-nvidia-docker.packer b/packer/resero-labs-nvidia-docker.packer index e7d8096..55462ca 100644 --- a/packer/resero-labs-nvidia-docker.packer +++ b/packer/resero-labs-nvidia-docker.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-nvidia-docker", + "ami_name": "resero-labs-nvidia-docker-2019.01", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -27,7 +27,7 @@ "iam_instance_profile": "lanista-app", "subnet_id": "subnet-b8b440de", "tags": { - "Name": "resero-labs-nvidia-docker" + "Name": 
"resero-labs-nvidia-docker-latest" } } ], diff --git a/packer/setup-v1.sh b/packer/setup-v1.sh index fc5f63a..1e8fd77 100755 --- a/packer/setup-v1.sh +++ b/packer/setup-v1.sh @@ -7,24 +7,26 @@ sleep 30 sudo apt-get update sudo apt-get install -y \ apt-transport-https \ - aws-cli \ ca-certificates \ curl \ gcc \ make \ + python3 \ + python3-pip \ software-properties-common +pip3 install awscli --upgrade --user # get the latest nvidia drivers and install them -wget -P /tmp http://us.download.nvidia.com/tesla/396.44/NVIDIA-Linux-x86_64-396.44.run -chmod +x /tmp/NVIDIA-Linux-x86_64-396.44.run -sudo /tmp/NVIDIA-Linux-x86_64-396.44.run -silent +wget -qP /tmp http://us.download.nvidia.com/tesla/410.79/NVIDIA-Linux-x86_64-410.79.run +chmod +x /tmp/NVIDIA-Linux-x86_64-410.79.run +sudo /tmp/NVIDIA-Linux-x86_64-410.79.run -silent # now get docker and nvidia-docker curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - distribution=$(. 
/etc/os-release;echo $ID$VERSION_ID) -sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable' +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list sudo apt-get update -sudo apt-get install -y docker-ce=18.06.0~ce~3-0~ubuntu -sudo apt-get install -y nvidia-docker2 +sudo apt-get install -y docker-ce \ + nvidia-docker2 diff --git a/scripts/run-image b/scripts/run-image index 044d204..ff284a0 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -17,7 +17,7 @@ import urllib.request from dockerutils import * _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {image_name}:{image_tag} {cmd}' + '{volumes} {ports} {image_name}:{image_tag} \'{cmd}\'' def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. 
From 68e6a9428e9dd61a810933b52d38ed95b98997fc Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Tue, 8 Jan 2019 12:57:47 -0700 Subject: [PATCH 26/62] Rework AMI: * Turn of unattended-updates * Perform dist-upgrade when setting up AMI so latest and greatest is available --- packer/configure-docker-v1.sh | 6 ----- packer/etc-rc.local | 16 ----------- packer/resero-labs-nvidia-docker.packer | 36 +++++++++---------------- packer/{setup-v1.sh => setup.sh} | 22 +++++++++++---- packer/upgrade.sh | 7 +++++ scripts/stop-dock | 2 +- 6 files changed, 38 insertions(+), 51 deletions(-) delete mode 100755 packer/configure-docker-v1.sh delete mode 100755 packer/etc-rc.local rename packer/{setup-v1.sh => setup.sh} (57%) create mode 100755 packer/upgrade.sh diff --git a/packer/configure-docker-v1.sh b/packer/configure-docker-v1.sh deleted file mode 100755 index 62e3553..0000000 --- a/packer/configure-docker-v1.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash -sudo usermod -aG docker ubuntu -sudo systemctl stop docker -sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service -sudo systemctl daemon-reload -sudo systemctl start docker diff --git a/packer/etc-rc.local b/packer/etc-rc.local deleted file mode 100755 index 0117630..0000000 --- a/packer/etc-rc.local +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh -e -# -# rc.local -# -# This script is executed at the end of each multiuser runlevel. -# Make sure that the script will "exit 0" on success or any other -# value on error. -# -# In order to enable or disable this script just change the execution -# bits. -# -# By default this script does nothing. 
- -nvidia-smi - -exit 0 \ No newline at end of file diff --git a/packer/resero-labs-nvidia-docker.packer b/packer/resero-labs-nvidia-docker.packer index 55462ca..1e0e95a 100644 --- a/packer/resero-labs-nvidia-docker.packer +++ b/packer/resero-labs-nvidia-docker.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-nvidia-docker-2019.01", + "ami_name": "resero-labs-nvidia-docker-2019.01.1", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -35,39 +35,29 @@ "provisioners": [ { "type": "file", - "source": "setup-v1.sh", - "destination": "/home/ubuntu/setup-v1.sh" - }, - { - "type": "shell", - "inline": [ - "sudo /home/ubuntu/setup-v1.sh", - "rm /home/ubuntu/setup-v1.sh" - ] + "source": "upgrade.sh", + "destination": "/home/ubuntu/upgrade.sh" }, { "type": "file", - "source": "configure-docker-v1.sh", - "destination": "/home/ubuntu/configure-docker-v1.sh" + "source": "setup.sh", + "destination": "/home/ubuntu/setup.sh" }, { "type": "shell", "inline": [ - "sudo /home/ubuntu/configure-docker-v1.sh", - "rm /home/ubuntu/configure-docker-v1.sh" - ] - }, - { - "type": "file", - "source": "etc-rc.local", - "destination": "/home/ubuntu/rc.local" + "sudo /home/ubuntu/upgrade.sh" + ], + "expect_disconnect": true }, { "type": "shell", "inline": [ - "sudo cp /home/ubuntu/rc.local /etc/rc.local", - "rm /home/ubuntu/rc.local" - ] + "rm /home/ubuntu/upgrade.sh", + "sudo /home/ubuntu/setup.sh", + "rm /home/ubuntu/setup.sh" + ], + "pause_before": "30s" }, { "type": "shell", diff --git a/packer/setup-v1.sh b/packer/setup.sh similarity index 57% rename from packer/setup-v1.sh rename to packer/setup.sh index 1e8fd77..5386616 100755 --- a/packer/setup-v1.sh +++ b/packer/setup.sh @@ -1,10 +1,6 @@ #!/usr/bin/env bash -# wait just a bit to allow everything to settle down -sleep 30 - -# update apt and install dependencies -sudo apt-get update +# install dependencies sudo apt-get install -y \ apt-transport-https \ ca-certificates \ @@ -30,3 +26,19 
@@ curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.li sudo apt-get update sudo apt-get install -y docker-ce \ nvidia-docker2 + +# configure docker: +# put ubuntu user in docker group +# remove unix socket from docker config (we are going to allow TLS network sockets only) +# key/cert setup is in register-dock +sudo usermod -aG docker ubuntu +sudo systemctl stop docker +sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service +sudo systemctl daemon-reload +sudo systemctl start docker + +# disable unattended-updates +# kernel updates, though infrequent, can mess up both the docker daemon and the nvidia drivers +# rather than doing unattended-updates, we should periodically update the AMI by simply rebuilding +# the image +sudo sed -i 's/APT::Periodic::Unattended-Upgrade "1";/APT::Periodic::Unattended-Upgrade "0";/g' /etc/apt/apt.conf.d/20auto-upgrades diff --git a/packer/upgrade.sh b/packer/upgrade.sh new file mode 100755 index 0000000..842e6e3 --- /dev/null +++ b/packer/upgrade.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +sudo apt-get update +sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade +sudo reboot +sleep 10 + diff --git a/scripts/stop-dock b/scripts/stop-dock index 10b2bd4..772551f 100755 --- a/scripts/stop-dock +++ b/scripts/stop-dock @@ -9,7 +9,7 @@ NO_COLOR='\033[0m' confirm_stop() { if [ -z "$1" ]; then echo -e "Stop dock?" 
- read -e -p "Type enter to Cancel, h for Help, y to Create: " RESPONSE + read -e -p "Type enter to Cancel, h for Help, y to Stop Instance: " RESPONSE fi if [ "$RESPONSE" == "h" ]; then print_help; fi From f6c679ab80cf4e2a041596c7abaede3137452ee2 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Tue, 8 Jan 2019 16:10:57 -0700 Subject: [PATCH 27/62] remove single quotes on command for run-image --- scripts/run-image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-image b/scripts/run-image index ff284a0..044d204 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -17,7 +17,7 @@ import urllib.request from dockerutils import * _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {image_name}:{image_tag} \'{cmd}\'' + '{volumes} {ports} {image_name}:{image_tag} {cmd}' def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. 
From fac978c1de4056b1d2d0001a2a0588309deb944f Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 9 Jan 2019 14:14:34 -0700 Subject: [PATCH 28/62] Require python3.6 or above --- setup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.py b/setup.py index 15e0b74..5f39ff4 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,9 @@ #!/usr/bin/env python from os import path import versioneer +import sys +if sys.version_info < (3,6): + sys.exit('Sorry, Python < 3.6 is not supported') from setuptools import setup, find_packages From 8169bc972ce38f1bbb947b5e503463e84690058c Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 16 Jan 2019 11:04:34 -0700 Subject: [PATCH 29/62] Add ls-dock script --- scripts/create-dock | 2 +- scripts/ls-dock | 52 +++++++++++++++++++++++++++++++++++++++++++ scripts/register-dock | 3 ++- scripts/stop-dock | 11 +++++---- 4 files changed, 60 insertions(+), 8 deletions(-) create mode 100755 scripts/ls-dock diff --git a/scripts/create-dock b/scripts/create-dock index c342bd7..db6075c 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -163,7 +163,7 @@ if [ ! -z "$INSTANCE_ID" ]; then if [ $(echo $IP_ADDRESS | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 1 ]; then echo 'Registering secure remote docker api' - register-dock ubuntu "$IP_ADDRESS" "$MONIKER" + register-dock ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" fi echo "New EC2 instance available to dock at $IP_ADDRESS" diff --git a/scripts/ls-dock b/scripts/ls-dock new file mode 100755 index 0000000..e86a179 --- /dev/null +++ b/scripts/ls-dock @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +set -e + +# Default Values +GREEN='\033[0;32m' +NO_COLOR='\033[0m' + +print_help() { + echo "List dockx - Help" + echo + echo "Description" + echo " This script enumerates all docks defined and prints their current state." 
+ echo + echo "Usage" + echo " $ ls-dock" + echo + + exit 0 +} + +# Parse command line arguments in any order +while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. + case "${flag}" in + h) hflag='true';; + *) error "Unexpected option ${flag}" ;; + esac +done + +get_instance_id() { + aws ec2 describe-instances \ + --filters Name=private-ip-address,Values="$1" \ + --query 'Reservations[*].Instances[*].InstanceId' --output text +} + +# for each directory in ~/.docker, describe the instance +for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [ -z ${DOCK_MONIKER+x} ]; then + DOCK_MONIKER=$(basename $f) + else + DOCK_MONIKER=$DOCK_MONIKER" ($(basename $f))" + fi + if [ -z ${DOCK_INSTANCE_ID+x} ]; then + echo $DOCK_MONIKER": "$(aws ec2 describe-instances --filters Name=private-ip-address,Values="$DOCK_IP" --query 'Reservations[*].Instances[*].State.Name' --output text) + else + echo $DOCK_MONIKER": "$(aws ec2 describe-instances --instance-ids $DOCK_INSTANCE_ID --query 'Reservations[*].Instances[*].State.Name' --output text) + fi + + fi +done + diff --git a/scripts/register-dock b/scripts/register-dock index fb82a89..dcf719a 100755 --- a/scripts/register-dock +++ b/scripts/register-dock @@ -19,6 +19,7 @@ USER=${1:-none} DOCK_HOSTNAME=${2:-none} IP=${2:-none} MONIKER=${3:-$2} +INSTANCE_ID=${3:-none} PORT=2377 SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" @@ -158,5 +159,5 @@ ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl start docker' && export DOCKER_TLS_VERIFY=1 && export DOCKER_CERT_PATH=~/.docker/$IP && export DOCKER_HOST=tcp://$IP:$PORT && -printf "DOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && +printf 
"DOCK_INSTANCE_ID=$INSTANCE_ID\nDOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && docker version diff --git a/scripts/stop-dock b/scripts/stop-dock index 772551f..bff9ebe 100755 --- a/scripts/stop-dock +++ b/scripts/stop-dock @@ -29,12 +29,6 @@ print_help() { exit 0 } -if [[ -z "$MONIKER" ]]; then - print_help - exit -1 -fi - - # Parse command line arguments in any order while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. case "${flag}" in @@ -43,6 +37,11 @@ while getopts 'h' flag; do # if a character is followed by a colon, that argu esac done +# Help +if [ -n "$hflag" ] || [ "$RESPONSE" == "h" ]; then + print_help +fi + # Look up IP from moniker FOUND_MONIKER=false for f in $HOME/.docker/*; do From 594df906fc778b144ea3ad589b589303cc117b18 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 16 Jan 2019 14:54:16 -0700 Subject: [PATCH 30/62] Add ls-dock script --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 5f39ff4..bdaca43 100644 --- a/setup.py +++ b/setup.py @@ -34,6 +34,7 @@ 'scripts/dock', 'scripts/dock-sync', 'scripts/genversion', + 'scripts/ls-dock', 'scripts/publish-image', 'scripts/register-dock', 'scripts/run-image', From 34e2bd988f4d5e3ccc93313427120ca687ece1aa Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 22 Jan 2019 08:42:45 -0700 Subject: [PATCH 31/62] Python2 support (#39) * Support for Python 2 --- .travis.yml | 3 ++ README.md | 7 +++- dockerutils/_version.py | 1 + dockerutils/cd.py | 7 ++-- dockerutils/gen_version.py | 6 ++-- dockerutils/image_conventions.py | 38 +++++++++++++-------- scripts/build-image | 58 +++++++++++++++++++------------- scripts/genversion | 1 - scripts/publish-image | 32 +++++++++++------- scripts/run-image | 51 +++++++++++++--------------- setup.py | 12 ++++--- tests/test_general_components.py | 16 ++++----- tests/test_image_convetions.py 
| 6 ++-- 13 files changed, 137 insertions(+), 101 deletions(-) diff --git a/.travis.yml b/.travis.yml index 56c1b58..4462557 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,6 +4,9 @@ dist: xenial python: - "3.6" - "3.7" + - "3.5" + - "3.4" + - "2.7" install: - pip install codecov - pip install -e ".[test]" diff --git a/README.md b/README.md index 177235f..27baf3e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,9 @@ -[![TravisCI](https://api.travis-ci.org/resero-labs/docker-utils.svg?branch=master)](https://travis-ci.org/resero-labs/docker-utils) [![Coverage](https://codecov.io/gh/resero-labs/docker-utils/branch/master/graph/badge.svg)](https://codecov.io/gh/resero-labs/docker-utils) [![PyPi](https://img.shields.io/pypi/v/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![PyPi](https://img.shields.io/pypi/wheel/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![Python 3.7](https://img.shields.io/badge/python-3.7-blue.svg)](https://www.python.org/downloads/release/python-370/) [![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/) +[![TravisCI](https://api.travis-ci.org/resero-labs/docker-utils.svg?branch=master)](https://travis-ci.org/resero-labs/docker-utils) [![Coverage](https://codecov.io/gh/resero-labs/docker-utils/branch/master/graph/badge.svg)](https://codecov.io/gh/resero-labs/docker-utils) [![PyPi](https://img.shields.io/pypi/v/dockerutils.svg)](https://pypi.org/project/dockerutils/) [![PyPi](https://img.shields.io/pypi/wheel/dockerutils.svg)](https://pypi.org/project/dockerutils/) +[![Python 3.7](https://img.shields.io/badge/python-3.7-blue.svg)](https://www.python.org/downloads/release/python-370/) +[![Python 3.6](https://img.shields.io/badge/python-3.6-blue.svg)](https://www.python.org/downloads/release/python-360/) +[![Python 3.5](https://img.shields.io/badge/python-3.5-blue.svg)](https://www.python.org/downloads/release/python-350/) +[![Python 
3.4](https://img.shields.io/badge/python-3.4-blue.svg)](https://www.python.org/downloads/release/python-340/) +[![Python 2.7](https://img.shields.io/badge/python-2.7-blue.svg)](https://www.python.org/downloads/release/python-270/) # Docker Utilities/Patterns for Python Projects diff --git a/dockerutils/_version.py b/dockerutils/_version.py index 3f25ef3..72e9d17 100644 --- a/dockerutils/_version.py +++ b/dockerutils/_version.py @@ -10,6 +10,7 @@ """Git implementation of _version.py.""" +from __future__ import print_function import errno import os import re diff --git a/dockerutils/cd.py b/dockerutils/cd.py index 85ac0bf..0a38119 100644 --- a/dockerutils/cd.py +++ b/dockerutils/cd.py @@ -1,11 +1,12 @@ """Holds the current working directory context class. A python version of pushd/popd""" import os + # pylint: disable=too-few-public-methods -class cd: # pylint: disable=invalid-name +class cd: # pylint: disable=invalid-name """Context manager for changing the current working directory""" - def __init__(self, newPath): - self.new_path = os.path.expanduser(newPath) + def __init__(self, new_path): + self.new_path = os.path.expanduser(new_path) self.saved_path = os.getcwd() def __enter__(self): diff --git a/dockerutils/gen_version.py b/dockerutils/gen_version.py index 6619595..09c26f7 100644 --- a/dockerutils/gen_version.py +++ b/dockerutils/gen_version.py @@ -7,6 +7,7 @@ from importlib import import_module from . import get_root_dir + def gen_version_file(filename='_version.py.bld'): """ Generates a versioneer version file that can be used in @@ -43,9 +44,8 @@ def get_versions(): """.format(json.dumps(version, sort_keys=True, indent=4, separators=(',', ': ')))) except AssertionError as e: if str(e) != 'please set versioneer.versionfile_source': - raise(e) + raise e except Exception as e: if not str(type(e)) in ["", ""]: - raise(e) + raise e # otherwise, we are operating on a project that isn't versioneer... 
don't do anything - diff --git a/dockerutils/image_conventions.py b/dockerutils/image_conventions.py index d4f96dc..a03c011 100644 --- a/dockerutils/image_conventions.py +++ b/dockerutils/image_conventions.py @@ -1,26 +1,30 @@ """Functions that enforce conventions surrounding docker images.""" -import configparser +import sys import logging import os import getpass +if sys.version_info < (3, 0): + from ConfigParser import ConfigParser +else: + from configparser import ConfigParser logger = logging.getLogger(__name__) def get_root_dir(): - ''' + """ get_root_dir will return the root directory of the project that contains a docker file. The constraints for our project with regard to docker are that there id a docker directoy as a top level sub-directory under the project root. The project root directory name is the base name of the docker file. There are sub-directories under docker that serve as the modes of various docker files that will be build (e.g. dev, lanista, jenkins, etc.) :return: root_directory for docker builda - ''' + """ docker_dir = os.path.join(os.getcwd(), 'docker') if os.path.exists(docker_dir) and os.path.isdir(docker_dir): return os.getcwd() else: - raise ValueError(f'Unable to find docker directory. Invalid root: {os.getcwd()}') + raise ValueError('Unable to find docker directory. 
Invalid root: {dir}'.format(dir=os.getcwd())) def get_default_project_name(): @@ -30,26 +34,32 @@ def get_default_project_name(): def get_image_designation(image, config=None): if not config: os.path.join(get_root_dir(), os.path.join('docker', 'dockerutils.cfg')) - config = configparser.ConfigParser() + config = ConfigParser() config.read(os.path.join(get_root_dir(), os.path.join('docker', 'dockerutils.cfg'))) - return (get_image_name(config, image), get_image_tag(config, image)) + return get_image_name(config, image), get_image_tag(config, image) def get_image_name(config, image): if image in config.sections(): - if 'name' in config[image]: - if 'prefix' not in config[image] or \ - ('prefix' in config[image] and not config[image]['prefix'] in ['False', 'false', 'F', 'f']): - return f'{get_default_project_name()}-{config[image]["name"]}' + if 'name' in config.options(image): + if 'prefix' not in config.options(image) or \ + ('prefix' in config.options(image) and not config.get(image, 'prefix') in ['False', 'false', 'F', 'f']): + return '{proj_name}-{image_name}'.format( + proj_name=get_default_project_name(), + image_name=config.get(image, "name") + ) else: - return f'{config[image]["name"]}' - return f'{get_default_project_name()}-{image}' + return config.get(image, "name") + return '{proj_name}-{image_name}'.format( + proj_name=get_default_project_name(), + image_name=image + ) def get_image_tag(config, image): if image in config.sections(): - if 'tag' in config[image]: - return config[image]['tag'] + if 'tag' in config.options(image): + return config.get(image, 'tag') return getpass.getuser() diff --git a/scripts/build-image b/scripts/build-image index b98c62d..e4fb9a9 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -2,38 +2,44 @@ # Python script to build/clean containers associated for a project # + +from __future__ import print_function import argparse -import configparser import os import shlex import subprocess import sys - from dockerutils 
import * +if sys.version_info < (3, 0): + from ConfigParser import ConfigParser +else: + from configparser import ConfigParser def is_multistage(mode): - return 'as builder' in open(f'docker/{mode}/Dockerfile').read() + return 'as builder' in open('docker/{mode}/Dockerfile'.format(mode=mode)).read() -def run_pre_script(script: str, config: dict) -> int: - print(f'Running pre-build-script: "{script}"') +def run_pre_script(script, config): + print('Running pre-build-script: "{script}"'.format(script=script)) return subprocess.call(shlex.split(script), cwd=os.getcwd()) -def run_post_script(script: str, config: dict) -> int: - print(f'Running post-build-script: "{script}"') +def run_post_script(script, config): + print('Running post-build-script: "{script}"'.format(script=script)) return subprocess.call(shlex.split(script), cwd=os.getcwd()) -def build(image, image_name, image_tag, config={}, pull=False): +def build(image, image_name, image_tag, config=None, pull=False): + if config is None: + config = {} pre_script = config.get('pre_build_script', None) post_script = config.get('post_build_script', None) if pre_script: rc = run_pre_script(pre_script, config=config) if rc != 0: - print(f'pre-build-script failed: {rc}') + print('pre-build-script failed: {rc}'.format(rc=rc)) return rc rc = 0 @@ -44,14 +50,16 @@ def build(image, image_name, image_tag, config={}, pull=False): # if this is a multistage build and it follows the conventions, tag the builder image # otherwise, a prune will remove the layers used during the builder phase and subsequent # builds will take longer than required - rc = image_operation(f'docker build {pull_base} --compress -t {image_name}-builder:{image_tag} ' - f'-f docker/{image}/Dockerfile --target builder .') + rc = image_operation( + 'docker build {pull_base} --compress -t {image_name}-builder:{image_tag} -f docker/{image}/Dockerfile --target builder .' 
+ .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag)) if not rc: - rc = image_operation(f'docker build {pull_base} --compress -t {image_name}:{image_tag} ' - f'-f docker/{image}/Dockerfile .') + rc = image_operation( + 'docker build {pull_base} --compress -t {image_name}:{image_tag} -f docker/{image}/Dockerfile .' + .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag)) if rc != 0: - print(f'docker build failed: {rc}') + print('docker build failed: {rc}'.format(rc=rc)) return rc if post_script: @@ -61,15 +69,15 @@ def build(image, image_name, image_tag, config={}, pull=False): def clean(image, image_name, image_tag, pull=False): - rc = image_operation(f'docker rmi {image_name}:{image_tag}') + rc = image_operation('docker rmi {image_name}:{image_tag}'.format(image_name=image_name, image_tag=image_tag)) if is_multistage(image): - image_operation(f'docker rmi {image_name}-builder:{image_tag}') + image_operation('docker rmi {image_name}-builder:{image_tag}'.format(image_name=image_name, image_tag=image_tag)) return rc def image_operation(operation): print('\n\n============================================================================') - print(f'{operation}\n\n') + print('{operation}\n\n'.format(operation=operation)) return subprocess.call(shlex.split(operation), cwd=os.getcwd()) @@ -80,12 +88,12 @@ if __name__ == '__main__': print(str(e)) sys.exit(1) with cd(root_dir): - config = configparser.ConfigParser() + config = ConfigParser() config.optionxform = str config.read(os.path.join('docker', 'dockerutils.cfg')) if os.path.isfile('setup.cfg'): - config_versioneer = configparser.ConfigParser() + config_versioneer = ConfigParser() config_versioneer.optionxform = str config_versioneer.read('setup.cfg') if 'versioneer' in config_versioneer: @@ -100,7 +108,8 @@ if __name__ == '__main__': action='store_true') parser.add_argument("-i", "--image_name", help="use this image name rather than the default") 
parser.add_argument("-t", "--image_tag", help="use this image tag rather than the default") - parser.add_argument("-p", "--pull_base", help="pull the base image as part fo the build", action='store_true') + parser.add_argument("-p", "--pull_base", help="pull the base image as part fo the build", + action='store_true') args = parser.parse_args() images_to_build = [] @@ -126,12 +135,13 @@ if __name__ == '__main__': pull_FROM_on_force = False image_config = {} if image in config.sections(): - image_config = config[image] + image_config = config.options(image) if 'pull_FROM_on_force' in image_config: - pull_FROM_on_force = config[image]['pull_FROM_on_force'] + pull_FROM_on_force = config.get(image, 'pull_FROM_on_force') - if os.path.isfile(f'docker/{image}/Dockerfile'): - rc = fn(image, image_name, image_tag, config=image_config, pull=args.pull_base or (args.force_build_base and pull_FROM_on_force)) + if os.path.isfile('docker/{image}/Dockerfile'.format(image=image)): + rc = fn(image, image_name, image_tag, config=image_config, + pull=args.pull_base or (args.force_build_base and pull_FROM_on_force)) # because an image may not be present on the clean, ignore a non-zero return code if rc and not args.image == 'clean': sys.exit(rc) diff --git a/scripts/genversion b/scripts/genversion index 5a799bd..649e44b 100755 --- a/scripts/genversion +++ b/scripts/genversion @@ -5,4 +5,3 @@ from dockerutils import * if __name__ == '__main__': with cd(get_root_dir()): gen_version_file() - diff --git a/scripts/publish-image b/scripts/publish-image index 6f62b01..68e7f6c 100755 --- a/scripts/publish-image +++ b/scripts/publish-image @@ -1,21 +1,25 @@ #!/usr/bin/env python3 +from __future__ import print_function +import sys import argparse import base64 -import configparser import getpass import os import shlex import subprocess -import sys import traceback - import boto3 from dockerutils import * +if sys.version_info < (3, 0): + from ConfigParser import ConfigParser +else: + from 
configparser import ConfigParser -def docker_login_aws(user: str, password: str, endpoint: str): - return run(f'docker login -u {user} -p {password} {endpoint}') +def docker_login_aws(user, password, endpoint): + return run('docker login -u {user} -p {password} {endpoint}' + .format(user=user, password=password, endpoint=endpoint)) def docker_login_dockerhub(): @@ -23,11 +27,13 @@ def docker_login_dockerhub(): def docker_tag(image_name, image_tag, publication_tag): - return run(f'docker tag {image_name}:{image_tag} {publication_tag}') + return run('docker tag {image_name}:{image_tag} {publication_tag}' + .format(image_name=image_name, image_tag=image_tag, publication_tag=publication_tag)) def docker_push(publication_tag): - return run(f'docker push {publication_tag}') + return run('docker push {publication_tag}'.format(publication_tag=publication_tag)) + def aws_create_repo_if_necessary(ecr_client, image_name): try: @@ -39,14 +45,16 @@ def aws_create_repo_if_necessary(ecr_client, image_name): return -1 return 0 -def run(cmd: str): + +def run(cmd): print('\n\n============================================================================') - print(f'{cmd}\n\n') + print('{cmd}\n\n'.format(cmd=cmd)) return subprocess.call(shlex.split(cmd), cwd=os.getcwd()) + if __name__ == '__main__': with cd(get_root_dir()): - config = configparser.ConfigParser() + config = ConfigParser() config.optionxform = str config.read(os.path.join('docker', 'dockerutils.cfg')) @@ -67,8 +75,8 @@ if __name__ == '__main__': } if args.image in config.sections(): for key in push_config.keys(): - if not push_config[key] and key in config[args.image]: - push_config[key] = config[args.image][key] + if not push_config[key] and key in config.options(args.image): + push_config[key] = config.get(args.image, key) image_name, image_tag = get_image_designation(args.image, config) diff --git a/scripts/run-image b/scripts/run-image index 044d204..6217d7e 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ 
-2,23 +2,26 @@ # Python script to run containers associated with a project # +from __future__ import print_function +import sys import argparse -import configparser import datetime import getpass -import json import os import shlex import subprocess -import sys -import urllib.parse -import urllib.request from dockerutils import * +if sys.version_info < (3, 0): + from ConfigParser import ConfigParser +else: + from configparser import ConfigParser + _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ '{volumes} {ports} {image_name}:{image_tag} {cmd}' + def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. env_vars = {} @@ -26,20 +29,11 @@ def fetch_env_variables(config, image, args_env=None): for arg_env in args_env: env_var = arg_env.split('=') env_vars[env_var[0]] = env_var[1] - if 'env_var_names' in config.sections(): - endpoint = f"http://{os.environ.get(config['env_var_names']['remote_ip'])}:" \ - f"{os.environ.get(config['env_var_names']['remote_port'], '5000')}/config" - try: - with urllib.request.urlopen(endpoint, timeout=1) as response_f: - response = json.load(response_f) - env_vars = response.get('env_variables') or {} - except urllib.error.URLError: - pass if image in config and 'env' in config[image]: env_section = config[image]['env'] - for vars in config[env_section]: - env_vars[vars] = config[env_section][vars] - return ' '.join([f'-e {key}={value}' for key, value in env_vars.items()]) + for evars in config[env_section]: + env_vars[evars] = config[env_section][evars] + return ' '.join(['-e {key}={value}'.format(key=key, value=value) for key, value in env_vars.items()]) def run(mode, image_name, image_tag, **kwargs): @@ -52,12 +46,13 @@ def run(mode, image_name, image_tag, **kwargs): volumes = os.path.expandvars(volumes) if kwargs['network']: - kwargs['network'] = f"--network {kwargs['network']}" + kwargs['network'] = "--network 
{network}".format(network=kwargs['network']) timestamp = datetime.datetime.now().strftime("%y-%m-%d_%H.%M.%S") cmd = _base_cmd.format(image_name=image_name, image_tag=image_tag, - name=f"{getpass.getuser()}_{mode}_{timestamp}", + name="{user}_{mode}_{timestamp}".format( + user=getpass.getuser(), mode=mode, timestamp=timestamp), keep_container=kwargs['keep_container'], interactive=kwargs['interactive'], environment=kwargs['environment'], @@ -68,18 +63,18 @@ def run(mode, image_name, image_tag, **kwargs): cmd=kwargs['cmd'], init=kwargs['init']) print('\n\n============================================================================') - print(f'{cmd}\n\n') + print('{cmd}\n\n'.format(cmd=cmd)) return subprocess.call(shlex.split(cmd), cwd=os.getcwd()) if __name__ == '__main__': with cd(get_root_dir()): - config = configparser.ConfigParser() + config = ConfigParser() config.optionxform = str config.read(os.path.join('docker', 'dockerutils.cfg')) - if 'run_image' in config and 'synthetic_images' in config['run_image']: - image_types = get_image_types(config['run_image']['synthetic_images'].split(',')) + if 'run_image' in config.sections() and 'synthetic_images' in config.options('run_image'): + image_types = get_image_types(config.get('run_image', 'synthetic_images').split(',')) else: image_types = get_image_types() @@ -121,11 +116,11 @@ if __name__ == '__main__': if args.image in config.sections(): for key in run_config.keys(): - docked_key = f"{key}_docked" - if is_docked and not run_config[key] and docked_key in config[args.image]: - run_config[key] = config[args.image][docked_key] - elif not run_config[key] and key in config[args.image]: - run_config[key] = config[args.image][key] + docked_key = "{key}_docked".format(key=key) + if is_docked and not run_config[key] and docked_key in config.options(args.image): + run_config[key] = config.get(args.image, docked_key) + elif not run_config[key] and key in config.options(args.image): + run_config[key] = 
config.get(args.image, key) image_name, image_tag = get_image_designation(args.image, config) diff --git a/setup.py b/setup.py index bdaca43..6e740e4 100644 --- a/setup.py +++ b/setup.py @@ -2,8 +2,8 @@ from os import path import versioneer import sys -if sys.version_info < (3,6): - sys.exit('Sorry, Python < 3.6 is not supported') +if sys.version_info < (3,0): + from io import open from setuptools import setup, find_packages @@ -47,18 +47,22 @@ include_package_data=True, packages=find_packages(exclude=['tests*']), license="MIT License", - python_requires='>=3.6', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', ], install_requires=[ + 'ConfigParser;python_version<="2.7"', 'awscli', - 'boto3' + 'boto3', + 'future' ], extras_require={ 'dev': [ diff --git a/tests/test_general_components.py b/tests/test_general_components.py index 4b15af3..ab8931a 100644 --- a/tests/test_general_components.py +++ b/tests/test_general_components.py @@ -1,5 +1,5 @@ +import os import tempfile -from pathlib import Path from dockerutils.gen_version import gen_version_file from dockerutils.cd import cd from dockerutils import __version__ as project_version @@ -7,7 +7,7 @@ def gen_version(test_dir, tmp_ver_file): - test_env = Path(__file__).parent / test_dir + test_env = os.path.join(os.path.dirname(__file__), test_dir) with cd(test_env): gen_version_file(tmp_ver_file[1]) with open(tmp_ver_file[1], 'r') as f: @@ -16,7 +16,7 @@ def gen_version(test_dir, tmp_ver_file): def test_versioneer(): tmp_file = tempfile.mkstemp() version_contents = gen_version('sample-dir-versioneer', tmp_file) - assert f'"version": "{project_version}"' in version_contents + assert '"version": 
"{project_version}"'.format(project_version=project_version) in version_contents def test_non_versioneer(): tmp_file = tempfile.mkstemp() @@ -24,10 +24,10 @@ def test_non_versioneer(): assert version_contents == '' def test_pip_conf(): - test_env = Path(__file__).parent / 'sample-dir' + test_env = os.path.join(os.path.dirname(__file__), 'sample-dir') with cd(test_env): - pip_conf_file = test_env / 'pip.conf' - assert not pip_conf_file.exists() + pip_conf_file = os.path.join(test_env, 'pip.conf') + assert not os.path.exists(pip_conf_file) with pip_conf(test_env): - assert pip_conf_file.exists() - assert not pip_conf_file.exists() + assert os.path.exists(pip_conf_file) + assert not os.path.exists(pip_conf_file) diff --git a/tests/test_image_convetions.py b/tests/test_image_convetions.py index 2c57337..81ff2bb 100644 --- a/tests/test_image_convetions.py +++ b/tests/test_image_convetions.py @@ -1,15 +1,15 @@ -from pathlib import Path +import os from dockerutils.cd import cd from dockerutils.image_conventions import get_image_designation, get_image_types def test_image_designation(): - test_project = Path(__file__).parent / 'sample-dir' + test_project = os.path.join(os.path.dirname(__file__), 'sample-dir') with cd(test_project): designation = get_image_designation('test') assert designation == ('sample-dir-test', 'latest') def test_image_types(): - test_project = Path(__file__).parent / 'sample-dir' + test_project = os.path.join(os.path.dirname(__file__), 'sample-dir') with cd(test_project): assert get_image_types() == ['test'] assert get_image_types(['a', 'b']) == ['test', 'a', 'b'] From 97ba766160a611b448919d4633d102cc80b8caab Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Fri, 25 Jan 2019 08:31:49 -0700 Subject: [PATCH 32/62] Support for utilizing AWS DL AMI (ubuntu/conda version) as our base AMI (#40) * Support for utilizing AWS DL AMI (ubuntu/conda version) as our base AMI * Remove obsolete env setting * * Add scripts to build resero-labs-dlami from a base image 
that we create from the AWS Deep Learning AMI (see packer/notes.dlami.md) * remove anaconda2 and python2 versions or environments * configure jupyter to our liking * Fix instanceID bug in create-dock/register-dock * Get credstash retrieval of notebook password working * Fix aws configuration Add nb-dock script * Update Readme --- README.md | 27 +++++-- packer/aws_config.cfg | 5 ++ packer/configure-docker.sh | 11 +++ packer/configure-jupyter.sh | 38 ++++++++++ packer/jupyter_notebook_config.py | 72 +++++++++++++++++++ packer/notes.dlami.md | 17 +++++ ...ia-docker.packer => resero-labs-dl.packer} | 45 +++++++----- packer/setup.sh | 44 ------------ packer/update-anaconda.sh | 57 +++++++++++++++ packer/upgrade-docker.sh | 26 +++++++ packer/upgrade.sh | 2 +- scripts/create-dock | 25 +++++-- scripts/nb-dock | 33 +++++++++ scripts/register-dock | 3 +- scripts/run-image | 2 +- setup.py | 1 + 16 files changed, 332 insertions(+), 76 deletions(-) create mode 100644 packer/aws_config.cfg create mode 100755 packer/configure-docker.sh create mode 100755 packer/configure-jupyter.sh create mode 100644 packer/jupyter_notebook_config.py create mode 100644 packer/notes.dlami.md rename packer/{resero-labs-nvidia-docker.packer => resero-labs-dl.packer} (50%) delete mode 100755 packer/setup.sh create mode 100755 packer/update-anaconda.sh create mode 100755 packer/upgrade-docker.sh create mode 100755 scripts/nb-dock diff --git a/README.md b/README.md index 27baf3e..76d1f74 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,25 @@ [![Python 3.4](https://img.shields.io/badge/python-3.4-blue.svg)](https://www.python.org/downloads/release/python-340/) [![Python 2.7](https://img.shields.io/badge/python-2.7-blue.svg)](https://www.python.org/downloads/release/python-270/) -# Docker Utilities/Patterns for Python Projects +# Docker Utilities/Patterns -`dockerutils` defines a set of patterns and utilities to facilitate use of docker with a python app or library -facilitating: +Dockerutils is a set 
of utilities and conventions around their use. The intent behind these utilities is to provide a +very light layer of abstraction to: simplify interaction with docker; support seamlessly running docker locally or on +ec2 instance in AWS; allow for multiple images per project; etc. + +Perhaps the best way to think of dockerutils is that it embodies two entities, commonly used when working +with docker, and a set of complementary commands for working with those entities. The two entities are: +* "dock" - the server that is hosing docker (by default, localhost) +* image - the standard docker image + +The commands used to operate against these entities are: + +| CommandSet / Entity | Creation | Execution | Notebook | Utility | +|----------------------|:------------------------:|:--------------------:|:------------:|:----------------------------:| +| Dock | create-dock
destroy-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | +| Image | build-image | run-image | run-notebook | publish-image
transfer-image | + +Possible use cases include: * seperating development/test dependencies out of production container, e.g. production container vs. dev/test container * seperating data science notebook container from execution container * environment experimentation @@ -64,7 +79,7 @@ with this command: * DOCKER_DS_DONT_PULL - if set, the version of rappdw/docker-ds currently available will be used rather than pulling the latest version from docker hub. -* DOCKER_DS_DIFFS - if set, +* RESERO_JUPYTER_DIFFS - if set, on save, `.py` files and `.html` files for the notebook will be created in a `.diffs` subdirectory. ### Dock cli @@ -84,6 +99,10 @@ secure interaction with the docker server, as well as to support GPU utliziation `ssh-dock` opens a terminal on the remote dock with ssh +`ls-dock` list (including state) any created docks + +`nb-dock` run jupyter on the bare AMI of the dock and open a browser window to the notebook server + ## `dockerutils.cfg` Format Configuration in `docker/dockerutils.cfg` is used to configure behavior of the `dockerutils` scripts. 
diff --git a/packer/aws_config.cfg b/packer/aws_config.cfg new file mode 100644 index 0000000..66cc5cc --- /dev/null +++ b/packer/aws_config.cfg @@ -0,0 +1,5 @@ +[default] +region = us-west-2 + +[profile ds-notebook] +region = us-west-2 diff --git a/packer/configure-docker.sh b/packer/configure-docker.sh new file mode 100755 index 0000000..f95ee0f --- /dev/null +++ b/packer/configure-docker.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +# configure docker: +# put ubuntu user in docker group +# remove unix socket from docker config (we are going to allow TLS network sockets only) +# all key/cert setup is in register-dock script +sudo usermod -aG docker ubuntu +sudo systemctl stop docker +sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service +sudo systemctl daemon-reload +sudo systemctl start docker diff --git a/packer/configure-jupyter.sh b/packer/configure-jupyter.sh new file mode 100755 index 0000000..85900a7 --- /dev/null +++ b/packer/configure-jupyter.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +conda install -y \ + bokeh \ + cython \ + graphviz \ + holoviews \ + ipywidgets \ + jupyter \ + jupyterlab \ + matplotlib \ + networkx \ + nodejs \ + numpy \ + pandas \ + plotly \ + psutil \ + psycopg2 \ + python=3.6 \ + scipy \ + scikit-learn \ + seaborn \ + sympy \ +; + +conda install -y -c plotly plotly-orca + +pip install credstash + +jupyter serverextension enable --py jupyterlab +jupyter nbextension enable --py widgetsnbextension +jupyter labextension install \ + @jupyter-widgets/jupyterlab-manager \ + jupyter-matplotlib \ + jupyterlab_bokeh \ + @pyviz/jupyterlab_pyviz \ + @jupyterlab/plotly-extension \ + @mflevine/jupyterlab_html diff --git a/packer/jupyter_notebook_config.py b/packer/jupyter_notebook_config.py new file mode 100644 index 0000000..838954b --- /dev/null +++ b/packer/jupyter_notebook_config.py @@ -0,0 +1,72 @@ +# from jupyter_core.paths import jupyter_data_dir +# import subprocess +import os +# import errno +# import stat 
+from subprocess import check_call + +c = get_config() + +c.NotebookApp.allow_origin = '*' +c.NotebookApp.ip = '0.0.0.0' +c.NotebookApp.kernel_spec_manager_class = 'environment_kernels.EnvironmentKernelSpecManager' +c.EnvironmentKernelSpecManager.display_name_template="{}" +c.EnvironmentKernelSpecManager.conda_prefix_template="{}" +c.NotebookApp.iopub_data_rate_limit = 10000000000 +c.NotebookApp.open_browser = False + +session_params = '' +try: + from credstash import get_session_params, listSecrets, getSecret + session_params = get_session_params('ds-notebook', None) + items = [item['name'] for item in listSecrets(**session_params) if item['name'] in [ + 'notebook.password', 'notebook.token', 'github.client_id', 'github.client_secret', 'google.drive.client_id' + ]] +except Exception: + items = [] + +if 'notebook.password' in items: + c.NotebookApp.password = "{secret}".format(secret=getSecret('notebook.password', **session_params)) +if 'notebook.token' in items: + c.NotebookApp.token = "{secret}".format(secret=getSecret('notebook.token', **session_params)) +if 'github.client_id' in items: + c.GitHubConfig.client_id = "{secret}".format(secret=getSecret('github.client_id', **session_params)) +if 'github.client_secret' in items: + c.GitHubConfig.client_secret = "{secret}".format(secret=getSecret('github.client_secret', **session_params)) + +# # Generate a self-signed certificate +# if 'GEN_CERT' in os.environ: +# dir_name = jupyter_data_dir() +# pem_file = os.path.join(dir_name, 'notebook.pem') +# try: +# os.makedirs(dir_name) +# except OSError as exc: # Python >2.5 +# if exc.errno == errno.EEXIST and os.path.isdir(dir_name): +# pass +# else: +# raise +# # Generate a certificate if one doesn't exist on disk +# subprocess.check_call(['openssl', 'req', '-new', +# '-newkey', 'rsa:2048', +# '-days', '365', +# '-nodes', '-x509', +# '-subj', '/C=XX/ST=XX/L=XX/O=generated/CN=generated', +# '-keyout', pem_file, +# '-out', pem_file]) +# # Restrict access to the file +# 
os.chmod(pem_file, stat.S_IRUSR | stat.S_IWUSR) +# c.NotebookApp.certfile = pem_file + + +# Autosave .html and .py versions of the notebook for easier diffing with version control systems +def post_save(model, os_path, contents_manager): + """post-save hook for converting notebooks to .py scripts""" + if model['type'] != 'notebook': + return # only do this for notebooks + d, fname = os.path.split(os_path) + output_dir = os.path.join(d, '.diffs') + check_call(['jupyter', 'nbconvert', '--to', 'script', '--output-dir', output_dir, fname], cwd=d) + check_call(['jupyter', 'nbconvert', '--to', 'html', '--output-dir', output_dir, fname], cwd=d) + +if 'RESERO_JUPYTER_DIFFS' in os.environ and os.environ['RESERO_JUPYTER_DIFFS'] == '1': + c.FileContentsManager.post_save_hook = post_save \ No newline at end of file diff --git a/packer/notes.dlami.md b/packer/notes.dlami.md new file mode 100644 index 0000000..869212c --- /dev/null +++ b/packer/notes.dlami.md @@ -0,0 +1,17 @@ +# Notes on using the AWS DL AMI as the base image + +We are using verison 20.0 (ami-0d0ff0945ae093aea) currently, but have modified it (by hand) to create our base image +The modifications are as follows: + +1) ssh into image +2) Wait for /var/log/unattended-upgrades/unattended-upgrades.log to emit a line +that looks like: `2019-01-24 17:52:10,320 INFO All upgrades installed` (shoudl be aobut 15 minutes) +3) reboot +4) run upgrade.sh script + +Because this takes such a long time, go ahead and create an image from this instance +and use that as the base instead of building it into the packer script. + +Optionally, we may want to consider running the upgrade-docker script in this +directory, but we are not currently doing so. 
+ diff --git a/packer/resero-labs-nvidia-docker.packer b/packer/resero-labs-dl.packer similarity index 50% rename from packer/resero-labs-nvidia-docker.packer rename to packer/resero-labs-dl.packer index 1e0e95a..f3e44cb 100644 --- a/packer/resero-labs-nvidia-docker.packer +++ b/packer/resero-labs-dl.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-nvidia-docker-2019.01.1", + "ami_name": "resero-labs-dlami-2019.01", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -22,12 +22,12 @@ "sg-b93e0dc2", "sg-1bd90461" ], - "source_ami": "ami-09eb876a926ae86db", + "source_ami": "ami-092c2df3c28ec89b3", "ssh_username": "ubuntu", "iam_instance_profile": "lanista-app", "subnet_id": "subnet-b8b440de", "tags": { - "Name": "resero-labs-nvidia-docker-latest" + "Name": "resero-labs-dlami-latest" } } ], @@ -35,33 +35,44 @@ "provisioners": [ { "type": "file", - "source": "upgrade.sh", - "destination": "/home/ubuntu/upgrade.sh" + "source": "configure-docker.sh", + "destination": "/home/ubuntu/configure-docker.sh" }, { "type": "file", - "source": "setup.sh", - "destination": "/home/ubuntu/setup.sh" + "source": "update-anaconda.sh", + "destination": "/home/ubuntu/update-anaconda.sh" }, { - "type": "shell", - "inline": [ - "sudo /home/ubuntu/upgrade.sh" - ], - "expect_disconnect": true + "type": "file", + "source": "configure-jupyter.sh", + "destination": "/home/ubuntu/configure-jupyter.sh" + }, + { + "type": "file", + "source": "jupyter_notebook_config.py", + "destination": "/home/ubuntu/.jupyter/jupyter_notebook_config.py" }, { "type": "shell", "inline": [ - "rm /home/ubuntu/upgrade.sh", - "sudo /home/ubuntu/setup.sh", - "rm /home/ubuntu/setup.sh" - ], - "pause_before": "30s" + "mkdir /home/ubuntu/.aws" + ] + }, + { + "type": "file", + "source": "aws_config.cfg", + "destination": "/home/ubuntu/.aws/config" }, { "type": "shell", "inline": [ + "/home/ubuntu/configure-docker.sh", + "/home/ubuntu/update-anaconda.sh", + 
"/home/ubuntu/configure-jupyter.sh", + "rm /home/ubuntu/configure-docker.sh", + "rm /home/ubuntu/update-anaconda.sh", + "rm /home/ubuntu/configure-jupyter.sh", "sudo mkdir -p /data/workspaces", "sudo chown -R ubuntu /data" ] diff --git a/packer/setup.sh b/packer/setup.sh deleted file mode 100755 index 5386616..0000000 --- a/packer/setup.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -# install dependencies -sudo apt-get install -y \ - apt-transport-https \ - ca-certificates \ - curl \ - gcc \ - make \ - python3 \ - python3-pip \ - software-properties-common -pip3 install awscli --upgrade --user - -# get the latest nvidia drivers and install them -wget -qP /tmp http://us.download.nvidia.com/tesla/410.79/NVIDIA-Linux-x86_64-410.79.run -chmod +x /tmp/NVIDIA-Linux-x86_64-410.79.run -sudo /tmp/NVIDIA-Linux-x86_64-410.79.run -silent - -# now get docker and nvidia-docker -curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - -curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - -distribution=$(. 
/etc/os-release;echo $ID$VERSION_ID) -sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" -curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list -sudo apt-get update -sudo apt-get install -y docker-ce \ - nvidia-docker2 - -# configure docker: -# put ubuntu user in docker group -# remove unix socket from docker config (we are going to allow TLS network sockets only) -# key/cert setup is in register-dock -sudo usermod -aG docker ubuntu -sudo systemctl stop docker -sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service -sudo systemctl daemon-reload -sudo systemctl start docker - -# disable unattended-updates -# kernel updates, though infrequent, can mess up both the docker daemon and the nvidia drivers -# rather than doing unattended-updates, we should periodically update the AMI by simply rebuilding -# the image -sudo sed -i 's/APT::Periodic::Unattended-Upgrade "1";/APT::Periodic::Unattended-Upgrade "0";/g' /etc/apt/apt.conf.d/20auto-upgrades diff --git a/packer/update-anaconda.sh b/packer/update-anaconda.sh new file mode 100755 index 0000000..cc2b671 --- /dev/null +++ b/packer/update-anaconda.sh @@ -0,0 +1,57 @@ +#!/usr/bin/env bash + +# update anaconda +echo "Updating Anaconda" +conda update -y -n base -c defaults conda + +# remove the last lines of .profile, .dlamirc, and .zshrc which add anaconda/bin to the path, +# instead, we'll use "current" anaconda style of sourcing the profile +echo 'Converting to "modern" style conda' +sed -i '$ d' .profile +sed -i '$ d' .dlamirc +sed -i '$ d' .zshrc +echo ". /home/ubuntu/anaconda3/etc/profile.d/conda.sh" >> ~/.profile +echo "conda activate base" >> ~/.profile + +# change the MOTD to reflect the "current style", e.g. 
conda source +sudo sed -i 's/source activate/conda activate/g' /etc/update-motd.d/00-header +sudo sed -i 's/source activate/conda activate/g' /home/ubuntu/README + +# remove the python2 environments... +conda env remove -y --name amazonei_mxnet_p27 +conda env remove -y --name amazonei_tensorflow_p27 +conda env remove -y --name caffe2_p27 +conda env remove -y --name caffe_p27 +conda env remove -y --name chainer_p27 +conda env remove -y --name cntk_p27 +conda env remove -y --name mxnet_p27 +conda env remove -y --name python2 +conda env remove -y --name pytorch_p27 +conda env remove -y --name tensorflow_p27 +conda env remove -y --name theano_p27 + +rm -rf /home/ubuntu/anaconda2 + +sudo sed -i '/amazonei_mxnet_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/amazonei_tensorflow_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/caffe2_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/caffe_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/chainer_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/cntk_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/mxnet_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/python2/d' /etc/update-motd.d/00-header +sudo sed -i '/pytorch_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/tensorflow_p27/d' /etc/update-motd.d/00-header +sudo sed -i '/theano_p27/d' /etc/update-motd.d/00-header + +sudo sed -i '/amazonei_mxnet_p27/d' /home/ubuntu/README +sudo sed -i '/amazonei_tensorflow_p27/d' /home/ubuntu/README +sudo sed -i '/caffe2_p27/d' /home/ubuntu/README +sudo sed -i '/caffe_p27/d' /home/ubuntu/README +sudo sed -i '/chainer_p27/d' /home/ubuntu/README +sudo sed -i '/cntk_p27/d' /home/ubuntu/README +sudo sed -i '/mxnet_p27/d' /home/ubuntu/README +sudo sed -i '/python2/d' /home/ubuntu/README +sudo sed -i '/pytorch_p27/d' /home/ubuntu/README +sudo sed -i '/tensorflow_p27/d' /home/ubuntu/README +sudo sed -i '/theano_p27/d' /home/ubuntu/README diff --git a/packer/upgrade-docker.sh b/packer/upgrade-docker.sh new file mode 100755 index 
0000000..43b22f8 --- /dev/null +++ b/packer/upgrade-docker.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +# install dependencies +sudo apt-get install -y \ + apt-transport-https \ + ca-certificates \ + curl \ + gnupg2 + software-properties-common + +# remove docker and nvidia-docker2 so we can reinstall them from appropriate source +sudo apt-get remove -y \ + docker-ce \ + nvidia-docker2 + +# now get docker and nvidia-docker +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - +curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - +distribution=$(. /etc/os-release;echo $ID$VERSION_ID) +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" +curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list +sudo apt-get update +sudo apt-get install -y \ + docker-ce \ + nvidia-docker2 + diff --git a/packer/upgrade.sh b/packer/upgrade.sh index 842e6e3..1cb9ccb 100755 --- a/packer/upgrade.sh +++ b/packer/upgrade.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash sudo apt-get update -sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade +sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade sudo reboot sleep 10 diff --git a/scripts/create-dock b/scripts/create-dock index db6075c..e178512 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -2,7 +2,7 @@ set -e # Default Values -INSTANCE_TYPE="m5.xlarge" +INSTANCE_TYPE="c5.xlarge" MONIKER= @@ -20,7 +20,6 @@ check_aws_connectivity() { } check_aws_connectivity -INSTANCE_NAME="${USERNAME}-dock" confirm_create() { if [ -z "$1" ]; then @@ -64,6 +63,7 @@ print_help() { echo -e "\n -i instance-type\n The default instance type is ${INSTANCE_TYPE} (no GPU).\n Other options include p2.xlarge (GPU), m5.2xlarge, 
etc." echo -e "\n -a ami-id\n (Optional) The ami to use for the instance." + echo -e "\n -s skip-regisration\n (Optional) Skip running register-dock." echo -e "\n -h help\n This help" echo echo "Examples" @@ -84,21 +84,25 @@ get_private_ip() { # Parse command line arguments in any order +hflag='' nflag='' # dock name flag iflag='' # instance type aflag='' # ami flag mflag='' # moniker flag -while getopts 'hn:i:a:m:' flag; do # if a character is followed by a colon, that argument is expected to have an argument. +regflag='true' +while getopts 'hn:i:a:m:s' flag; do # if a character is followed by a colon, that argument is expected to have an argument. case "${flag}" in h) hflag='true';; n) nflag='true'; INSTANCE_NAME="${OPTARG}" ;; i) iflag='true'; INSTANCE_TYPE="${OPTARG}" ;; a) aflag='true'; AMI_ID="${OPTARG}" ;; m) mflag='true'; MONIKER="${OPTARG}" ;; + s) regflag='';; *) error "Unexpected option ${flag}" ;; esac done +INSTANCE_NAME="${USERNAME}-${MONIKER:-dock}" # Help if [ -n "$hflag" ] || [ "$RESPONSE" == "h" ]; then @@ -106,7 +110,7 @@ if [ -n "$hflag" ] || [ "$RESPONSE" == "h" ]; then fi if [ -z "$AMI_ID" ]; then - AMI_ID=$(aws ec2 describe-images --filters "Name=tag:Name,Values=resero-labs-nvidia-docker" --query 'Images[*].{ID:ImageId}' --output text) + AMI_ID=$(aws ec2 describe-images --filters "Name=tag:Name,Values=resero-labs-dlami" --query 'Images[*].{ID:ImageId}' --output text) if [ -z "$AMI_ID" ]; then echo "Unable to fetch default AMI ID." exit 1 @@ -161,10 +165,17 @@ if [ ! 
-z "$INSTANCE_ID" ]; then IP_ADDRESS=$(get_private_ip $INSTANCE_ID) - if [ $(echo $IP_ADDRESS | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 1 ]; then - echo 'Registering secure remote docker api' - register-dock ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" + if [ -n "$regflag" ]; then + if [ $(echo $IP_ADDRESS | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 1 ]; then + echo 'Registering secure remote docker api' + register-dock ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" + fi + else + mkdir -p ~/.docker/${IP_ADDRESS} + printf "DOCK_USER=ubuntu\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$IP_ADDRESS\nDOCK_IP=$IP_ADDRESS\n" > $HOME/.docker/${IP_ADDRESS}/connection_config.txt fi + printf "DOCK_INSTANCE_ID=$INSTANCE_ID" >> $HOME/.docker/${IP_ADDRESS}/connection_config.txt && + echo "New EC2 instance available to dock at $IP_ADDRESS" echo "try these commands:" diff --git a/scripts/nb-dock b/scripts/nb-dock new file mode 100755 index 0000000..d40ffc7 --- /dev/null +++ b/scripts/nb-dock @@ -0,0 +1,33 @@ +#!/bin/bash + +if [[ -z "$DOCKER_IP" && -z "$1" ]]; then + echo "You must either be docked, or provide a argument specifying the 'moniker' of the dock you want to run your notebook against" + exit -1 +fi + +if [ -n "$1" ]; then + # Look up IP from moniker + FOUND_MONIKER=false + for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [ $DOCK_MONIKER = $1 ]; then + FOUND_MONIKER=true + break + fi + fi + done + + if [ $FOUND_MONIKER = false ]; then + echo "Can't find dock configuration for $1" + exit -1 + fi + DOCKER_IP=$DOCK_IP +fi + +SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +echo "Starting Jupyter on ${DOCKER_IP}" + +ssh ${SSH_OPTIONS} ubuntu@$DOCKER_IP 'bash -l -c "jupyter lab >~/jupyter.out 2>&1 &"' +sleep 10 +open "http://$DOCKER_IP:8888/lab" diff --git a/scripts/register-dock 
b/scripts/register-dock index dcf719a..fb82a89 100755 --- a/scripts/register-dock +++ b/scripts/register-dock @@ -19,7 +19,6 @@ USER=${1:-none} DOCK_HOSTNAME=${2:-none} IP=${2:-none} MONIKER=${3:-$2} -INSTANCE_ID=${3:-none} PORT=2377 SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" @@ -159,5 +158,5 @@ ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl start docker' && export DOCKER_TLS_VERIFY=1 && export DOCKER_CERT_PATH=~/.docker/$IP && export DOCKER_HOST=tcp://$IP:$PORT && -printf "DOCK_INSTANCE_ID=$INSTANCE_ID\nDOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && +printf "DOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && docker version diff --git a/scripts/run-image b/scripts/run-image index 6217d7e..4b37654 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -92,7 +92,7 @@ if __name__ == '__main__': args = parser.parse_args() if args.use_gpu: - gpu = '--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all -e CPU_GPU_ENV=/gpu-env' + gpu = '--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all' else: gpu = '' if args.pre: diff --git a/setup.py b/setup.py index 6e740e4..c64518e 100644 --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ 'scripts/dock-sync', 'scripts/genversion', 'scripts/ls-dock', + 'scripts/nb-dock', 'scripts/publish-image', 'scripts/register-dock', 'scripts/run-image', From f212e210f2a5e28e9746dcd7e43b3bc8173e4a2a Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Fri, 25 Jan 2019 16:06:06 -0700 Subject: [PATCH 33/62] fix python2 travis build error --- versioneer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/versioneer.py b/versioneer.py index 64fea1c..7f9abef 100644 --- a/versioneer.py +++ b/versioneer.py @@ -277,16 +277,16 @@ """ from __future__ import print_function -try: - import configparser -except ImportError: - import ConfigParser 
as configparser import errno import json import os import re import subprocess import sys +if sys.version_info < (3, 0): + import ConfigParser as configparser +else: + import configparser class VersioneerConfig: From 53da97d30310bea0e7e92b247d079669d9bffd6b Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Mon, 28 Jan 2019 08:36:51 -0700 Subject: [PATCH 34/62] Update image to fix bug that prevented conda activate from working inside a shell started by Jupyter --- docs/packer.md | 16 +++++++++------- packer/notes.dlami.md | 6 ++++-- packer/resero-labs-dl.packer | 4 ++-- packer/update-anaconda.sh | 7 ++----- scripts/nb-dock | 6 ++++-- 5 files changed, 21 insertions(+), 18 deletions(-) diff --git a/docs/packer.md b/docs/packer.md index dee7c4e..44c39dc 100644 --- a/docs/packer.md +++ b/docs/packer.md @@ -1,12 +1,14 @@ # To build new packer image -1. Update version in ami-name in `packer/resero-labs-nvidia-docker.packer` -2. Update nvidia driver versions in `packer/setup-v1.sh` -3. Run the following: +1. Update version in ami-name in `packer/resero-labs-dl.packer` +2. Run the following: ```bash $ cd packer - $ packer build resero-labs-nvidia-docker.packer + $ packer build resero-labs-dl.packer ``` -4. after testing, use the AWS console to change the image currently named "resero-labs-nvidia-docker" to -"resero-labs-nvidia-docker-", and change the name of the image just created, -"resero-labs-nvidia-docker-latest" to "resero-labs-nvidia-docker" \ No newline at end of file +3. 
after testing, use the AWS console to change the image currently named "resero-labs-dlami" to +"resero-labs-dlami-", and change the name of the image just created, +"resero-labs-dlami-latest" to "resero-labs-dlami" + +**Note**: the `b` in the AMI name indicates the AWS Deep Learning AMI Version number that was used +as a base for the AMI \ No newline at end of file diff --git a/packer/notes.dlami.md b/packer/notes.dlami.md index 869212c..92a2c2f 100644 --- a/packer/notes.dlami.md +++ b/packer/notes.dlami.md @@ -1,11 +1,11 @@ # Notes on using the AWS DL AMI as the base image -We are using verison 20.0 (ami-0d0ff0945ae093aea) currently, but have modified it (by hand) to create our base image +We are using verison 21.0 (ami-0b294f219d14e6a82) currently, but have modified it (by hand) to create our base image The modifications are as follows: 1) ssh into image 2) Wait for /var/log/unattended-upgrades/unattended-upgrades.log to emit a line -that looks like: `2019-01-24 17:52:10,320 INFO All upgrades installed` (shoudl be aobut 15 minutes) +that looks like: `2019-01-24 17:52:10,320 INFO All upgrades installed` (should be aobut 15 minutes) 3) reboot 4) run upgrade.sh script @@ -15,3 +15,5 @@ and use that as the base instead of building it into the packer script. Optionally, we may want to consider running the upgrade-docker script in this directory, but we are not currently doing so. 
+## Watch for updates +[Deep Learning AMI (Ubuntu)](https://aws.amazon.com/marketplace/pp/B077GCH38C) diff --git a/packer/resero-labs-dl.packer b/packer/resero-labs-dl.packer index f3e44cb..f3a66df 100644 --- a/packer/resero-labs-dl.packer +++ b/packer/resero-labs-dl.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-dlami-2019.01", + "ami_name": "resero-labs-dlami-2019.01-b21.0", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -22,7 +22,7 @@ "sg-b93e0dc2", "sg-1bd90461" ], - "source_ami": "ami-092c2df3c28ec89b3", + "source_ami": "ami-0815dee169f6ee034", "ssh_username": "ubuntu", "iam_instance_profile": "lanista-app", "subnet_id": "subnet-b8b440de", diff --git a/packer/update-anaconda.sh b/packer/update-anaconda.sh index cc2b671..3d88063 100755 --- a/packer/update-anaconda.sh +++ b/packer/update-anaconda.sh @@ -7,11 +7,8 @@ conda update -y -n base -c defaults conda # remove the last lines of .profile, .dlamirc, and .zshrc which add anaconda/bin to the path, # instead, we'll use "current" anaconda style of sourcing the profile echo 'Converting to "modern" style conda' -sed -i '$ d' .profile -sed -i '$ d' .dlamirc -sed -i '$ d' .zshrc -echo ". /home/ubuntu/anaconda3/etc/profile.d/conda.sh" >> ~/.profile -echo "conda activate base" >> ~/.profile +echo ". /home/ubuntu/anaconda3/etc/profile.d/conda.sh" >> ~/.bashrc +conda init # change the MOTD to reflect the "current style", e.g. 
conda source sudo sed -i 's/source activate/conda activate/g' /etc/update-motd.d/00-header diff --git a/scripts/nb-dock b/scripts/nb-dock index d40ffc7..0017188 100755 --- a/scripts/nb-dock +++ b/scripts/nb-dock @@ -28,6 +28,8 @@ fi SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" echo "Starting Jupyter on ${DOCKER_IP}" -ssh ${SSH_OPTIONS} ubuntu@$DOCKER_IP 'bash -l -c "jupyter lab >~/jupyter.out 2>&1 &"' -sleep 10 +ssh ${SSH_OPTIONS} ubuntu@$DOCKER_IP 'jupyter lab >~/jupyter.out 2>&1 &' + +echo "Waiting for Jupyter to start... (20s)" +sleep 20 open "http://$DOCKER_IP:8888/lab" From 41a041e182a18fafe2fac83cc868ca0748a10c01 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Tue, 29 Jan 2019 14:15:59 -0700 Subject: [PATCH 35/62] Bug Fix: Config reading messed up. Fix it. --- scripts/build-image | 26 +++++++++++--------------- scripts/dock-sync | 10 ++++++++-- scripts/run-image | 8 ++++---- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/scripts/build-image b/scripts/build-image index e4fb9a9..cbbc95c 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -20,24 +20,22 @@ def is_multistage(mode): return 'as builder' in open('docker/{mode}/Dockerfile'.format(mode=mode)).read() -def run_pre_script(script, config): +def run_pre_script(script): print('Running pre-build-script: "{script}"'.format(script=script)) return subprocess.call(shlex.split(script), cwd=os.getcwd()) -def run_post_script(script, config): +def run_post_script(script): print('Running post-build-script: "{script}"'.format(script=script)) return subprocess.call(shlex.split(script), cwd=os.getcwd()) -def build(image, image_name, image_tag, config=None, pull=False): - if config is None: - config = {} - pre_script = config.get('pre_build_script', None) - post_script = config.get('post_build_script', None) +def build(image, image_name, image_tag, config, pull=False): + pre_script = config.get(image, 'pre_build_script') if config.has_option(image, 
'pre_build_script') else None + post_script = config.get(image, 'post_build_script') if config.has_option(image, 'post_build_script') else None if pre_script: - rc = run_pre_script(pre_script, config=config) + rc = run_pre_script(pre_script) if rc != 0: print('pre-build-script failed: {rc}'.format(rc=rc)) return rc @@ -63,7 +61,7 @@ def build(image, image_name, image_tag, config=None, pull=False): return rc if post_script: - rc = run_post_script(post_script, config=config) + rc = run_post_script(post_script) return rc @@ -96,7 +94,7 @@ if __name__ == '__main__': config_versioneer = ConfigParser() config_versioneer.optionxform = str config_versioneer.read('setup.cfg') - if 'versioneer' in config_versioneer: + if config.has_section('versioneer'): gen_version_file() with pip_conf(root_dir): @@ -134,13 +132,11 @@ if __name__ == '__main__': pull_FROM_on_force = False image_config = {} - if image in config.sections(): - image_config = config.options(image) - if 'pull_FROM_on_force' in image_config: - pull_FROM_on_force = config.get(image, 'pull_FROM_on_force') + if config.has_section(image) and config.has_option(image, 'pull_FROM_on_force'): + pull_FROM_on_force = config.get(image, 'pull_FROM_on_force') if os.path.isfile('docker/{image}/Dockerfile'.format(image=image)): - rc = fn(image, image_name, image_tag, config=image_config, + rc = fn(image, image_name, image_tag, config=config, pull=args.pull_base or (args.force_build_base and pull_FROM_on_force)) # because an image may not be present on the clean, ignore a non-zero return code if rc and not args.image == 'clean': diff --git a/scripts/dock-sync b/scripts/dock-sync index ad0e10b..adf30f6 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -7,6 +7,10 @@ # on sync down we need to use the `.-` filter syntax # also on sync down we don't want to use --delete and we add a third parameter that is an additional ignore file +GREEN='\033[0;32m' +RED='\033[0;31m' +NO_COLOR='\033[0m' + function do-sync-up() { echo 
"Syncing $1 to remote dock ($2)" if [ -e .dockerignore ]; then @@ -15,7 +19,8 @@ function do-sync-up() { local RESPONSE local DESTRUCTIVE="" - echo "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " + echo "" + echo -e "${GREEN}Use 'delete' option with rsync (destructive)? ${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" read RESPONSE if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi @@ -34,7 +39,8 @@ function do-sync-down() { local RESPONSE local DESTRUCTIVE="" - echo "Use 'delete' option with rsync (destructive)? Type enter to bypass, y to use 'delete': " + echo "" + echo -e "${GREEN}Use 'delete' option with rsync (destructive)? ${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" read RESPONSE if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi diff --git a/scripts/run-image b/scripts/run-image index 4b37654..08409d6 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -29,10 +29,10 @@ def fetch_env_variables(config, image, args_env=None): for arg_env in args_env: env_var = arg_env.split('=') env_vars[env_var[0]] = env_var[1] - if image in config and 'env' in config[image]: - env_section = config[image]['env'] - for evars in config[env_section]: - env_vars[evars] = config[env_section][evars] + if config.has_section(image) and config.has_option(image, 'env'): + env_section = config.get(image, 'env') + for evars in config.options(env_section): + env_vars[evars] = config.get(env_section, evars) return ' '.join(['-e {key}={value}'.format(key=key, value=value) for key, value in env_vars.items()]) From ff044fcc54e3325a1705c0202001d7dad3b6ae66 Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Wed, 30 Jan 2019 16:39:11 -0700 Subject: [PATCH 36/62] Added a few changes to support termination protection in docker-utils (#41) * Added a few changes to support termination protection in docker-utils * Added a few changes to get the termination protection deletion 
working --- scripts/create-dock | 5 +++-- scripts/destroy-dock | 22 +++++++++++++++++----- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/scripts/create-dock b/scripts/create-dock index e178512..6a9285e 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -150,6 +150,7 @@ INSTANCE_ID=$(aws ec2 run-instances \ --subnet-id ${subnet_id} \ --image-id $AMI_ID \ --instance-type "${INSTANCE_TYPE}" \ + --disable-api-termination \ --block-device-mappings "DeviceName='/dev/sda1',Ebs={VolumeSize=100,VolumeType='gp2'}" \ --tag-specifications "${tag_specifications}" \ --iam-instance-profile Name="${iam_instance_profile}" \ @@ -168,7 +169,7 @@ if [ ! -z "$INSTANCE_ID" ]; then if [ -n "$regflag" ]; then if [ $(echo $IP_ADDRESS | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 1 ]; then echo 'Registering secure remote docker api' - register-dock ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" + register-dock -f ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" fi else mkdir -p ~/.docker/${IP_ADDRESS} @@ -185,4 +186,4 @@ if [ ! -z "$INSTANCE_ID" ]; then echo " $ ssh ubuntu@$IP_ADDRESS" else echo "Failed to create instance." -fi \ No newline at end of file +fi diff --git a/scripts/destroy-dock b/scripts/destroy-dock index 77475f5..e14dcb5 100755 --- a/scripts/destroy-dock +++ b/scripts/destroy-dock @@ -3,6 +3,7 @@ set -e # Default Values MONIKER=${1:-"$DOCKER_IP"} +shift GREEN='\033[0;32m' NO_COLOR='\033[0m' @@ -22,18 +23,23 @@ print_help() { echo "Description" echo " This script uses the aws cli to terminate an existing ec2 dock instance." 
echo " Either the dock ip or the moniker must be provided" + echo "" + echo " Options:" + echo " -f Disable termination protection and then terminate" echo echo "Usage" - echo " $ destroy-dock moniker|ip" + echo " $ destroy-dock moniker|ip <-f>" echo exit 0 } # Parse command line arguments in any order -while getopts 'h' flag; do # if a character is followed by a colon, that argument is expected to have an argument. +disable_terminate_protection=false +while getopts 'hf' flag; do # if a character is followed by a colon, that argument is expected to have an argument. case "${flag}" in h) hflag='true';; + f) disable_terminate_protection=true;; *) error "Unexpected option ${flag}" ;; esac done @@ -72,10 +78,16 @@ if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then exit 0 fi +if [ -n "$INSTANCE_ID" ]; then + if ${disable_terminate_protection}; then + for I in $INSTANCE_ID; do + aws ec2 modify-instance-attribute --no-disable-api-termination --instance-id "$I" + done + fi + aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" --output text +fi + if [ -n "$f" ]; then rm -rf $f fi -if [ -n "$INSTANCE_ID" ]; then - aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" --output text -fi \ No newline at end of file From 579fbd16b375ad217c84bf6757254a9196ff1619 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Fri, 22 Feb 2019 10:57:36 -0700 Subject: [PATCH 37/62] closes #42 Remove prompt for destructive delete. Since we are echoing the rsync command line, it's easy to add the `--delete` on a copy and paste. --- scripts/dock-sync | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/dock-sync b/scripts/dock-sync index adf30f6..fe01cbf 100644 --- a/scripts/dock-sync +++ b/scripts/dock-sync @@ -19,10 +19,10 @@ function do-sync-up() { local RESPONSE local DESTRUCTIVE="" - echo "" - echo -e "${GREEN}Use 'delete' option with rsync (destructive)? 
${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" - read RESPONSE - if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi +# echo "" +# echo -e "${GREEN}Use 'delete' option with rsync (destructive)? ${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" +# read RESPONSE +# if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi echo rsync -azq ${DESTRUCTIVE} ${HAS_DOCKERIGNORE:+--filter=":- .dockerignore"} -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 rsync -azq ${DESTRUCTIVE} \ @@ -39,10 +39,10 @@ function do-sync-down() { local RESPONSE local DESTRUCTIVE="" - echo "" - echo -e "${GREEN}Use 'delete' option with rsync (destructive)? ${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" - read RESPONSE - if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi +# echo "" +# echo -e "${GREEN}Use 'delete' option with rsync (destructive)? ${RED}enter${GREEN} to bypass, ${RED}y${GREEN} to use 'delete': ${NO_COLOR}" +# read RESPONSE +# if [ "$RESPONSE" = "y" ]; then DESTRUCTIVE="--delete"; fi echo rsync -azq ${DESTRUCTIVE} ${HAS_DOCKERIGNORE:+--filter=".- .dockerignore"} ${3:+--exclude=$3} -e "ssh -o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" $1 $2 rsync -azq ${DESTRUCTIVE} \ From fe8ae110eec89ad3714e0c34db568ff4157c75e4 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Thu, 21 Mar 2019 10:24:29 -0600 Subject: [PATCH 38/62] Add -g option to run-notebook script --- scripts/run-notebook | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/scripts/run-notebook b/scripts/run-notebook index 25ec9d6..5721678 100755 --- a/scripts/run-notebook +++ b/scripts/run-notebook @@ -7,6 +7,23 @@ DOCKER_ENV="" . 
dock-sync +# Parse command line arguments in any order +gflag='' +regflag='true' +while getopts 'g' flag; do # if a character is followed by a colon, that argument is expected to have an argument. + case "${flag}" in + g) gflag='-g';; + *) error "Unexpected option ${flag}" ;; + esac +done + +DOCKER_TAG='latest' +GPU_OPTS='' +if [ -n "$gflag" ]; then + DOCKER_TAG='gpu' + GPU_OPTS='--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all' +fi + if [ "$DOCKER_DS_DIFFS" != "no_diffs" ]; then DOCKER_ENV="-e DOCKER_DS_DIFFS=1" fi @@ -15,16 +32,16 @@ if [ -d "./docker/notebook" ]; then build-image -f notebook if [ -z "$DOCKER_HOST" ] then - run-image notebook + run-image $gflag notebook else sync-up - run-image notebook + run-image $gflag notebook sync-down fi else # Script to run rappdw/docker-ds notebok against the current directory if [ "$DOCKER_DS_DONT_PULL" = "pull" ]; then - docker pull rappdw/docker-ds:latest + docker pull rappdw/docker-ds:$DOCKER_TAG fi date_stamp=$(date "+%Y_%m_%d_%H.%M.%S") @@ -46,12 +63,12 @@ else volume_mounts="--mount type=bind,source=$(pwd),target=/home/jovyan/project" fi fi - echo 'docker run --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it '$volume_mounts' -p 8888:8888 rappdw/docker-ds:latest' - docker run --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it $volume_mounts -p 8888:8888 rappdw/docker-ds:latest + echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it '$volume_mounts' -p 8888:8888 rappdw/docker-ds:'$DOCKER_TAG + docker run $GPU_OPTS --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it $volume_mounts -p 8888:8888 rappdw/docker-ds:$DOCKER_TAG else sync-up - echo 'docker run --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 
8888:8888 rappdw/docker-ds:latest' - docker run --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source="/data/workspaces/"$USER"/code/"${PWD##*/}",target=/home/jovyan/project" -v /data:/data -p 8888:8888 rappdw/docker-ds:latest + echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 8888:8888 rappdw/docker-ds:'$DOCKER_TAG + docker run --init $GPU_OPTS --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source="/data/workspaces/"$USER"/code/"${PWD##*/}",target=/home/jovyan/project" -v /data:/data -p 8888:8888 rappdw/docker-ds:$DOCKER_TAG sync-down fi From 08e0608e7e3a74bd23203e7d2007faf1b36de2ad Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Sat, 30 Mar 2019 09:32:26 -0600 Subject: [PATCH 39/62] Change candidate ami to 22.0 version of AWS DLAMI --- packer/notes.dlami.md | 2 +- packer/resero-labs-dl.packer | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packer/notes.dlami.md b/packer/notes.dlami.md index 92a2c2f..4310dfb 100644 --- a/packer/notes.dlami.md +++ b/packer/notes.dlami.md @@ -1,6 +1,6 @@ # Notes on using the AWS DL AMI as the base image -We are using verison 21.0 (ami-0b294f219d14e6a82) currently, but have modified it (by hand) to create our base image +We are using verison 22.0 (ami-01a4e5be5f289dd12) currently, but have modified it (by hand) to create our base image The modifications are as follows: 1) ssh into image diff --git a/packer/resero-labs-dl.packer b/packer/resero-labs-dl.packer index f3a66df..f504cff 100644 --- a/packer/resero-labs-dl.packer +++ b/packer/resero-labs-dl.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-dlami-2019.01-b21.0", + "ami_name": "resero-labs-dlami-2019.03-b22.0", "type": "amazon-ebs", "force_deregister": 
"true", "instance_type": "p3.2xlarge", @@ -22,12 +22,12 @@ "sg-b93e0dc2", "sg-1bd90461" ], - "source_ami": "ami-0815dee169f6ee034", + "source_ami": "ami-06d194a9639948a4c", "ssh_username": "ubuntu", "iam_instance_profile": "lanista-app", "subnet_id": "subnet-b8b440de", "tags": { - "Name": "resero-labs-dlami-latest" + "Name": "resero-labs-dlami-candidate" } } ], From 11970df1758e4d25499be5b072861e9bde26b095 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Sun, 31 Mar 2019 09:52:43 -0600 Subject: [PATCH 40/62] Handle different versions of docker starting with different -H options --- packer/configure-docker.sh | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/packer/configure-docker.sh b/packer/configure-docker.sh index f95ee0f..6201487 100755 --- a/packer/configure-docker.sh +++ b/packer/configure-docker.sh @@ -6,6 +6,20 @@ # all key/cert setup is in register-dock script sudo usermod -aG docker ubuntu sudo systemctl stop docker -sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service + +# remove the -H option from the docker service configuration +# +# some versions of docker have the line: +# ExecStart=/usr/bin/dockerd -H fd:// +# others have the line: +# ExecStart=/usr/bin/dockerd -H unix:// +# +if grep -q "/usr/bin/dockerd -H fd://" /lib/systemd/system/docker.service; then + sudo sed -i 's"dockerd\ -H\ fd://"dockerd"g' /lib/systemd/system/docker.service +fi +if grep -q "/usr/bin/dockerd -H unix://" /lib/systemd/system/docker.service; then + sudo sed -i 's"dockerd\ -H\ unix://"dockerd"g' /lib/systemd/system/docker.service +fi + sudo systemctl daemon-reload sudo systemctl start docker From 973b81577c71c52deab83252bfce540594a6c482 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Mon, 1 Apr 2019 12:11:15 -0600 Subject: [PATCH 41/62] update info on unattended-updates --- issues/45 - unattended-upgrades/notes.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 issues/45 - 
unattended-upgrades/notes.md diff --git a/issues/45 - unattended-upgrades/notes.md b/issues/45 - unattended-upgrades/notes.md new file mode 100644 index 0000000..2c7e872 --- /dev/null +++ b/issues/45 - unattended-upgrades/notes.md @@ -0,0 +1,23 @@ +# Issue description + +`unattended-upgrades` can cause issues with either Docker or NVidia drivers. +Symptoms include: + +* `NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running.` +* Docker deamon not starting + +It is believe these are caused when a system library or kernel update clobbers part of the +NVidia or docker libraries or configuration. + +# Work-around + +Turn off unattended-grades: + +1) edit `/etc/apt/apt.conf.d/20auto-upgrades` +2) change `APT::Periodic::Unattended-Upgrade "1";` +3) to `APT::Periodic::Unattended-Upgrade "0";` + +# Other possibilities + +Figure out which upgrades seem to impact nvidia and docker and then exclude +those in `/etc/apt/apt.conf.d/50unattended-upgrades` \ No newline at end of file From 6f5e036760b2ed95061334835f007a5e5af31ea2 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 3 Apr 2019 10:26:15 -0600 Subject: [PATCH 42/62] update reference to docker-ds --- README.md | 6 +++--- scripts/run-notebook | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 76d1f74..1611bb5 100644 --- a/README.md +++ b/README.md @@ -73,11 +73,11 @@ the docker file to the defined repository (AWS or Docker) ### Notebook cli `run-notebook` will start a docker container using either the notebook container found in the `docker/notebook` directory -if it exists, or [rappdw/docker-ds](https://github.com/rappdw/docker-ds) otherwise. The current directory will be mounted +if it exists, or [resero-labs/docker-ds](https://github.com/resero-labs/docker-ds) otherwise. The current directory will be mounted into the container for use in the Juypter notebook environment. 
There are a couple of environment variable to be aware of with this command: -* DOCKER_DS_DONT_PULL - if set, the version of rappdw/docker-ds currently available will be used rather than pulling +* DOCKER_DS_DONT_PULL - if set, the version of resero-labs/docker-ds currently available will be used rather than pulling the latest version from docker hub. * RESERO_JUPYTER_DIFFS - if set, on save, `.py` files and `.html` files for the notebook will be created in a `.diffs` subdirectory. @@ -141,7 +141,7 @@ name=dev ### Configuration-only Images If there is a docker container that does what you want already, you can create a configuration-only image by specifying `name`, `tag` and `prefix=False` in the configuration section for the image. For example the base notebook -image `rappdw/docker-ds` is often sufficient for running a Jupyter notebook against your code, as it auto detects a +image `resero-labs/docker-ds` is often sufficient for running a Jupyter notebook against your code, as it auto detects a `setup.py` upon container start and installs the module into the notebook environment. 
### Image Tagging diff --git a/scripts/run-notebook b/scripts/run-notebook index 5721678..0eaaf8c 100755 --- a/scripts/run-notebook +++ b/scripts/run-notebook @@ -39,9 +39,9 @@ if [ -d "./docker/notebook" ]; then sync-down fi else - # Script to run rappdw/docker-ds notebok against the current directory + # Script to run resero-labs/docker-ds notebok against the current directory if [ "$DOCKER_DS_DONT_PULL" = "pull" ]; then - docker pull rappdw/docker-ds:$DOCKER_TAG + docker pull resero-labs/docker-ds:$DOCKER_TAG fi date_stamp=$(date "+%Y_%m_%d_%H.%M.%S") @@ -63,12 +63,12 @@ else volume_mounts="--mount type=bind,source=$(pwd),target=/home/jovyan/project" fi fi - echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it '$volume_mounts' -p 8888:8888 rappdw/docker-ds:'$DOCKER_TAG - docker run $GPU_OPTS --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it $volume_mounts -p 8888:8888 rappdw/docker-ds:$DOCKER_TAG + echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it '$volume_mounts' -p 8888:8888 resero-labs/docker-ds:'$DOCKER_TAG + docker run $GPU_OPTS --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it $volume_mounts -p 8888:8888 resero-labs/docker-ds:$DOCKER_TAG else sync-up - echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 8888:8888 rappdw/docker-ds:'$DOCKER_TAG - docker run --init $GPU_OPTS --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source="/data/workspaces/"$USER"/code/"${PWD##*/}",target=/home/jovyan/project" -v /data:/data -p 8888:8888 rappdw/docker-ds:$DOCKER_TAG + echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' 
'$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 8888:8888 resero-labs/docker-ds:'$DOCKER_TAG + docker run --init $GPU_OPTS --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source="/data/workspaces/"$USER"/code/"${PWD##*/}",target=/home/jovyan/project" -v /data:/data -p 8888:8888 resero-labs/docker-ds:$DOCKER_TAG sync-down fi From 34f4fe92a83b787f2077ca44c00c4531e5e9c07b Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Wed, 3 Apr 2019 17:02:13 -0600 Subject: [PATCH 43/62] fix version bug --- scripts/build-image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/build-image b/scripts/build-image index cbbc95c..4d7e145 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -94,7 +94,7 @@ if __name__ == '__main__': config_versioneer = ConfigParser() config_versioneer.optionxform = str config_versioneer.read('setup.cfg') - if config.has_section('versioneer'): + if config_versioneer.has_section('versioneer'): gen_version_file() with pip_conf(root_dir): From c1eaf546edf02d1b1c4e9063e32392ab20944b50 Mon Sep 17 00:00:00 2001 From: Dan Rapp Date: Tue, 16 Apr 2019 09:50:29 -0600 Subject: [PATCH 44/62] Update notes for issue #45 --- issues/45 - unattended-upgrades/notes.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/issues/45 - unattended-upgrades/notes.md b/issues/45 - unattended-upgrades/notes.md index 2c7e872..4db1b66 100644 --- a/issues/45 - unattended-upgrades/notes.md +++ b/issues/45 - unattended-upgrades/notes.md @@ -20,4 +20,20 @@ Turn off unattended-grades: # Other possibilities Figure out which upgrades seem to impact nvidia and docker and then exclude -those in `/etc/apt/apt.conf.d/50unattended-upgrades` \ No newline at end of file +those in `/etc/apt/apt.conf.d/50unattended-upgrades` + +# Examples of others with similar issues + +* [AWS G2 GPU 
vs. Unattended-upgrade](https://lodge.glasgownet.com/2017/03/21/aws-g2-gpu-vs-unattended-upgrade/comment-page-1/) +* [EC2 cannot reach RDS anymore](https://forums.aws.amazon.com/message.jspa?messageID=774087) (see post on 3/21/2017 @ 12:37pm) +* [Does not support the K520 GRID GPU as used on AWS G2 GPU instances](https://bugs.launchpad.net/ubuntu/+source/nvidia-graphics-drivers-375/+bug/1674666) +* [Tensorflow, CUDA, and CudNN on Ubuntu 16.04 with Titan X](https://aichamp.wordpress.com/category/nvidia/) (mentions disabling unattended-upgrades "so machine does not update the driver") +* [NVIDIA_SMI has failed because it couldn't communicate with the NVIDIA driver](https://devtalk.nvidia.com/default/topic/1000340/cuda-setup-and-installation/-quot-nvidia-smi-has-failed-because-it-couldn-t-communicate-with-the-nvidia-driver-quot-ubuntu-16-04/2) +``` +three times happened to me! +my system environment: Ubuntu 16+NVIDIA Driver 384.90 +describe: I am sure it could work before, but after some days(maybe 30 days or more), run command "nvidia-smi", it reminds me:"NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running." +reason:Ubuntu 16 update its kernel automatically! you can check the grub log file, or run command "cat /etc/apt/apt.conf.d/10periodic", you can see the last line:“Unattended-upgrade "1" ” +when the kernel updated, the nvidia driver couldnt work properly. +solution:downgrade the kernel, or select the lower version kernel, or delete the latest version kernel, or set "Unattended-upgrade" as 0, or reinstall the Nvidia driver . 
+``` \ No newline at end of file From 756d657bfe9d35eacf6a9458d94f7d504e053ca5 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Fri, 19 Apr 2019 11:38:31 -0600 Subject: [PATCH 45/62] Turns off unattended-upgrades (#46) * Fixes issue #45 Turns of unattended-upgrades Updates build instructions Remove obsolete ~/.aws/config * Fix typo in instructions * Fix typo in instructions --- packer/aws_config.cfg | 5 --- packer/disable-unattended-upgrades.sh | 3 ++ packer/notes.dlami.md | 64 ++++++++++++++++++++++----- packer/resero-labs-dl.packer | 16 +++---- 4 files changed, 61 insertions(+), 27 deletions(-) delete mode 100644 packer/aws_config.cfg create mode 100755 packer/disable-unattended-upgrades.sh diff --git a/packer/aws_config.cfg b/packer/aws_config.cfg deleted file mode 100644 index 66cc5cc..0000000 --- a/packer/aws_config.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[default] -region = us-west-2 - -[profile ds-notebook] -region = us-west-2 diff --git a/packer/disable-unattended-upgrades.sh b/packer/disable-unattended-upgrades.sh new file mode 100755 index 0000000..429b498 --- /dev/null +++ b/packer/disable-unattended-upgrades.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +sudo sed -i 's/APT::Periodic::Unattended-Upgrade "1";/APT::Periodic::Unattended-Upgrade "0";/' /etc/apt/apt.conf.d/20auto-upgrades diff --git a/packer/notes.dlami.md b/packer/notes.dlami.md index 4310dfb..4729214 100644 --- a/packer/notes.dlami.md +++ b/packer/notes.dlami.md @@ -1,19 +1,59 @@ -# Notes on using the AWS DL AMI as the base image +# How to Build/Deploy a Resero-labs dlami We are using verison 22.0 (ami-01a4e5be5f289dd12) currently, but have modified it (by hand) to create our base image -The modifications are as follows: -1) ssh into image -2) Wait for /var/log/unattended-upgrades/unattended-upgrades.log to emit a line +1) create instance of DLAMI. (Watch for updates [Deep Learning AMI (Ubuntu)](https://aws.amazon.com/marketplace/pp/B077GCH38C).) 
+ + `create-dock -a ami-01a4e5be5f289dd12 -s -m base-ami -i p3.2xlarge` +2) ssh into image. + + `ssh-dock base-ami` +3) Wait for /var/log/unattended-upgrades/unattended-upgrades.log to emit a line. that looks like: `2019-01-24 17:52:10,320 INFO All upgrades installed` (should be aobut 15 minutes) -3) reboot -4) run upgrade.sh script -Because this takes such a long time, go ahead and create an image from this instance -and use that as the base instead of building it into the packer script. +4) reboot + + ```bash + stop-dock base-ami + start-dock base-ami + ``` +5) run upgrade.sh script + + ```bash + scp upgrade.sh :~/ + ssh-dock base-ami + ./upgrade.sh + ``` +6) reboot and ensure that nvidia drivers still good + + `nvidia-smi` +7) Create an image from this instance + + `aws ec2 create-image --instance-id i-0238495638422d858 --name resero-labs-dlami-base-22.0-2019.04` + + (Instance id can be found in the console or in `~/.docker//connection_config.txt`. + AMI name should follow the convention `resero-labs-dlami-base--`) +8) Terminate the base-ami dock + + `destroy-dock base-ami -f` +9) Use the ami from previous step as the base ami in the packer script + + ```bash + edit resero-labs-dl.packer and update both the "name" and "source_ami" values appropriately + packer build resero-labs-dl.packer + ``` + +10) Validate the new ami + + ```bash + create-dock -a -m test-ami -i p3.2xlarge + source dock test-ami + docker images + ssh-dock + nvidia-smi + castoff + destroy-dock test-ami -f + ``` -Optionally, we may want to consider running the upgrade-docker script in this -directory, but we are not currently doing so. 
+11) Switch names of previous resero-labs-dlami with the one just created -## Watch for updates -[Deep Learning AMI (Ubuntu)](https://aws.amazon.com/marketplace/pp/B077GCH38C) diff --git a/packer/resero-labs-dl.packer b/packer/resero-labs-dl.packer index f504cff..90c4f88 100644 --- a/packer/resero-labs-dl.packer +++ b/packer/resero-labs-dl.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-dlami-2019.03-b22.0", + "ami_name": "resero-labs-dlami-2019.03-b22.0-2019.04", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -22,7 +22,7 @@ "sg-b93e0dc2", "sg-1bd90461" ], - "source_ami": "ami-06d194a9639948a4c", + "source_ami": "ami-0ba505ea799c46918", "ssh_username": "ubuntu", "iam_instance_profile": "lanista-app", "subnet_id": "subnet-b8b440de", @@ -53,16 +53,10 @@ "source": "jupyter_notebook_config.py", "destination": "/home/ubuntu/.jupyter/jupyter_notebook_config.py" }, - { - "type": "shell", - "inline": [ - "mkdir /home/ubuntu/.aws" - ] - }, { "type": "file", - "source": "aws_config.cfg", - "destination": "/home/ubuntu/.aws/config" + "source": "disable-unattended-upgrades.sh", + "destination": "/home/ubuntu/disable-unattended-upgrades.sh" }, { "type": "shell", @@ -70,9 +64,11 @@ "/home/ubuntu/configure-docker.sh", "/home/ubuntu/update-anaconda.sh", "/home/ubuntu/configure-jupyter.sh", + "/home/ubuntu/disable-unattended-upgrades.sh", "rm /home/ubuntu/configure-docker.sh", "rm /home/ubuntu/update-anaconda.sh", "rm /home/ubuntu/configure-jupyter.sh", + "rm /home/ubuntu/disable-unattended-upgrades.sh", "sudo mkdir -p /data/workspaces", "sudo chown -R ubuntu /data" ] From 8887688b1884e676767e994fb9e5c3015f5f1d38 Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Fri, 19 Apr 2019 11:39:11 -0600 Subject: [PATCH 46/62] Added changes to support AWS connectivity using environment variables (#47) * Added changes to support AWS connectivity using environment variables * Adjusted the run notebook to correctly expand the aws 
variables when being run --- scripts/run-image | 28 ++++++++++++++++++++++++++-- scripts/run-notebook | 24 +++++++++++++++++------- 2 files changed, 43 insertions(+), 9 deletions(-) diff --git a/scripts/run-image b/scripts/run-image index 08409d6..f9c9cbd 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -11,6 +11,8 @@ import os import shlex import subprocess +from string import Template + from dockerutils import * if sys.version_info < (3, 0): from ConfigParser import ConfigParser @@ -36,6 +38,24 @@ def fetch_env_variables(config, image, args_env=None): return ' '.join(['-e {key}={value}'.format(key=key, value=value) for key, value in env_vars.items()]) +def populate_aws_env_variables(): + AWS_ENVS = [ + 'AWS_ACCESS_KEY_ID', + 'AWS_SECRET_ACCESS_KEY', + 'AWS_DEFAULT_REGION', + 'AWS_REGION', + 'AWS_SESSION_TOKEN', + 'AWS_SECURITY_TOKEN' + ] + + variables = {} + for test_env in AWS_ENVS: + if os.getenv(test_env): + variables[test_env] = "${" + test_env + "}" + + return ' '.join(['-e {key}={value}'.format(key=k, value=v) for k,v in variables.items()]) + + def run(mode, image_name, image_tag, **kwargs): user = getpass.getuser() volumes = kwargs['volumes'].format( @@ -64,7 +84,11 @@ def run(mode, image_name, image_tag, **kwargs): init=kwargs['init']) print('\n\n============================================================================') print('{cmd}\n\n'.format(cmd=cmd)) - return subprocess.call(shlex.split(cmd), cwd=os.getcwd()) + + # Since we are using secure env values I don't want those to print in the above commmand, but + # they need to be expanded for the subprocess.call + expanded_cmd = Template(cmd).substitute(os.environ) + return subprocess.call(shlex.split(expanded_cmd), cwd=os.getcwd()) if __name__ == '__main__': @@ -101,7 +125,7 @@ if __name__ == '__main__': init = '--init' run_config = { - 'environment': fetch_env_variables(config, args.image, args.env), + 'environment': fetch_env_variables(config, args.image, args.env) + 
populate_aws_env_variables(), 'keep_container': args.keep or '--rm', 'interactive': '-d' if args.keep else '-it', 'gpu': gpu, diff --git a/scripts/run-notebook b/scripts/run-notebook index 0eaaf8c..c006cfa 100755 --- a/scripts/run-notebook +++ b/scripts/run-notebook @@ -4,6 +4,7 @@ DOCKER_DS_DONT_PULL=${DOCKER_DS_DONT_PULL:-pull} DOCKER_DS_DIFFS=${DOCKER_DS_DIFFS:-no_diffs} DOCKER_ENV="" +NOTEBOOK_IMAGE=resero/docker-ds . dock-sync @@ -28,6 +29,14 @@ if [ "$DOCKER_DS_DIFFS" != "no_diffs" ]; then DOCKER_ENV="-e DOCKER_DS_DIFFS=1" fi +AWS_ENV_NAMES=('AWS_ACCESS_KEY_ID' 'AWS_SECRET_ACCESS_KEY' 'AWS_DEFAULT_REGION' 'AWS_REGION' 'AWS_SESSION_TOKEN' 'AWS_SECURITY_TOKEN') +AWS_ENV='' +for aws_env in ${AWS_ENV_NAMES[@]}; do + if [ ! -z "${!aws_env}" ]; then + AWS_ENV+=" -e $aws_env=\${$aws_env}" + fi +done + if [ -d "./docker/notebook" ]; then build-image -f notebook if [ -z "$DOCKER_HOST" ] @@ -41,7 +50,7 @@ if [ -d "./docker/notebook" ]; then else # Script to run resero-labs/docker-ds notebok against the current directory if [ "$DOCKER_DS_DONT_PULL" = "pull" ]; then - docker pull resero-labs/docker-ds:$DOCKER_TAG + docker pull $NOTEBOOK_IMAGE:$DOCKER_TAG fi date_stamp=$(date "+%Y_%m_%d_%H.%M.%S") @@ -63,13 +72,14 @@ else volume_mounts="--mount type=bind,source=$(pwd),target=/home/jovyan/project" fi fi - echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab --rm -it '$volume_mounts' -p 8888:8888 resero-labs/docker-ds:'$DOCKER_TAG - docker run $GPU_OPTS --init --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it $volume_mounts -p 8888:8888 resero-labs/docker-ds:$DOCKER_TAG + cmd='docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab '$AWS_ENV' --rm -it '$volume_mounts' -p 8888:8888 '$NOTEBOOK_IMAGE':'$DOCKER_TAG + echo $cmd + eval $cmd else sync-up - echo 'docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e 
NOTEBOOK_MODE=lab --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 8888:8888 resero-labs/docker-ds:'$DOCKER_TAG - docker run --init $GPU_OPTS --name $USER"_notebook_"$date_stamp $DOCKER_ENV -e NOTEBOOK_MODE=lab --rm -it --mount type=bind,source="/data/workspaces/"$USER"/code/"${PWD##*/}",target=/home/jovyan/project" -v /data:/data -p 8888:8888 resero-labs/docker-ds:$DOCKER_TAG + cmd='docker run '$GPU_OPTS' --init --name '$USER'_notebook_'$date_stamp' '$DOCKER_ENV' -e NOTEBOOK_MODE=lab '$AWS_ENV' --rm -it --mount type=bind,source=/data/workspaces/'$USER'/code/'${PWD##*/}',target=/home/jovyan/project -v /data:/data -p 8888:8888 '$NOTEBOOK_IMAGE':'$DOCKER_TAG + echo $cmd + eval $cmd sync-down fi - -fi \ No newline at end of file +fi From 1b252f1ec32a313ab5ec3db4ad84476701c49080 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Fri, 31 May 2019 09:39:08 -0600 Subject: [PATCH 47/62] Dwr/arg support (#49) * add general arg support * fixes #48 - Add build-args support * Address issue caught in PR review --- scripts/build-image | 10 ++++++---- scripts/run-image | 13 +++++-------- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/scripts/build-image b/scripts/build-image index 4d7e145..ccb7a1d 100755 --- a/scripts/build-image +++ b/scripts/build-image @@ -42,6 +42,8 @@ def build(image, image_name, image_tag, config, pull=False): rc = 0 pull_base = '' + args = config.get(image, 'build_args') if config.has_option(image, 'build_args') else '' + if pull: pull_base = '--pull' if is_multistage(image): @@ -49,12 +51,12 @@ def build(image, image_name, image_tag, config, pull=False): # otherwise, a prune will remove the layers used during the builder phase and subsequent # builds will take longer than required rc = image_operation( - 'docker build {pull_base} --compress -t {image_name}-builder:{image_tag} -f docker/{image}/Dockerfile --target builder .' 
- .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag)) + 'docker build {pull_base} --compress -t {image_name}-builder:{image_tag} -f docker/{image}/Dockerfile --target builder {args} .' + .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag, args=args)) if not rc: rc = image_operation( - 'docker build {pull_base} --compress -t {image_name}:{image_tag} -f docker/{image}/Dockerfile .' - .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag)) + 'docker build {pull_base} --compress -t {image_name}:{image_tag} -f docker/{image}/Dockerfile {args} .' + .format(pull_base=pull_base, image_name=image_name, image=image, image_tag=image_tag, args=args)) if rc != 0: print('docker build failed: {rc}'.format(rc=rc)) diff --git a/scripts/run-image b/scripts/run-image index f9c9cbd..5c44a4b 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -21,7 +21,7 @@ else: _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {image_name}:{image_tag} {cmd}' + '{volumes} {ports} {args} {image_name}:{image_tag} {cmd}' def fetch_env_variables(config, image, args_env=None): @@ -78,6 +78,7 @@ def run(mode, image_name, image_tag, **kwargs): environment=kwargs['environment'], network=kwargs['network'], ports=kwargs['ports'], + args=kwargs['args'], volumes=volumes, gpu=kwargs['gpu'], cmd=kwargs['cmd'], @@ -104,13 +105,12 @@ if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("image", choices=image_types, help="Docker image to run") + parser.add_argument("-a", "--args", help="general docker arguments", default='') parser.add_argument("-k", "--keep", help="keep the image after execution", action='store_true') parser.add_argument("-c", "--command", help="Command for image override") parser.add_argument("-n", "--network", help="Network for image override", default='') parser.add_argument("-g", 
"--use-gpu", dest='use_gpu', default=False, action='store_true', help="Start the container with gpu support") - parser.add_argument("-p", "--pre", default=False, action='store_true', - help="use pre 1.25 API") parser.add_argument("-e", "--env", action='append', help="environment variables to pass to running container, e.g. foo=bar") args = parser.parse_args() @@ -119,10 +119,6 @@ if __name__ == '__main__': gpu = '--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all' else: gpu = '' - if args.pre: - init = '' - else: - init = '--init' run_config = { 'environment': fetch_env_variables(config, args.image, args.env) + populate_aws_env_variables(), @@ -133,7 +129,8 @@ if __name__ == '__main__': 'volumes': '', 'ports': '', 'cmd': args.command or '', - 'init': init + 'init': '--init', + 'args': args.args } is_docked = bool(os.environ.get('DOCKER_IP')) From 6e9deb5425a2b0806b72d590fdd8075e41ca0ec7 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 19 Jun 2019 10:07:21 -0600 Subject: [PATCH 48/62] Add docker-machine notes --- issues/50 - docker-machine/notes.md | 69 +++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 issues/50 - docker-machine/notes.md diff --git a/issues/50 - docker-machine/notes.md b/issues/50 - docker-machine/notes.md new file mode 100644 index 0000000..8e8ab44 --- /dev/null +++ b/issues/50 - docker-machine/notes.md @@ -0,0 +1,69 @@ +# Consider replacing some functionality with `docker-machine` + +The following are steps I used to get `docker-machine` to work for the `create-dock` use case: + +1) create a security group named `docker-machine` + + Create inbound access for port 22 and 2376 (docker daemon) with + appropriate cidr blocks (in our case: 198.60.24.141/32, 10.93.0.0/16 and 208.86.202.9/32) + + Do this because `docker-machine` opens up ssh and docker daemon port to the world by default + and we don't want to do this. 
+ +2) make sure your `~/.ssh/config` file isn't setting conflicting information + + By default we'll let `docker-machine` generate a keypair and use that, so don't + specify an IdentityFile for the ip block that you'll be using with `docker-machine` + +3) try `docker-machine create` + + Here is the command line I used: `aws-vault exec resero -- docker-machine create --driver amazonec2 --amazonec2-region us-west-2 --amazonec2-subnet-id subnet-6c555b25 --amazonec2-vpc-id vpc-6298c405 --amazonec2-zone b --amazonec2-security-group-readonly --amazonec2-use-private-address dm-test` + + Along with the reasons for the various options: + + * `--amazonec2-subnet-id subnet-6c555b25` - because default subnet is out of ips + * `--amazonec2-vpc-id vpc-6298c405` - because if you specify a subnet you need to also specify the vpc + * `--amazonec2-zone b` - also required with subnet and vpc + * `--amazonec2-security-group-readonly` - (see step 1) this prevents `docker-machine` from opening 22 and 2376 to the world + * `--amazonec2-use-private-address` - otherwise `docker-machine` will attempt to use the public IP for the ec2 instance, and that doesn't work with our network config + +## Things to be aware of + +### AWS-Vault +Because AWS credentials are in env variables when running the above, those credentials are +captured in the `~/.docker/machine/machines/dm-test/config.json` file. Because the `aws-vault` credentials +expire, `docker-machine` will stop working after credential expiration. + +As a workaround, you can remove the applicable lines from the `config.json` and it will work + +### ssh access to `dockerd` + +Recently, `dockerd` has been exposed via ssh ([docker/cli Issue 1014](https://github.com/docker/cli/pull/1014) and [Docker Tips: Access the Docker Daemon via SSH](https://medium.com/better-programming/docker-tips-access-the-docker-daemon-via-ssh-97cd6b44a53)). + +There is something in how `docker-machine` configures `dockerd` that prevents this from working. 
+ +First, modify `~/.ssh/config` to include: + +``` +Host 10.93.128.135 + User ubuntu + UserKnownHostsFile=/dev/null + StrictHostKeyChecking no + IdentityFile /Users/drapp/.docker/machine/machines/dm-test/id_rsa +``` + +Then run: + +`docker -H ssh://10.93.128.135 images` + +This results in: + +`Cannot connect to the Docker daemon at http://docker. Is the docker daemon running?` + +``` +docker -H ssh://10.93.128.97 images -- 0.03s user 0.02s system 2% cpu 2.261 total +docker -H tcp://10.93.128.97:2377 images -- 0.04s user 0.02s system 7% cpu 0.813 total + +docker -H ssh://10.93.128.97 run -ti alpine echo “hello” -- 0.05s user 0.04s system 1% cpu 7.095 total +docker -H tcp://10.93.128.97:2377 run -ti alpine echo “hello” -- 0.05s user 0.10s system 4% cpu 2.959 total +``` \ No newline at end of file From e4275c9fbfd0493f012739667f06edfe92424a70 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 19 Jun 2019 10:36:13 -0600 Subject: [PATCH 49/62] Add docker-machine notes --- issues/50 - docker-machine/notes.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/issues/50 - docker-machine/notes.md b/issues/50 - docker-machine/notes.md index 8e8ab44..78e243d 100644 --- a/issues/50 - docker-machine/notes.md +++ b/issues/50 - docker-machine/notes.md @@ -60,10 +60,16 @@ This results in: `Cannot connect to the Docker daemon at http://docker. Is the docker daemon running?` +**Resolved**: Turns out this was due to the `ubuntu` user not being in the `docker` group a simple: `sudo usermod -aG docker ubuntu` fixes this. 
+ +### Timings of ssh ``` docker -H ssh://10.93.128.97 images -- 0.03s user 0.02s system 2% cpu 2.261 total docker -H tcp://10.93.128.97:2377 images -- 0.04s user 0.02s system 7% cpu 0.813 total docker -H ssh://10.93.128.97 run -ti alpine echo “hello” -- 0.05s user 0.04s system 1% cpu 7.095 total docker -H tcp://10.93.128.97:2377 run -ti alpine echo “hello” -- 0.05s user 0.10s system 4% cpu 2.959 total + +docker -H ssh://10.93.128.97 run -ti --rm alpine sleep 30 -- 0.06s user 0.04s system 0% cpu 38.239 total +docker -H tcp://10.93.128.97:2377 run -ti --rm alpine sleep 30 -- 0.05s user 0.03s system 0% cpu 32.623 total ``` \ No newline at end of file From a8742e74e574831ee74d80e1384f4a328036ec3f Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Wed, 19 Jun 2019 11:46:13 -0600 Subject: [PATCH 50/62] update to latest AWS DeepLearning AMI --- packer/notes.dlami.md | 13 +++++++++---- packer/resero-labs-dl.packer | 6 +++--- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/packer/notes.dlami.md b/packer/notes.dlami.md index 4729214..6b213ef 100644 --- a/packer/notes.dlami.md +++ b/packer/notes.dlami.md @@ -1,10 +1,10 @@ # How to Build/Deploy a Resero-labs dlami -We are using verison 22.0 (ami-01a4e5be5f289dd12) currently, but have modified it (by hand) to create our base image +We are using verison 23.0 (ami-058f26d848e91a4e8) currently, but have modified it (by hand) to create our base image 1) create instance of DLAMI. (Watch for updates [Deep Learning AMI (Ubuntu)](https://aws.amazon.com/marketplace/pp/B077GCH38C).) - `create-dock -a ami-01a4e5be5f289dd12 -s -m base-ami -i p3.2xlarge` + `create-dock -a ami-058f26d848e91a4e8 -s -m base-ami -i p3.2xlarge` 2) ssh into image. 
`ssh-dock base-ami` @@ -26,10 +26,15 @@ that looks like: `2019-01-24 17:52:10,320 INFO All upgrades installed` (should b ``` 6) reboot and ensure that nvidia drivers still good - `nvidia-smi` + ```bash + stop-dock base-ami + start-dock base-ami + ssh-dock base-ami + nvidia-smi + ``` 7) Create an image from this instance - `aws ec2 create-image --instance-id i-0238495638422d858 --name resero-labs-dlami-base-22.0-2019.04` + `aws ec2 create-image --instance-id i-0696b5c8f549f88f2 --name resero-labs-dlami-base-23.0-2019.06` (Instance id can be found in the console or in `~/.docker//connection_config.txt`. AMI name should follow the convention `resero-labs-dlami-base--`) diff --git a/packer/resero-labs-dl.packer b/packer/resero-labs-dl.packer index 90c4f88..47c7469 100644 --- a/packer/resero-labs-dl.packer +++ b/packer/resero-labs-dl.packer @@ -1,7 +1,7 @@ { "builders": [ { - "ami_name": "resero-labs-dlami-2019.03-b22.0-2019.04", + "ami_name": "resero-labs-dlami-2019.06-b23.0-2019.06", "type": "amazon-ebs", "force_deregister": "true", "instance_type": "p3.2xlarge", @@ -22,10 +22,10 @@ "sg-b93e0dc2", "sg-1bd90461" ], - "source_ami": "ami-0ba505ea799c46918", + "source_ami": "ami-01adff0083ebe61e6", "ssh_username": "ubuntu", "iam_instance_profile": "lanista-app", - "subnet_id": "subnet-b8b440de", + "subnet_id": "subnet-6c555b25", "tags": { "Name": "resero-labs-dlami-candidate" } From b0dd0bf69855ddc48f233b2f265203fa6f5648cb Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Fri, 2 Aug 2019 14:37:33 -0600 Subject: [PATCH 51/62] A few changes to allow docker-utils to support ssh docker (#53) --- .gitignore | 1 + scripts/create-dock | 187 +--------------------------------------- scripts/destroy-dock | 91 +------------------ scripts/dock | 13 +-- scripts/register-dock | 164 ++++++----------------------------- scripts/ssh-dock | 3 - scripts/unregister-dock | 43 +++++++++ 7 files changed, 75 insertions(+), 427 deletions(-) create mode 100755 scripts/unregister-dock diff --git 
a/.gitignore b/.gitignore index 19ef7ae..7052383 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ activate .coverage .pytest_cache .venv +.vscode diff --git a/scripts/create-dock b/scripts/create-dock index 6a9285e..3879c04 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -1,189 +1,4 @@ #!/usr/bin/env bash set -e -# Default Values -INSTANCE_TYPE="c5.xlarge" - -MONIKER= - -GREEN='\033[0;32m' -RED='\033[0;31m' -NO_COLOR='\033[0m' - -check_aws_connectivity() { - echo "Checking aws connectivity..." - USERNAME=$(aws iam get-user --query "User.UserName" --output text) - if [ "$USERNAME" == None ]; then - echo "Failed to get valid aws username." - exit 1 - fi -} - -check_aws_connectivity - -confirm_create() { - if [ -z "$1" ]; then - echo -e "Create dock with the following values?" - echo -e "Instance type: ${GREEN}${INSTANCE_TYPE}${NO_COLOR}" - echo -e "Name: ${GREEN}${INSTANCE_NAME}${NO_COLOR}" - echo -e "AMI ID: ${GREEN}${AMI_ID}${NO_COLOR}" - read -e -p "Type enter to Cancel, h for Help, y to Create: " RESPONSE - fi - - if [ "$RESPONSE" == "h" ]; then print_help; fi -} - -print_help() { - echo "Create dock - Help" - echo - echo "Description" - echo " This script uses the aws cli to create a new ec2 dock instance from the latest AMI." - echo " If no options are passed into the script, it will prompt with defaults." - echo " The register-dock script is run automatically after the instance is ready." - echo - echo " A number of values used in creating the ec2 instance (security-group-ids, etc.) 
can be over-ridden" - echo " by specifying a ~/.docker/dock.cfg file with the following values:" - echo - echo ' key_name="..."' - echo ' security_group_ids="..."' - echo ' iam_instance_profile="..."' - echo ' subnet_id="..."' - echo ' tag_specifications="..."' - echo - echo "Usage" - echo " $ create-dock [options]" - echo - echo "Options" - echo -e "\n -n dock-name\n Specify a name for this dock with this format: my-dock-name" - echo -e " If dock-name is not specified, the name defaults to ${GREEN}${INSTANCE_NAME}${NO_COLOR}" - echo -e "\n -m moniker\n moniker will be added to your cli prompt to indicate that you are docked. (Default: instance IP address)" - echo - echo " Example CLI prompt with python virtual environment enabled and docked to remote worker:" - echo -e " $ (venv-name) prompt$ [dock:moniker] " - - echo -e "\n -i instance-type\n The default instance type is ${INSTANCE_TYPE} (no GPU).\n Other options include p2.xlarge (GPU), m5.2xlarge, etc." - echo -e "\n -a ami-id\n (Optional) The ami to use for the instance." - echo -e "\n -s skip-regisration\n (Optional) Skip running register-dock." - echo -e "\n -h help\n This help" - echo - echo "Examples" - echo - echo -e " $ create-dock\n Create default instance type ($INSTANCE_TYPE) named ${INSTANCE_NAME}.\n CLI prompt moniker when docked will be [dock:IP address]." - echo - echo -e " $ create-dock -i t2.micro -m my-dock\n Create t2.micro instance type named ${INSTANCE_NAME}.\n CLI prompt moniker when docked will be [dock:my-dock]." 
- - - exit 0 -} - -get_private_ip() { - aws ec2 describe-instances \ - --filters Name=instance-id,Values="$1" \ - --query 'Reservations[*].Instances[*].PrivateIpAddress' --output text -} - - -# Parse command line arguments in any order -hflag='' -nflag='' # dock name flag -iflag='' # instance type -aflag='' # ami flag -mflag='' # moniker flag -regflag='true' -while getopts 'hn:i:a:m:s' flag; do # if a character is followed by a colon, that argument is expected to have an argument. - case "${flag}" in - h) hflag='true';; - n) nflag='true'; INSTANCE_NAME="${OPTARG}" ;; - i) iflag='true'; INSTANCE_TYPE="${OPTARG}" ;; - a) aflag='true'; AMI_ID="${OPTARG}" ;; - m) mflag='true'; MONIKER="${OPTARG}" ;; - s) regflag='';; - *) error "Unexpected option ${flag}" ;; - esac -done - -INSTANCE_NAME="${USERNAME}-${MONIKER:-dock}" - -# Help -if [ -n "$hflag" ] || [ "$RESPONSE" == "h" ]; then - print_help -fi - -if [ -z "$AMI_ID" ]; then - AMI_ID=$(aws ec2 describe-images --filters "Name=tag:Name,Values=resero-labs-dlami" --query 'Images[*].{ID:ImageId}' --output text) - if [ -z "$AMI_ID" ]; then - echo "Unable to fetch default AMI ID." - exit 1 - fi -fi - -# Confirmation -confirm_create -if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then - echo "Canceled" - exit 0 -fi - - -# Create dock -if [ -e "$HOME/.docker/dock.cfg" ]; then - source "$HOME/.docker/dock.cfg" -else - key_name="resero-staging" - security_group_ids="sg-213eb35a sg-3bde0341 sg-b93e0dc2 sg-1bd90461" - iam_instance_profile="lanista-app" - subnet_id="subnet-b8b440de" - tag_specifications="ResourceType=instance,Tags=[{Key=Name,Value=${INSTANCE_NAME}},\ - {Key=business_unit,Value='Archiving & Governance'},{Key=component,Value='ec2 instance'},\ - {Key=product,Value='Resero Development'},{Key=support_level,Value=dev},\ - {Key=created_by,Value=${USERNAME}}]" -fi -if [ ! 
-f "$HOME/.ssh/$key_name" ]; then - echo - echo - echo -e "${RED}It appears the the key required to access the EC2 instance doesn't exist ($HOME/.ssh/$key_name).${NO_COLOR}" - echo - echo "Please ensure that the correct ssh-key is configured in ~/.docker/dock.cfg" - exit 1 -fi -INSTANCE_ID=$(aws ec2 run-instances \ - --subnet-id ${subnet_id} \ - --image-id $AMI_ID \ - --instance-type "${INSTANCE_TYPE}" \ - --disable-api-termination \ - --block-device-mappings "DeviceName='/dev/sda1',Ebs={VolumeSize=100,VolumeType='gp2'}" \ - --tag-specifications "${tag_specifications}" \ - --iam-instance-profile Name="${iam_instance_profile}" \ - --key-name "${key_name}" \ - --security-group-ids ${security_group_ids} \ - | grep InstanceId | awk -F '"' '{print $4}' \ - ) - -if [ ! -z "$INSTANCE_ID" ]; then - echo "Creating instance id: $INSTANCE_ID" - echo "Waiting for instance to start..." - aws ec2 wait system-status-ok --instance-ids $INSTANCE_ID - - IP_ADDRESS=$(get_private_ip $INSTANCE_ID) - - if [ -n "$regflag" ]; then - if [ $(echo $IP_ADDRESS | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 1 ]; then - echo 'Registering secure remote docker api' - register-dock -f ubuntu "$IP_ADDRESS" "$MONIKER" "$INSTANCE_ID" - fi - else - mkdir -p ~/.docker/${IP_ADDRESS} - printf "DOCK_USER=ubuntu\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$IP_ADDRESS\nDOCK_IP=$IP_ADDRESS\n" > $HOME/.docker/${IP_ADDRESS}/connection_config.txt - fi - printf "DOCK_INSTANCE_ID=$INSTANCE_ID" >> $HOME/.docker/${IP_ADDRESS}/connection_config.txt && - - - echo "New EC2 instance available to dock at $IP_ADDRESS" - echo "try these commands:" - echo " $ source dock $IP_ADDRESS" - echo " $ castoff" - echo " $ ssh-add -K ~/.ssh/private-key-name" - echo " $ ssh ubuntu@$IP_ADDRESS" -else - echo "Failed to create instance." 
-fi +echo "create-dock has been deprecated, please use AWS Service Catalog" diff --git a/scripts/destroy-dock b/scripts/destroy-dock index e14dcb5..b10535b 100755 --- a/scripts/destroy-dock +++ b/scripts/destroy-dock @@ -1,93 +1,4 @@ #!/usr/bin/env bash set -e -# Default Values -MONIKER=${1:-"$DOCKER_IP"} -shift -GREEN='\033[0;32m' -NO_COLOR='\033[0m' - -confirm_destroy() { - echo -e "Destroy dock with the following values?" - echo -e "Instance ID: ${GREEN}${2:-none}${NO_COLOR}" - echo -e "Config Dir: ${GREEN}${1:-none}${NO_COLOR}" - read -e -p "Type enter to Cancel, h for Help, y to Destroy: " RESPONSE - - if [ "$RESPONSE" == "h" ]; then print_help; fi - -} - -print_help() { - echo "Destroy dock - Help" - echo - echo "Description" - echo " This script uses the aws cli to terminate an existing ec2 dock instance." - echo " Either the dock ip or the moniker must be provided" - echo "" - echo " Options:" - echo " -f Disable termination protection and then terminate" - echo - echo "Usage" - echo " $ destroy-dock moniker|ip <-f>" - echo - - exit 0 -} - -# Parse command line arguments in any order -disable_terminate_protection=false -while getopts 'hf' flag; do # if a character is followed by a colon, that argument is expected to have an argument. 
- case "${flag}" in - h) hflag='true';; - f) disable_terminate_protection=true;; - *) error "Unexpected option ${flag}" ;; - esac -done - -# Look up IP from moniker -FOUND_MONIKER=false -for f in $HOME/.docker/*; do - if [ -d $f ] && [ -f $f/connection_config.txt ]; then - while read -r line; do declare $line; done < "$f/connection_config.txt" - if [[ $DOCK_MONIKER = $MONIKER || $DOCK_IP = $MONIKER ]]; then - FOUND_MONIKER=true - break - fi - fi -done - -if [ $FOUND_MONIKER = false ]; then - echo "Can't find dock configuration for $MONIKER" - exit -1 -fi - - -get_instance_id() { - aws ec2 describe-instances \ - --filters Name=private-ip-address,Values="$1" \ - --query 'Reservations[*].Instances[*].InstanceId' --output text -} - -# destroy dock -INSTANCE_ID=$(get_instance_id $DOCK_IP) - -# Confirmation -confirm_destroy "$f" "$INSTANCE_ID" -if [ "$RESPONSE" != "y" ] && [ "$RESPONSE" != "h" ]; then - echo "Canceled" - exit 0 -fi - -if [ -n "$INSTANCE_ID" ]; then - if ${disable_terminate_protection}; then - for I in $INSTANCE_ID; do - aws ec2 modify-instance-attribute --no-disable-api-termination --instance-id "$I" - done - fi - aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" --output text -fi - -if [ -n "$f" ]; then - rm -rf $f -fi - +echo "delete-dock has been deprecated, please use AWS Service Catalog" diff --git a/scripts/dock b/scripts/dock index 5e9ee04..b170228 100755 --- a/scripts/dock +++ b/scripts/dock @@ -16,7 +16,6 @@ if [[ -z "$BASH" && -z "$ZSH_NAME" && -z "$DOCKERUTILS_DISABLE_PROMPT" ]]; then fi MONIKER=${1:-none} -PORT=2377 if [ -n "$_DOCK_MONIKER" ]; then echo "Remote docker is already configured for '$DOCK_MONIKER'. Try 'castoff' to disconnect." @@ -31,7 +30,7 @@ if [ $MONIKER = "none" ]; then echo "Examples:" echo " $ dock my-centos1" echo " $ dock 10.93.133.2" - echo + echo echo "You can now run secure remote docker commands." 
echo "To undo this configuration:" echo " $ castoff" @@ -45,7 +44,7 @@ FOUND_MONIKER=false for f in $HOME/.docker/*; do if [ -d $f ] && [ -f $f/connection_config.txt ]; then while read -r line; do declare $line; done < "$f/connection_config.txt" - if [ $DOCK_MONIKER = $MONIKER ]; then + if [ $DOCK_MONIKER = $MONIKER ] || [ $DOCK_IP = $MONIKER ]; then FOUND_MONIKER=true break fi @@ -54,7 +53,7 @@ done if [ $FOUND_MONIKER = false ]; then echo "Can't find dock configuration for $MONIKER" - kill -INT $$ + kill -INT $$ fi echo "Docking to $DOCK_USER@$DOCK_IP [Moniker: $DOCK_MONIKER]" @@ -68,8 +67,6 @@ fi # Python virtual environment prompt (or lack therof) should remain unchanged. castoff() { _DOCK_MONIKER="\[dock:$DOCK_MONIKER\] " - unset DOCKER_TLS_VERIFY - unset DOCKER_CERT_PATH unset DOCKER_HOST unset DOCKER_IP @@ -102,9 +99,7 @@ castoff() { . dock-sync -export DOCKER_TLS_VERIFY=1 -export DOCKER_CERT_PATH=${HOME}/.docker/${DOCK_IP} -export DOCKER_HOST=tcp://${DOCK_IP}:2377 +export DOCKER_HOST=ssh://${DOCK_USER}@${DOCK_IP} export DOCKER_IP=${DOCK_IP} export _DOCK_MONIKER="[dock:$DOCK_MONIKER] " diff --git a/scripts/register-dock b/scripts/register-dock index fb82a89..56605b0 100755 --- a/scripts/register-dock +++ b/scripts/register-dock @@ -1,162 +1,48 @@ #!/bin/bash -FORCE_REBUILD=false -while getopts "f:" OPTION +SHOW_HELP= +while getopts ":h" OPTION do case $OPTION in - f) - FORCE_REBUILD=true - shift - ;; - --help | -h) + #--help | -h) + -h) + SHOW_HELP=y shift ;; esac done -REMOTE_CONFIG_DIR='/etc/resero/docker-keys' -USER=${1:-none} -DOCK_HOSTNAME=${2:-none} -IP=${2:-none} -MONIKER=${3:-$2} -PORT=2377 -SSH_OPTIONS="-o LogLevel=error -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +# DOCK_USER=ubuntu register-dock 10.92.128.111 +DOCK_USER=${DOCK_USER:-ubuntu} +IP=${1:-none} +MONIKER=${2:-none} -if [ $USER == "none" ] || [ $DOCK_HOSTNAME == "none" ]; then - echo "This script will secure the docker daemon socket on a remote server and download the" - 
echo "requisite files and configuration for usage." - echo +if [ $IP == "none" ] || [ "$IP" == "-h" ] || [ "$IP" == "--help" ]; then + echo "This script will register an ec2 instance that has been configured using Service Catalog" + echo "to allow easy interfacing using docker and other tools." + echo echo "NOTE: This script is potentially destructive:" echo " - On the client it will overwrite certificates in ~/.docker/" - echo " - On the server it will overwrite /etc/docker/daemon.json and $REMOTE_CONFIG_DIR/" - echo - echo "This script will also restart the docker daemon on the server with 'sudo systemctl restart docker'" - echo - echo "CONFIGURATION" - echo "Add private key identity to the authentication agent" - echo " $ ssh-add -K ~/.ssh/" - echo + echo echo "Usage:" - echo " $ register-dock [-f] [moniker]" - echo " - 'hostname' is a hostname that you have configured locally, e.g. in /etc/hosts" - echo " - The optional moniker will be added to your terminal prompt when remote docker" - echo " is enabled. Otherwise the host IP will be added." - echo " - If the certificate is already on the server then it will be copied to your machine." - echo " - Use the -f flag to force creation of a new CA, server and client key." 
+ echo " $ DOCK_USER= register-dock [-f] [moniker]" + echo " - 'DOCK_USER' is an env that defaults to ubuntu but can be explicitly set if needed" + echo " - 'IP' is the hostname or ip address of the created instance" + echo " - 'moniker' is an optional name that can be used to refer to this instance in scripts" echo echo " Examples:" - echo " $ register-dock ubuntu 10.93.133.6" - echo " $ register-dock bveranth neon" - echo " $ register-dock bveranth neon remote-neon" + echo " $ DOCK_USER=admin register-dock 10.93.133.6" + echo " $ register-dock 10.92.128.33" + echo " $ register-dock 10.92.128.44 project-dev" exit 1 fi -TEMP_PASS=$(openssl rand -base64 12) - -if [ $(echo $IP | grep -c -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}') == 0 ]; then - #IP=$(arp -n "$DOCK_HOSTNAME" | tr -d '()' | awk '{print $2}') - IP=$(ping -i 0.5 -W 0.5 -c 1 "$DOCK_HOSTNAME" | head -n 1 |tr -d '():'|awk '{print $3}') -fi - -if [ -z "$IP" ]; then - echo "The hostname or IP that you specified ($DOCK_HOSTNAME) can't be resolved." - kill -INT $$ -fi - +echo "DOCK_USER:$DOCK_USER" echo "MONIKER:$MONIKER" -echo "HOSTNAME:$DOCK_HOSTNAME" -echo "IP:$IP" -echo "Connecting to ${IP}..." - -# If force rebuild flag is not set, then check for already existing key on server and download it. -if [ $FORCE_REBUILD == false ] && ssh ${SSH_OPTIONS} $USER@$IP "[ -f $REMOTE_CONFIG_DIR/ca.pem ]"; then - if [ -f ~/.docker/${IP}/ca.pem ]; then - mv -f ~/.docker/${IP}/ca.pem ~/.docker/${IP}/ca.pem.back - mv -f ~/.docker/${IP}/key.pem ~/.docker/${IP}/key.pem.back - mv -f ~/.docker/${IP}/cert.pem ~/.docker/${IP}/cert.pem.back - fi - mkdir -p ~/.docker/${IP} - - # Delete local files before copying from remote host. 
- rm -f ~/.docker/${IP}/ca.pem - rm -f ~/.docker/${IP}/key.pem - rm -f ~/.docker/${IP}/cert.pem - - # Copy new files from remote host - scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && - scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && - scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && - export DOCKER_TLS_VERIFY=1 && - export DOCKER_CERT_PATH=~/.docker/$IP && - export DOCKER_HOST=tcp://$IP:$PORT && - printf "DOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && - echo "Secure remote docker API configured." - exit 0 -fi - -if ssh ${SSH_OPTIONS} $USER@$IP "[ -d $REMOTE_CONFIG_DIR ]"; then - echo "Backing up remote keys $REMOTE_CONFIG_DIR" - ssh ${SSH_OPTIONS} $USER@$IP "sudo rm -rf $REMOTE_CONFIG_DIR-back" && - ssh ${SSH_OPTIONS} $USER@$IP "sudo cp -rf $REMOTE_CONFIG_DIR $REMOTE_CONFIG_DIR-back" && - ssh ${SSH_OPTIONS} $USER@$IP "sudo rm -rf $REMOTE_CONFIG_DIR" -fi - -# echo "Creating CA..." 
-ssh ${SSH_OPTIONS} $USER@$IP "sudo mkdir -p $REMOTE_CONFIG_DIR" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -aes256 -passout pass:$TEMP_PASS -out $REMOTE_CONFIG_DIR/ca-key.pem 4096" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -new -x509 -days 365 -key $REMOTE_CONFIG_DIR/ca-key.pem -sha256 \ - -out $REMOTE_CONFIG_DIR/ca.pem -passin pass:$TEMP_PASS \ - -subj '/C=US/ST=Utah/L=Draper/O=Proofpoint/OU=Resero/CN=proofpoint.com'" && -sleep 1 && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/server-key.pem 4096" - -# echo "Creating new docker keys" -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -subj '/CN=$HOSTNAME' -sha256 -new -key $REMOTE_CONFIG_DIR/server-key.pem \ - -out $REMOTE_CONFIG_DIR/server.csr" && -ssh ${SSH_OPTIONS} $USER@$IP "echo subjectAltName = DNS:\$HOSTNAME,IP:$IP > /tmp/extfile.cnf" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo mv /tmp/extfile.cnf $REMOTE_CONFIG_DIR/extfile.cnf" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/server.csr \ - -CA $REMOTE_CONFIG_DIR/ca.pem -CAkey $REMOTE_CONFIG_DIR/ca-key.pem -CAcreateserial \ - -out $REMOTE_CONFIG_DIR/server-cert.pem -extfile $REMOTE_CONFIG_DIR/extfile.cnf -passin pass:$TEMP_PASS" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl genrsa -out $REMOTE_CONFIG_DIR/key.pem 4096" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl req -subj '/CN=client' -new -key $REMOTE_CONFIG_DIR/key.pem -out $REMOTE_CONFIG_DIR/client.csr" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo echo extendedKeyUsage = clientAuth > $REMOTE_CONFIG_DIR/extfile.cnf" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo openssl x509 -req -days 365 -sha256 -in $REMOTE_CONFIG_DIR/client.csr \ - -CA $REMOTE_CONFIG_DIR/ca.pem -CAkey $REMOTE_CONFIG_DIR/ca-key.pem -CAcreateserial \ - -out $REMOTE_CONFIG_DIR/cert.pem -extfile $REMOTE_CONFIG_DIR/extfile.cnf -passin pass:$TEMP_PASS" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo rm -v $REMOTE_CONFIG_DIR/client.csr $REMOTE_CONFIG_DIR/server.csr" && 
-ssh ${SSH_OPTIONS} $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca-key.pem $REMOTE_CONFIG_DIR/key.pem $REMOTE_CONFIG_DIR/server-key.pem" && -ssh ${SSH_OPTIONS} $USER@$IP "sudo chmod 0444 $REMOTE_CONFIG_DIR/ca.pem $REMOTE_CONFIG_DIR/server-cert.pem $REMOTE_CONFIG_DIR/cert.pem" +echo "HOSTNAME:$IP" ### copy CA to client. -mkdir -p ~/.docker/${IP} && -if [ -f ~/.docker/${IP}/ca.pem.back ]; then - rm -f ~/.docker/${IP}/ca.pem.back - rm -f ~/.docker/${IP}/key.pem.back - rm -f ~/.docker/${IP}/cert.pem.back -fi -if [ -f ~/.docker/${IP}/ca.pem ]; then - mv -f ~/.docker/${IP}/ca.pem ~/.docker/${IP}/ca.pem.back - mv -f ~/.docker/${IP}/key.pem ~/.docker/${IP}/key.pem.back - mv -f ~/.docker/${IP}/cert.pem ~/.docker/${IP}/cert.pem.back -fi +mkdir -p ~/.docker/${IP} -scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/ca.pem ~/.docker/${IP}/ca.pem && -scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/key.pem ~/.docker/${IP}/key.pem && -scp ${SSH_OPTIONS} $USER@$IP:$REMOTE_CONFIG_DIR/cert.pem ~/.docker/${IP}/cert.pem && -# TODO: Rather than overwriting the configuration (generally a poor practice) -# TODO: look at merging the configuration we are interested in, into the configuration -# TODO: that is already present -ssh ${SSH_OPTIONS} $USER@$IP 'sudo rm -f /etc/docker/daemon.json' && DOCKER_CFG_FILE=$(python3 -c "import pkg_resources; print(pkg_resources.resource_filename('dockerutils', 'docker-server-daemon.json'))") -scp ${SSH_OPTIONS} $DOCKER_CFG_FILE $USER@$IP:/tmp/daemon.json && -ssh ${SSH_OPTIONS} $USER@$IP 'sudo mkdir -p /etc/docker/' && -ssh ${SSH_OPTIONS} $USER@$IP 'sudo mv /tmp/daemon.json /etc/docker/daemon.json' && -ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl stop docker' && -ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl daemon-reload' && -ssh ${SSH_OPTIONS} $USER@$IP 'sudo systemctl start docker' && -export DOCKER_TLS_VERIFY=1 && -export DOCKER_CERT_PATH=~/.docker/$IP && -export DOCKER_HOST=tcp://$IP:$PORT && -printf 
"DOCK_USER=$USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$DOCK_HOSTNAME\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt && -docker version +printf "DOCK_USER=$DOCK_USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$IP\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt diff --git a/scripts/ssh-dock b/scripts/ssh-dock index 96f487e..d6e1b2a 100755 --- a/scripts/ssh-dock +++ b/scripts/ssh-dock @@ -28,6 +28,3 @@ fi echo "Opening ssh connection to ${DOCKER_IP}" ssh ubuntu@${DOCKER_IP} - - - diff --git a/scripts/unregister-dock b/scripts/unregister-dock new file mode 100755 index 0000000..3b14c0f --- /dev/null +++ b/scripts/unregister-dock @@ -0,0 +1,43 @@ +#!/bin/bash + +# USER=ubuntu register-dock 10.92.128.111 +MONIKER=${1:-none} + +if [ $MONIKER == "none" ]; then + echo "This script will search for registered monikers and removes the entry for the passed in" + echo "moniker" + echo + echo "NOTE: This script is potentially destructive:" + echo " - On the client it will overwrite certificates in ~/.docker/" + echo + echo "Usage:" + echo " $ unregister-dock " + echo " - 'IP' or 'moniker' is the registered details of the instance" + echo + echo " Examples:" + echo " $ unregister-dock 10.93.133.6" + echo " $ unregister-dock project-dev" + exit 1 +fi + +# Look up IP from moniker +FOUND_MONIKER=false +FOUND_PATH= +for f in $HOME/.docker/*; do + if [ -d $f ] && [ -f $f/connection_config.txt ]; then + while read -r line; do declare $line; done < "$f/connection_config.txt" + if [ $DOCK_MONIKER = $MONIKER ] || [ $DOCK_IP = $MONIKER ]; then + FOUND_MONIKER=true + FOUND_PATH="${f}" + break + fi + fi +done + +if [ "$FOUND_MONIKER" = "false" ]; then + echo "No moniker found" + exit 0 +fi + +echo "Removing ${FOUND_PATH}" +rm -rf $FOUND_PATH From a53e7c1dbafb9e1f589561751dac6c2bc1163d7c Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 6 Aug 2019 14:25:53 -0600 Subject: [PATCH 52/62] Quote the command. 
This allows spaces to be in the command and for the command to be processed correctly. (#54) --- scripts/run-image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-image b/scripts/run-image index 5c44a4b..143fb14 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -21,7 +21,7 @@ else: _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {args} {image_name}:{image_tag} {cmd}' + '{volumes} {ports} {args} {image_name}:{image_tag} "{cmd}"' def fetch_env_variables(config, image, args_env=None): From 30bc04c78930e7e687a3c94f49432e14070578e3 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 6 Aug 2019 16:16:06 -0600 Subject: [PATCH 53/62] Bug fixes (#57) * Correctly handle case when command isn't specified. (Don't pass in empty quoted string) * Updated the README by removing the create-dock and destroy-dock references --- README.md | 18 ++++++++---------- scripts/run-image | 36 ++++++++++++++++++++++-------------- 2 files changed, 30 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 1611bb5..e89be49 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ The commands used to operate against these entities are: | CommandSet / Entity | Creation | Execution | Notebook | Utility | |----------------------|:------------------------:|:--------------------:|:------------:|:----------------------------:| -| Dock | create-dock
destroy-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | +| Dock | register-dock
unregister-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | | Image | build-image | run-image | run-notebook | publish-image
transfer-image | Possible use cases include: @@ -48,7 +48,7 @@ for containers that are dependent solely on the project pypi, etc.), for instance a server in a "locked-down" environment. A base image can be defined to isolate any dependencies that are required. That image can then be built and `transfer-image` used to transfer the base image to the target environment. - + Subsequent images can be built based off of that image that are "self-contained" (relying only on source from the project). The remote docker api can then be used to quickly iterate only requiring the more cumbersome transfer-image to be used when external dependencies change. @@ -56,7 +56,7 @@ for containers that are dependent solely on the project Includes setting most docker parameters, i.e. volume mounts, ports, networks, commands, etc. with replacement varilable support for things like user, project root, etc. - + ## Command-line Interface ### Image cli @@ -83,15 +83,13 @@ the latest version from docker hub. ### Dock cli -A "dock" is a remote system that has a docker daemon running and configured in a secure fashion (generally an EC2 -instance). You can "dock" your terminal to a remote instance and any docker commands, including image and notebook cli -above will be run against the remote docker server. Once a "dock" is created, you can dock your terminal by issuing -the command `source dock ` +A "dock" is a remote system that you can connect to through `ssh`. You can "dock" your terminal to a remote instance and +any docker commands, including image and notebook cli above will be run against the remote docker server. Once a "dock" +is created, you can dock your terminal by issuing the command `source dock ` -`create-dock` will start an ec2 instance that can be used for remote docking. 
This instance is configured to provide -secure interaction with the docker server, as well as to support GPU utliziation (`-g` option with `run-image`) +`register-dock` is used to add a remote system to the dock list with all its configuration (username, ip and a moniker) -`destroy-dock` will terminate a remote dock instance and delete any local configuration files +`unregister-dock` is used to remove the reference to the remote system `stop-dock` will change the instances state of a remote dock to `stopped` diff --git a/scripts/run-image b/scripts/run-image index 143fb14..1d20026 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -23,6 +23,11 @@ else: _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ '{volumes} {ports} {args} {image_name}:{image_tag} "{cmd}"' +# if we don't have a command, don't surround it in quotes, doing so cause a nop to be executed and the container +# to exit +_base_cmd_no_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ + '{volumes} {ports} {args} {image_name}:{image_tag} {cmd}' + def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. 
@@ -69,20 +74,23 @@ def run(mode, image_name, image_tag, **kwargs): kwargs['network'] = "--network {network}".format(network=kwargs['network']) timestamp = datetime.datetime.now().strftime("%y-%m-%d_%H.%M.%S") - cmd = _base_cmd.format(image_name=image_name, - image_tag=image_tag, - name="{user}_{mode}_{timestamp}".format( - user=getpass.getuser(), mode=mode, timestamp=timestamp), - keep_container=kwargs['keep_container'], - interactive=kwargs['interactive'], - environment=kwargs['environment'], - network=kwargs['network'], - ports=kwargs['ports'], - args=kwargs['args'], - volumes=volumes, - gpu=kwargs['gpu'], - cmd=kwargs['cmd'], - init=kwargs['init']) + command_to_format = _base_cmd_no_cmd + if kwargs['cmd']: + command_to_format = _base_cmd + cmd = command_to_format.format(image_name=image_name, + image_tag=image_tag, + name="{user}_{mode}_{timestamp}".format( + user=getpass.getuser(), mode=mode, timestamp=timestamp), + keep_container=kwargs['keep_container'], + interactive=kwargs['interactive'], + environment=kwargs['environment'], + network=kwargs['network'], + ports=kwargs['ports'], + args=kwargs['args'], + volumes=volumes, + gpu=kwargs['gpu'], + cmd=kwargs['cmd'], + init=kwargs['init']) print('\n\n============================================================================') print('{cmd}\n\n'.format(cmd=cmd)) From 7c5c0eb888be139dd959cf9a1842d4b2c95fac89 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Thu, 8 Aug 2019 14:53:50 -0600 Subject: [PATCH 54/62] Add unregister-dock to scripts in setup.py --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index c64518e..1673781 100644 --- a/setup.py +++ b/setup.py @@ -43,7 +43,8 @@ 'scripts/ssh-dock', 'scripts/start-dock', 'scripts/stop-dock', - 'scripts/transfer-image' + 'scripts/transfer-image', + 'scripts/unregister-dock' ], include_package_data=True, packages=find_packages(exclude=['tests*']), From 5752972a14b5645efeaf7bc7f78798e3e862d5ba Mon Sep 17 00:00:00 2001 
From: Daniel Rapp Date: Thu, 8 Aug 2019 15:09:30 -0600 Subject: [PATCH 55/62] Fix bug in the way AWS env variables were added to command line --- scripts/run-image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-image b/scripts/run-image index 1d20026..7c1a269 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -129,7 +129,7 @@ if __name__ == '__main__': gpu = '' run_config = { - 'environment': fetch_env_variables(config, args.image, args.env) + populate_aws_env_variables(), + 'environment': fetch_env_variables(config, args.image, args.env) + ' ' + populate_aws_env_variables(), 'keep_container': args.keep or '--rm', 'interactive': '-d' if args.keep else '-it', 'gpu': gpu, From 058e33563e2500f5e7b83e172b32bb6bda60e6d6 Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Fri, 9 Aug 2019 15:21:10 -0600 Subject: [PATCH 56/62] Adjusted the cmd option to allow for quotes to be correctly used from a command --- scripts/run-image | 47 ++++++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/scripts/run-image b/scripts/run-image index 7c1a269..cc3d0d9 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -21,13 +21,7 @@ else: _base_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {args} {image_name}:{image_tag} "{cmd}"' - -# if we don't have a command, don't surround it in quotes, doing so cause a nop to be executed and the container -# to exit -_base_cmd_no_cmd = 'docker run {init} --name {name} {environment} {keep_container} {interactive} {gpu} {network} ' \ - '{volumes} {ports} {args} {image_name}:{image_tag} {cmd}' - + '{volumes} {ports} {args} {image_name}:{image_tag}' def fetch_env_variables(config, image, args_env=None): # a special use case. retrieve env variables from a config server. 
@@ -74,30 +68,29 @@ def run(mode, image_name, image_tag, **kwargs): kwargs['network'] = "--network {network}".format(network=kwargs['network']) timestamp = datetime.datetime.now().strftime("%y-%m-%d_%H.%M.%S") - command_to_format = _base_cmd_no_cmd - if kwargs['cmd']: - command_to_format = _base_cmd - cmd = command_to_format.format(image_name=image_name, - image_tag=image_tag, - name="{user}_{mode}_{timestamp}".format( - user=getpass.getuser(), mode=mode, timestamp=timestamp), - keep_container=kwargs['keep_container'], - interactive=kwargs['interactive'], - environment=kwargs['environment'], - network=kwargs['network'], - ports=kwargs['ports'], - args=kwargs['args'], - volumes=volumes, - gpu=kwargs['gpu'], - cmd=kwargs['cmd'], - init=kwargs['init']) + cmd = _base_cmd.format(image_name=image_name, + image_tag=image_tag, + name="{user}_{mode}_{timestamp}".format( + user=getpass.getuser(), mode=mode, timestamp=timestamp), + keep_container=kwargs['keep_container'], + interactive=kwargs['interactive'], + environment=kwargs['environment'], + network=kwargs['network'], + ports=kwargs['ports'], + args=kwargs['args'], + volumes=volumes, + gpu=kwargs['gpu'], + init=kwargs['init']) print('\n\n============================================================================') - print('{cmd}\n\n'.format(cmd=cmd)) + print("{cmd} {kwcmd}".format(cmd=cmd, kwcmd=kwargs['cmd'] or '')) # Since we are using secure env values I don't want those to print in the above commmand, but # they need to be expanded for the subprocess.call expanded_cmd = Template(cmd).substitute(os.environ) - return subprocess.call(shlex.split(expanded_cmd), cwd=os.getcwd()) + expanded_kwargs_cmd = Template(kwargs['cmd'] or '').substitute(os.environ) + + all_args = shlex.split(expanded_cmd) + shlex.split(expanded_kwargs_cmd) + return subprocess.call(all_args, cwd=os.getcwd()) if __name__ == '__main__': @@ -136,7 +129,7 @@ if __name__ == '__main__': 'network': args.network or '', 'volumes': '', 'ports': '', - 'cmd': 
args.command or '', + 'cmd': args.command or None, 'init': '--init', 'args': args.args } From 6783dc3213a6be326d05aa5c967d56a04560de7e Mon Sep 17 00:00:00 2001 From: Michael Wright Date: Fri, 9 Aug 2019 15:33:51 -0600 Subject: [PATCH 57/62] Adjusted the run-image to have some \n\n --- scripts/run-image | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-image b/scripts/run-image index cc3d0d9..ca07c71 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -82,7 +82,7 @@ def run(mode, image_name, image_tag, **kwargs): gpu=kwargs['gpu'], init=kwargs['init']) print('\n\n============================================================================') - print("{cmd} {kwcmd}".format(cmd=cmd, kwcmd=kwargs['cmd'] or '')) + print("{cmd} {kwcmd}\n\n".format(cmd=cmd, kwcmd=kwargs['cmd'] or '')) # Since we are using secure env values I don't want those to print in the above commmand, but # they need to be expanded for the subprocess.call From 6905ea9b9815a6bb02e146dc6f3590654c4e05d5 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Tue, 20 Aug 2019 11:44:38 -0600 Subject: [PATCH 58/62] Support {home} replacement on run-image (#60) --- README.md | 3 ++- scripts/run-image | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e89be49..104b01f 100644 --- a/README.md +++ b/README.md @@ -158,7 +158,8 @@ variable replacement designations of the form `{var}`. 
The supported variables * `project_root` - will be replaced with the root directory name of the project * `user` - will be replaced with the user name of the user running the command -* `project` - replace with project namge +* `project` - will be replaced with project name +* `home` - will be replaced with the user's home directory ### Image Push Replacement Variables diff --git a/scripts/run-image b/scripts/run-image index ca07c71..43c0a80 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -60,7 +60,8 @@ def run(mode, image_name, image_tag, **kwargs): volumes = kwargs['volumes'].format( project_root=get_root_dir(), user=user, - project=os.path.split(get_root_dir())[1] + project=os.path.split(get_root_dir())[1], + home=os.path.expanduser("~") ) volumes = os.path.expandvars(volumes) From ed712a8677fa25ceff7167a400864ecc3ef3c7ac Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Mon, 23 Sep 2019 10:29:49 -0600 Subject: [PATCH 59/62] Re-introduce create-dock & destroy-dock that use AWS servicecatalog API (#61) * Re-introduce create-dock & destroy-dock that use AWS servicecatalog API * Clean up directorys on destroy (properly) * Make create-dock just a little more general, add command line args for: * product name * product version --- README.md | 6 +- scripts/create-dock | 135 +++++++++++++++++++++++++++++++++++++++++- scripts/destroy-dock | 59 +++++++++++++++++- scripts/register-dock | 2 - 4 files changed, 191 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 104b01f..b000c52 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ The commands used to operate against these entities are: | CommandSet / Entity | Creation | Execution | Notebook | Utility | |----------------------|:------------------------:|:--------------------:|:------------:|:----------------------------:| -| Dock | register-dock
unregister-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | +| Dock | create-dock
destroy-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | | Image | build-image | run-image | run-notebook | publish-image
transfer-image | Possible use cases include: @@ -87,9 +87,9 @@ A "dock" is a remote system that you can connect to through `ssh`. You can "dock any docker commands, including image and notebook cli above will be run against the remote docker server. Once a "dock" is created, you can dock your terminal by issuing the command `source dock ` -`register-dock` is used to add a remote system to the dock list with all its configuration (username, ip and a moniker) +`create-dock` (`register-dock` if provisioning from AWS console) is used to add a remote system to the dock list with all its configuration (username, ip and a moniker) -`unregister-dock` is used to remove the reference to the remote system +`destroy-dock` (`unregister-dock` if provisioned from AWS console) is used to remove the reference to the remote system `stop-dock` will change the instances state of a remote dock to `stopped` diff --git a/scripts/create-dock b/scripts/create-dock index 3879c04..5fd20f4 100755 --- a/scripts/create-dock +++ b/scripts/create-dock @@ -1,4 +1,133 @@ -#!/usr/bin/env bash -set -e +#!/usr/bin/env python3 -echo "create-dock has been deprecated, please use AWS Service Catalog" +from __future__ import print_function +import os +import boto3 +import argparse +import getpass +import time +import sys + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("dock", help="Name of dock to create") + parser.add_argument("-i", "--instance_type", help="instance type to launch", default=None) + parser.add_argument("-s", "--subnet", help="subnet ID for instance", default=None) + parser.add_argument("-v", "--volume_size", help="Size of block device (in GBytes)", type=int, default=None) + parser.add_argument("-n", "--product_name", help="Name of the Product to use", default="Basic EC2 Instance") + parser.add_argument("-v", "--product_version", help="Version of the product to use", default="latest") + args = parser.parse_args() + + provisioning_artifact_id = None + 
path_id = None + ip = None + instance_id = None + + # first, we need to find the Basic EC2 Instance product + client = boto3.client('servicecatalog') + response = client.search_products(Filters={ + 'FullTextSearch': [args.product_name] + }) + product_id = response['ProductViewSummaries'][0]['ProductId'] + + # now we need to find the latest version of this product + response = client.describe_product(Id=product_id) + for provisioning_artifact in response['ProvisioningArtifacts']: + if provisioning_artifact['Name'] == args.product_version: + provisioning_artifact_id = provisioning_artifact['Id'] + + # currently, our products don't have a default path, it would be nice if they were configured + # with default path = "launch", but for now, find the launch path + response = client.list_launch_paths(ProductId=product_id) + for launch_path in response['LaunchPathSummaries']: + for constraint in launch_path['ConstraintSummaries']: + if constraint['Type'] == 'LAUNCH': + path_id = launch_path['Id'] + + # get the provisioning parameters and plug in any overrides + response = client.describe_provisioning_parameters(ProductId=product_id, + ProvisioningArtifactId=provisioning_artifact_id, + PathId=path_id) + + parameters = [] + for parameter in response['ProvisioningArtifactParameters']: + key = parameter['ParameterKey'] + value = parameter['DefaultValue'] + allowed_values = parameter['ParameterConstraints']['AllowedValues'] + value_override = None + if key == 'InstanceName': + value_override = args.dock + elif key == 'InstanceType': + value_override = args.instance_type + elif key == 'SubnetID': + value_override = args.subnet + elif key == 'VolumeSize': + value_override = args.volume_size + if value_override and (not allowed_values or value_override in allowed_values): + parameters.append({'Key': key, 'Value':value_override}) + # TODO: figure out a way to make this more general, so that as new products are created, we handle those + # parameters without exapnding the command 
arg list + + # now provision product + if (product_id and provisioning_artifact_id and path_id): + response = client.provision_product(ProductId=product_id, + PathId=path_id, + ProvisioningArtifactId=provisioning_artifact_id, + ProvisionedProductName=args.dock, + ProvisioningParameters=parameters, + Tags=[ + { + 'Key': 'Name', + 'Value': args.dock + }, + { + 'Key': 'business_unit', + 'Value': 'Compliance and Digital Risk' + }, + { + 'Key': 'component', + 'Value': 'ec2 instance' + }, + { + 'Key': 'product', + 'Value': 'ML Labs' + }, + { + 'Key': 'support_level', + 'Value': 'dev' + }, + { + 'Key': 'created_by', + 'Value': getpass.getuser() + } + ] + ) + record_id = response['RecordDetail']['RecordId'] + provisioned_product_id = response['RecordDetail']['ProvisionedProductId'] + sys.stdout.write('waiting for launch to complete') + while ip is None: + sys.stdout.write('.') + sys.stdout.flush() + time.sleep(2) + response = client.describe_record(Id=record_id) + for output in response['RecordOutputs']: + if output['OutputKey'] == 'InstanceID': + instance_id = output['OutputValue'] + elif output['OutputKey'] == 'PrivateIP': + ip = output['OutputValue'] + sys.stdout.write('\n') + sys.stdout.flush() + + if 'DOCK_USER' in os.environ: + user = os.environ['DOCK_USER'] + else: + user = 'ubuntu' + cfg_dir = os.path.join(os.path.join(os.path.expanduser("~"), ".docker"), ip) + os.makedirs(cfg_dir, exist_ok=True) + f = open(os.path.join(cfg_dir, 'connection_config.txt'), 'w') + f.write("DOCK_USER={user}\n".format(user=user)) + f.write("DOCK_MONIKER={moniker}\n".format(moniker=args.dock)) + f.write("DOCK_HOSTNAME={ip}\n".format(ip=ip)) + f.write("DOCK_IP={ip}\n".format(ip=ip)) + f.write("DOCK_PROVISIONED_PRODUCT={id}\n".format(id=provisioned_product_id)) + f.close() diff --git a/scripts/destroy-dock b/scripts/destroy-dock index b10535b..86cfa1d 100755 --- a/scripts/destroy-dock +++ b/scripts/destroy-dock @@ -1,4 +1,57 @@ -#!/usr/bin/env bash -set -e +#!/usr/bin/env python3 +from 
__future__ import print_function +import os +import boto3 +import argparse +import fnmatch +import sys +import time +import shutil -echo "delete-dock has been deprecated, please use AWS Service Catalog" +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("dock", help="Name of dock to destroy") + args = parser.parse_args() + + ip = None + provisioned_product_id = None + + cfg_dir = os.path.join(os.path.expanduser("~"), ".docker") + for root, dirs, files in os.walk(cfg_dir): + for file in files: + if fnmatch.fnmatch(file, 'connection_config.txt'): + vars = {} + with open(os.path.join(root, file)) as f: + for line in f: + if line.startswith('#'): + continue + key, value = line.strip().split('=', 1) + vars[key] = value + if vars['DOCK_MONIKER'] == args.dock or vars['DOCK_IP'] == args.dock: + ip = vars['DOCK_IP'] + provisioned_product_id = vars['DOCK_PROVISIONED_PRODUCT'] + + client = boto3.client('servicecatalog') + response = client.terminate_provisioned_product(ProvisionedProductId=provisioned_product_id) + + record_id = response['RecordDetail']['RecordId'] + errors = None + sys.stdout.write('waiting for terminate to complete') + while True: + sys.stdout.write('.') + sys.stdout.flush() + time.sleep(2) + response = client.describe_record(Id=record_id) + if response['RecordDetail']['Status'] == 'SUCCEEDED': + shutil.rmtree(os.path.join(cfg_dir, ip), ignore_errors=True) + break + elif response['RecordDetail']['Status'] in ['IN_PROGRESS_IN_ERROR', 'FAILED']: + sys.stdout.write('\n') + sys.stdout.flush() + errors = response['RecordDetail']['RecordErrors'] + break + sys.stdout.write('\n') + sys.stdout.flush() + + if errors: + print(errors) \ No newline at end of file diff --git a/scripts/register-dock b/scripts/register-dock index 56605b0..86094b6 100755 --- a/scripts/register-dock +++ b/scripts/register-dock @@ -41,8 +41,6 @@ echo "DOCK_USER:$DOCK_USER" echo "MONIKER:$MONIKER" echo "HOSTNAME:$IP" -### copy CA to client. 
mkdir -p ~/.docker/${IP} -DOCKER_CFG_FILE=$(python3 -c "import pkg_resources; print(pkg_resources.resource_filename('dockerutils', 'docker-server-daemon.json'))") printf "DOCK_USER=$DOCK_USER\nDOCK_MONIKER=$MONIKER\nDOCK_HOSTNAME=$IP\nDOCK_IP=$IP\n" > $HOME/.docker/${IP}/connection_config.txt From a872a563b2bfa6ef1af5830ad68df6127fcc761b Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Mon, 23 Sep 2019 11:06:08 -0600 Subject: [PATCH 60/62] Fix typo and formatting --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b000c52..fd7468b 100644 --- a/README.md +++ b/README.md @@ -13,12 +13,12 @@ ec2 instance in AWS; allow for multiple images per project; etc. Perhaps the best way to think of dockerutils is that it embodies two entities, commonly used when working with docker, and a set of complementary commands for working with those entities. The two entities are: -* "dock" - the server that is hosing docker (by default, localhost) +* "dock" - the server that is hosting docker (by default, localhost) * image - the standard docker image The commands used to operate against these entities are: -| CommandSet / Entity | Creation | Execution | Notebook | Utility | +| CommandSet / Entity | Creation | Execution | Jupyter
Notebook | Utility | |----------------------|:------------------------:|:--------------------:|:------------:|:----------------------------:| | Dock | create-dock
destroy-dock | start-dock
stop-dock | nb-dock | source dock
ls-dock
ssh-dock | | Image | build-image | run-image | run-notebook | publish-image
transfer-image | From 4caa7aff61a46c6ec6be2b2b71c60b68ca70f934 Mon Sep 17 00:00:00 2001 From: Devon Kinghorn Date: Fri, 5 Feb 2021 10:53:00 -0700 Subject: [PATCH 61/62] Update run-image when docked (#66) --- scripts/run-image | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/scripts/run-image b/scripts/run-image index 43c0a80..744449f 100755 --- a/scripts/run-image +++ b/scripts/run-image @@ -121,9 +121,13 @@ if __name__ == '__main__': gpu = '--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all' else: gpu = '' - + is_docked = bool(os.environ.get('DOCKER_IP')) + if is_docked: + aws_env_vars = '' + else: + aws_env_vars = populate_aws_env_variables() run_config = { - 'environment': fetch_env_variables(config, args.image, args.env) + ' ' + populate_aws_env_variables(), + 'environment': fetch_env_variables(config, args.image, args.env) + ' ' + aws_env_vars, 'keep_container': args.keep or '--rm', 'interactive': '-d' if args.keep else '-it', 'gpu': gpu, @@ -135,7 +139,6 @@ if __name__ == '__main__': 'args': args.args } - is_docked = bool(os.environ.get('DOCKER_IP')) if args.image in config.sections(): for key in run_config.keys(): From 8f6fc32775359865075422abd3112785cc6fa2a5 Mon Sep 17 00:00:00 2001 From: Daniel Rapp Date: Fri, 5 Feb 2021 10:54:56 -0700 Subject: [PATCH 62/62] fixes #63: Add ssh options to ssh-dock command (#64) --- scripts/ls-dock | 5 +++++ scripts/ssh-dock | 37 ++++++++++++++++++++++++++++++++++--- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/scripts/ls-dock b/scripts/ls-dock index e86a179..18f8c77 100755 --- a/scripts/ls-dock +++ b/scripts/ls-dock @@ -26,6 +26,11 @@ while getopts 'h' flag; do # if a character is followed by a colon, that argu esac done +# Help +if [ ! 
-z "$hflag" ]; then + print_help +fi + get_instance_id() { aws ec2 describe-instances \ --filters Name=private-ip-address,Values="$1" \ diff --git a/scripts/ssh-dock b/scripts/ssh-dock index d6e1b2a..6d8b254 100755 --- a/scripts/ssh-dock +++ b/scripts/ssh-dock @@ -1,9 +1,40 @@ #!/usr/bin/env bash set -e +print_help() { + echo "ssh-dock - Help" + echo + echo "Description" + echo " This will ssh to an existing dock." + echo " If you are in a docked state, you do not need to specify a moniker." + echo + echo "Usage" + echo " $ ssh-dock [-s ] [dock-moniker]" + echo + + exit 0 +} + +SSH_OPTS="" + +# Parse command line arguments in any order +while getopts ':hs:' flag; do # if a character is followed by a colon, that argument is expected to have an argument. + case "${flag}" in + h) hflag='true';; + s) SSH_OPTS=$OPTARG ;; + *) error "Unexpected option ${flag}" ;; + esac +done +shift $((OPTIND -1)) + +# Help +if [ ! -z "$hflag" ]; then + print_help +fi + if [[ -z "$DOCKER_IP" && -z "$1" ]]; then echo "You must either be docked, or provide a argument specifying the 'moniker' of the dock you want to ssh to" - exit -1 + exit 1 fi if [ -n "$1" ]; then @@ -21,10 +52,10 @@ if [ -n "$1" ]; then if [ $FOUND_MONIKER = false ]; then echo "Can't find dock configuration for $1" - exit -1 + exit 1 fi DOCKER_IP=$DOCK_IP fi echo "Opening ssh connection to ${DOCKER_IP}" -ssh ubuntu@${DOCKER_IP} +ssh $SSH_OPTS ubuntu@${DOCKER_IP}