-
- |_ abdennour/ansible:x.y.z (Full for DEV and CI)
- |_ abdennour/ansible:x.y.z-slim (Light)
-
- |_ abdennour/aws:x.y.z
-
- |_ abdennour/docker:x.y-dind
- |_ abdennour/docker:x.y-dind-awsx.y.z
- |_ abdennour/docker:x.y-dind-composex.y.z
- |_ abdennour/docker:x.y-dind-composex.y.z-awsx.y.z
-
- |_ abdennour/dotenv-to-js-object
dotenv-to-js-object
-
- |_ abdennour/eksctl:x.y.z-aws-x.y.z
- |_ abdennour/eksctl:x.y.z-aws-x.y.z-kubectl-x.y.z
-
- |_ abdennour/git:x.y.z
-
- |_ abdennour/gitea-cli:x.y.z-alpine
-
- |_ abdennour/grails:x.y.z
-
- |_ abdennour/golang-vscode:x.y.z-dind-x.y.z-alpine-x.y
-
- |_ abdennour/helmfile:vx.y.z-aws-x.y.z
-
- |_ abdennour/helm:x.y.z
- |_ abdennour/helm:x.y.z-awsx.y.z-kubectlvx.y.z
-
- |_ abdennour/jq:alpine-3.12
-
- |_ abdennour/kind-cli:vx.y.z
-
- |_ abdennour/kubectl:vx.y.z
- |_ abdennour/kubectl:vx.y.z-awsx.y.z
- |_ abdennour/kubectl:vx.y.z-awsx.y.z-helm-x.y.z
-
- |_ abdennour/nexus3-cli:vx.y.z
-
- |_ abdennour/rhel:x
- |_ abdennour/rhel:8-ssh
-
- |_ abdennour/terraform:x.y.z-helmx.y.z
- |_ abdennour/terraform:x.y.z-aws-iam-authenticator
- |_ abdennour/terraform:x.y.z-helmx.y.z-aws-iam-auth
-
- |_ abdennour/ubuntu-desktop:x.y.z-devtools-
abdennour/ansible:x.y.z (Full for DEV and CI)
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run -it --rm \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "$(pwd):/playbook" \
  -w /playbook \
-e DOCKER_GID_ON_HOST=$(cat /etc/group | grep docker: | cut -d: -f3) \
-e SSH_PRIVATE_KEY_B64=$(cat ~/.ssh/id_rsa | base64) \
abdennour/ansible:${ANSIBLE_VERSION} bash;
### Then interactively
ansible --version
## SSH client is available
cat ~/.ssh/id_rsa.pub # autogenerated if you don't pass the ENV VAR SSH_PRIVATE_KEY_B64
ssh x.y.z.y;
## Docker in Docker is available also
ansible mycontainerIdOrName -m ping -e "ansible_connection=docker"
## Molecule is also available
molecule test
## Ansible Lint is also available
ansible-lint *.yml
abdennour/ansible:x.y.z-slim (Light)
## A light ansible container
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
docker run -it --rm \
abdennour/ansible:${ANSIBLE_VERSION}-slim \
ansible --version
abdennour/aws:x.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run --rm \
-v "${HOME}/.aws:/root/.aws" \
abdennour/aws:${AWS_CLI_VERSION} s3 ls
abdennour/docker:x.y-dind
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run --rm \
-v /var/run/docker.sock:/var/run/docker.sock \
abdennour/docker:${DOCKER_VERSION}-dind docker images
abdennour/docker:x.y-dind-awsx.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# docker + aws
docker run --name docker-aws --rm -d \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "${HOME}/.aws:/root/.aws" \
  abdennour/docker:${DOCKER_VERSION}-dind-aws${AWS_CLI_VERSION} tail -f /dev/null
## then interactively
docker exec docker-aws sh -c '$(aws ecr get-login --no-include-email --region us-west-1)';
docker exec docker-aws docker pull xxxxxxxx.dkr.ecr.us-west-1.amazonaws.com
abdennour/dotenv-to-js-object
- It's a whole software.
- Check its README.md there.
abdennour/eksctl:x.y.z-aws-x.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run --rm \
-v "${HOME}/.aws:/root/.aws" \
-e AWS_PROFILE=my-aws-profile \
abdennour/eksctl:${EKSCTL_VERSION}-aws-${AWS_CLI_VERSION} create cluster ...
abdennour/eksctl:x.y.z-aws-x.y.z-kubectl-x.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run --rm \
-v "${HOME}/.aws:/root/.aws" \
-v "${HOME}/.kube:/kube" \
-e KUBECONFIG=/kube/config \
-e AWS_PROFILE=my-aws-profile \
  abdennour/eksctl:${EKSCTL_VERSION}-aws-${AWS_CLI_VERSION}-kubectl-${KUBECTL_VERSION} \
  get nodes
abdennour/envsubst
# basic example
cat file_includes_env_vars.txt | docker run -i --rm abdennour/envsubst
# or
docker run -i --rm abdennour/envsubst < file_includes_env_vars.txt
# assume "Hello $NAME. My home is ${HOME}" is the content of "file.txt"
docker run -i --rm -e NAME=Abdou abdennour/envsubst < file.txt
# the above command outputs :
# "Hello Abdou. My home is /root"
abdennour/git:x.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
alias git='docker run -it -v $(pwd):/code -w /code abdennour/git:${GIT_VERSION}'
git --version
git remote -v
git log
# so on
abdennour/gitea-cli:x.y.z-alpine
docker run -it --rm \
-e GITEA_SERVER_URL=http://gitea:3000 \
-e GITEA_SERVER_USER=gitea_admin \
-e GITEA_SERVER_PASSWORD=**** \
abdennour/gitea-cli:0.9.0-alpine \
organizations list
abdennour/grails:x.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# assuming current directory is a grails project
docker run -it \
-v $(pwd):/app \
-p 8080:8080 \
abdennour/grails:${GRAILS_VERSION} run-app
# so on
abdennour/golang-vscode:x.y.z-dind-x.y.z-alpine-x.y
- THIS IS ARCHIVED Please check https://github.com/qdm12/godevcontainer
TODO
abdennour/helmfile:vx.y.z-aws-x.y.z
abdennour/helm:x.y.z
function helm
{
# setup
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
docker run --rm \
-e HELM_REPO_tn=https://charts.kubernetes.tn \
-e HELM_REPO_stable=https://kubernetes-charts.storage.googleapis.com/ \
-v $(pwd):/code \
-w /code \
  abdennour/helm:${HELM_VERSION} "$@"
}
helm repo update
helm template r1 stable/chart1
# example with kubeconfig
docker run .. \
-v ${HOME}/.kube:/root/.kube:ro \
# -e KUBECONFIG=/root/.kube/config \
abdennour/helm:${HELM_VERSION} install r2 tn/chart1
abdennour/helm:x.y.z-awsx.y.z-kubectlvx.y.z
Same doc as the other one, but you can also bind:
- volume:
  ${HOME}/.aws:/root/.aws:ro
- env vars:
  AWS_PROFILE, AWS_*, ...
Indeed, this image contains also aws
and kubectl
CLIs.
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs)
docker run -it --rm \
-e HELM_REPO_stable=https://kubernetes-charts.storage.googleapis.com/ \
-v helm-config:/root/.config/helm \
-v helm-cache:/root/.cache/helm \
-v ~/.kube:/kube \
  -e KUBECONFIG=/kube/config \
abdennour/helm:${HELM_VERSION}-aws${AWS_CLI_VERSION}-kubectl${KUBECTL_VERSION}
abdennour/kubectl:vx.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
docker run --rm \
-v "${HOME}/.kube:/kube" \
-e KUBECONFIG=/kube/config \
abdennour/kubectl:${KUBECTL_VERSION} get pods
abdennour/jq:alpine-3.12
```sh
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
echo "{\"name\": \"abdennour\"}" > /tmp/file.json
docker run --rm -it -w /data \
-v /tmp:/data abdennour/jq:alpine-${ALPINE_VERSION} \
-r '.name' /data/file.json
# it must show "abdennour" in stdout
abdennour/kind-cli:vx.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# basic example
alias kind='docker run --rm \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "${HOME}/.kube:/kube" \
abdennour/kind-cli:${KIND_CLI_VERSION}'
cat <<EOF | kind create cluster --config -
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
nodes:
- role: control-plane
- role: worker
- role: worker
EOF
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
abdennour/kubectl:vx.y.z-awsx.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
# kubectl + aws-iam-authenticator for EKS
docker run --rm \
-v "${HOME}/.aws:/root/.aws" \
-e AWS_PROFILE=my-aws-profile \
  -e AWS_DEFAULT_REGION=us-west-1 \
-e CLUSTER_NAME=mycluster \
abdennour/kubectl:${KUBECTL_VERSION}-aws${AWS_CLI_VERSION} get pods
This is an image for https://pypi.org/project/nexus3-cli/
abdennour/nexus3-cli:vx.y.z
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
alias nexus3=' docker run --rm -t -v nexus3-cli:/root abdennour/nexus3-cli:v${NEXUS3CLI_VERSION}'
# login
nexus3 login
# Nexus OSS URL (http://localhost:8081):
# Nexus admin username (admin):
# Nexus admin password (admin123):
# Verify server certificate (True):
# Configuration saved to /root/.nexus-cli
#----
More about available commands: https://pypi.org/project/nexus3-cli/
TODO
TODO
Ready to be used as Ansible Managed Host.
abdennour/rhel:x
docker run -it --rm \
--name rhel8 \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
  -p 6666:80 \
--privileged \
abdennour/rhel:8
# then
docker exec -it rhel8 yum install nginx -y
docker exec -it rhel8 systemctl start nginx
# In the host navigate to : http://localhost:6666
abdennour/rhel:x-ssh
- RHEL + SSHD is up
docker run --rm --name rhel-vm \
-p 2525:22 \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
--privileged \
-d abdennour/rhel:8-ssh
# create user "myuser"
docker exec -it rhel-vm useradd myuser
# set password for the user
docker exec -it rhel-vm sh -c 'echo "Pass1234"| passwd myuser --stdin'
# Now SSH to container without exec:
ssh myuser@localhost -p 2525
##> Put Password: Pass1234
🎉🎉🎉 You ssh! DONE
- RHEL + SSHD is up + Bootstrap Passwordless sudoer user
docker run --rm --name rhel-vm \
-p 2525:22 \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
-v $(pwd)/keys:/data/ssh-key:rw \
--privileged \
-e BOOTSTRAP_USER=myuser \
-e BOOTSTRAP_USER_CAPS=sudoer,ssh-key \
-d abdennour/rhel:8-ssh
# now try to ssh with the bootstrap user
# using the auto-generated private-key
ssh -i $(pwd)/keys/myuser myuser@localhost -p 2525
# it should work
# myuser@<container-id>: $ ....
abdennour/terraform:x.y.z-helmx.y.z
- if you are using Terraform Helm provider, this docker container image for you:
function terraform
{
# get latest versions from .env
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
docker run --rm \
-e HELM_REPO_tn=https://charts.kubernetes.tn \
-e HELM_REPO_stable=https://kubernetes-charts.storage.googleapis.com/ \
-v $(pwd):/code \
-w /code \
  abdennour/terraform:${TERRAFORM_VERSION}-helm${HELM_VERSION} "$@"
}
#
terraform apply
abdennour/terraform:x.y.z-aws-iam-authenticator
- terraform image has aws-iam-authenticator binary in the PATH without extra configuration.
abdennour/terraform:x.y.z-helmx.y.z-aws-iam-auth
- same like the first (abdennour/terraform:x.y.z-helmx.y.z) but it contains also the aws-iam-authenticator binary in the PATH.
abdennour/ubuntu-desktop:x.y.z-devtools-
export $(curl -SsL https://raw.githubusercontent.com/abdennour/dockerfiles/master/.env | xargs);
docker run -it --rm --name daemon -d \
-v vol-certs:/certs \
abdennour/docker:${DOCKER_VERSION}-dind
docker run -it --rm -d --name desktop \
-v vol-certs:/certs \
-e VNC_RESOLUTION=1920x1080 \
# -e VNC_VIEW_ONLY=true \
-e DOCKER_HOST=tcp://daemon:2376 \
-e DOCKER_TLS_VERIFY=1 \
-e DOCKER_TLS_CERTDIR=/certs \
-e DOCKER_CERT_PATH=/certs/client \
-e EXTRA_PATH=/home/user/bin \
-e SUDO_FORCE_REMOVE=no \
-p 9889:6901 \
--shm-size=256m \
abdennour/ubuntu-desktop
# Now visit http://localhost:9889
If you ask how you can upgrade these versions — or, more generally, how you can contribute — then check CONTRIBUTING.md.
- Abdennour Toumi < http://kubernetes.tn | ceo@kubernetes.tn >
This software is licensed under MIT license.