-
Notifications
You must be signed in to change notification settings - Fork 983
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
enable openldap uid/gid to be specified at runtime #336
enable openldap uid/gid to be specified at runtime #336
Conversation
Hello, Thanks |
Hi Bertrand,
I would like to specify the UID/GID as env vars at time of container start
and not build time.
The motivation is the following:
I have an environment where the docker stack is defined in an ansible play.
The docker user is also specified in the ansible playbook.
The user uid/gid is then passed in the docker-compose at the time of
running the playbook to create the docker stack.
E.g., The following docker-compose.yml template (jinja2 template) is used
by ansible to specify the inputs at the time the docker admin stack
playbook is run:
…---
#Requirement: Set environmental variables: USERDIR, PUID, PGID,
MYSQL_ROOT_PASSWORD, and TZ as explained in the reference.
version: "3.6"
networks:
default:
driver: bridge
net:
external: false
attachable: true
traefik-public:
external: true
services:
######### FRONTENDS ##########
# Traefik Reverse Proxy
traefik:
hostname: traefik
image: traefik:latest
container_name: traefik
restart: always
domainname: {{ external_domainname }}
networks:
- default
- traefik-public
ports:
- "80:80"
- "443:443"
# - "XXXX:8080"
environment:
- PUID={{ docker_user_uid }}
- PGID={{ docker_user_gid }}
- TZ={{ timezone }}
- CLOUDFLARE_EMAIL={{ cloudflare_email }}
- CLOUDFLARE_API_KEY={{ cloudflare_apikey }}
- CF_API_EMAIL={{ cloudflare_email }}
- CF_API_KEY={{ cloudflare_apikey }}
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- {{ docker_container_dir }}/traefik:/etc/traefik
- {{ docker_container_dir }}/shared:/shared
- {{ docker_container_dir }}/traefik/certs:/certs
labels:
- "traefik.enable=true"
- "traefik.backend=traefik"
- "traefik.frontend.rule=Host:traefik.{{ internal_domainname }}"
- "traefik.port=8080"
- "traefik.docker.network=traefik-public"
- "traefik.frontend.headers.SSLRedirect=true"
- "traefik.frontend.headers.STSSeconds=315360000"
- "traefik.frontend.headers.browserXSSFilter=true"
- "traefik.frontend.headers.contentTypeNosniff=true"
- "traefik.frontend.headers.forceSTSHeader=true"
- "traefik.frontend.headers.SSLHost={{ external_domainname }}"
- "traefik.frontend.headers.STSIncludeSubdomains=true"
- "traefik.frontend.headers.STSPreload=true"
- "traefik.frontend.headers.frameDeny=true"
#Portainer - WebUI for Containers
portainer:
image: portainer/portainer
container_name: portainer
restart: always
command: -H unix:///var/run/docker.sock
environment:
- TZ={{ timezone }}
networks:
- traefik-public
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- {{ docker_container_dir }}/portainer/data:/data
- {{ docker_container_dir }}/shared:/shared
labels:
- "traefik.enable=true"
- "traefik.backend=portainer"
- "traefik.frontend.rule=Host:portainer.{{ internal_domainname }}"
- "traefik.port=9000"
- "traefik.docker.network=traefik-public"
- "traefik.frontend.headers.SSLRedirect=true"
- "traefik.frontend.headers.STSSeconds=315360000"
- "traefik.frontend.headers.browserXSSFilter=true"
- "traefik.frontend.headers.contentTypeNosniff=true"
- "traefik.frontend.headers.forceSTSHeader=true"
- "traefik.frontend.headers.SSLHost={{ external_domainname }}"
- "traefik.frontend.headers.STSIncludeSubdomains=true"
- "traefik.frontend.headers.STSPreload=true"
- "traefik.frontend.headers.frameDeny=true"
## must have --copy-service for runtime bootstrap to work
## ref: #163
openldap:
container_name: "openldap"
# image: {{ docker_registry }}/openldap:latest
image: {{ openldap_image }}
command: --copy-service --loglevel debug
ports:
- 389:389
restart: "unless-stopped"
networks:
- net
volumes:
- {{ docker_container_dir }}/openldap/slapd/database:/var/lib/ldap
- {{ docker_container_dir }}/openldap/slapd/config:/etc/ldap/slapd.d
- {{ docker_container_dir
}}/openldap/slapd/certs:/container/service/slapd/assets/certs
- {{ docker_container_dir
}}/openldap/ldif:/container/service/slapd/assets/config/bootstrap/ldif/custom
## custom schema
## ref: #177
- {{ docker_container_dir
}}/openldap/schema/sudo.schema:/container/service/slapd/assets/config/bootstrap/schema/sudo.schema
environment:
# PUID: {{ docker_user_uid }}
# PGID: {{ docker_user_gid }}
LDAP_OPENLDAP_UID: {{ docker_user_uid }}
LDAP_OPENLDAP_GID: {{ docker_user_gid }}
LDAP_ORGANISATION: "{{ ldap_internal.ldap_organisation }}"
LDAP_DOMAIN: "{{ ldap_internal.ldap_domain }}"
LDAP_BASE_DN: "{{ ldap_internal.ldap_base_dn }}"
SLAPD_ADDITIONAL_MODULES: "memberof"
LDAP_ADMIN_PASSWORD: "{{ ldap_internal.ldap_admin_password }}"
LDAP_READONLY_USER: "{{ ldap_internal.ldap_readonly_user }}"
LDAP_READONLY_USER_USERNAME: "{{
ldap_internal.ldap_readonly_user_username }}"
LDAP_READONLY_USER_PASSWORD: "{{
ldap_internal.ldap_readonly_user_password }}"
## ref: https://github.com/alterrebe/docker-mail-relay
postfix:
container_name: "postfix"
image: {{ postfix_image }}
restart: "unless-stopped"
networks:
- net
healthcheck:
# test: [ "CMD", "sh", "-c", "netstat -an | fgrep 587 | fgrep -q LISTEN" ]
test: [ "CMD", "sh", "-c", "netstat -an | fgrep 25 | fgrep -q LISTEN" ]
interval: 10s
timeout: 5s
start_period: 10s
retries: 2
ports:
- "1025:25"
- "1587:587"
environment:
TZ: {{ timezone }}
# INBOUND_DEBUGGING: 1
RELAY_HOST_NAME: {{ inventory_hostname }}
EXT_RELAY_HOST: {{ smtp_relay_host }}
EXT_RELAY_PORT: {{ smtp_relay_port }}
SMTP_LOGIN: {{ smtp_relay_username }}
SMTP_PASSWORD: {{ smtp_relay_password }}
ACCEPTED_NETWORKS: {{ smtp_relay_accepted_networks }}
# ALLOWED_SENDER_DOMAINS: {{ postfix_allowed_sender_domains }}
ROOT_ALIAS_ADDRESS: {{ root_alias_email }}
MAILMAN_TRANSPORT: {{ mailman_transport }}
RELAY_EXCLUDES: {{ smtp_relay_excludes }}
cobbler:
container_name: "cobbler"
# image: {{ docker_registry }}/cobbler:latest
image: {{ cobbler_image }}
privileged: true
ports:
- 69:69
- 9080:80
- 9443:443
- 25151:25151
restart: "unless-stopped"
networks:
# - internal
- net
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
- {{ docker_container_dir }}/cobbler:/etc/cobbler
- {{ docker_container_dir
}}/cobbler/var/www/cobbler/images:/var/www/cobbler/images
- {{ docker_container_dir
}}/cobbler/var/www/cobbler/ks_mirror:/var/www/cobbler/ks_mirror
- {{ docker_container_dir
}}/cobbler/var/www/cobbler/links:/var/www/cobbler/links
- {{ docker_container_dir
}}/cobbler/var/lib/cobbler/config:/var/lib/cobbler/config
- {{ docker_container_dir }}/cobbler/var/lib/tftpboot:/var/lib/tftpboot
- {{ docker_container_dir }}/cobbler/dist/centos:/mnt:ro
environment:
PUID: {{ docker_user_uid }}
PGID: {{ docker_user_gid }}
DEFAULT_ROOT_PASSWD: cobbler
HOST_IP_ADDR: {{
hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
HOST_HTTP_PORT: 80
# HOST_HTTP_PORT: 9080
# HOST_HTTP_PORT: 9443
COBBLER_WEB_USER: cobbler
COBBLER_WEB_PASSWD: cobbler
COBBLER_WEB_REALM: Cobbler
COBBLER_LANG: en_US
COBBLER_KEYBOARD: us
COBBLER_TZ: {{ timezone }}
TZ: {{ timezone }}
The ansible play would look like:
---
# BLOCK: Create docker_admin_server group and user, enable sudo access
- block:
- name: create {{ docker_user_group }} group
group: name={{ docker_user_group }} system=yes state=present
- name: Create docker user to run admin stack containers
user:
name: "{{ docker_user_username }}"
password: "{{ docker_user_password | password_hash('sha512') }}"
group: "{{ docker_user_group }}"
update_password: on_create
state: present
createhome: yes
shell: "{{ docker_user_shell }}"
groups: adm,cdrom,dip,plugdev,docker
append: yes
register: docker_user_verify_user
#- debug:
# var: docker_user_verify_user
# verbosity: 1
- name: Set Fact "docker_user_uid"
set_fact: docker_user_uid={{ docker_user_verify_user.uid }}
- name: Set Fact "docker_user_gid"
set_fact: docker_user_gid={{ docker_user_verify_user.group }}
- name: Set Fact "docker_user_uid"
set_fact: docker_user_home={{ docker_user_verify_user.home }}
- name: Enable sudo for admin user
user:
name={{ docker_user_username }}
groups=sudo
append=yes
when: docker_user_sudo_access | bool
# Create admin_server directories and configs
- name: Ensure admin dirs exist
file:
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner | default( docker_user_username ) }}"
group: "{{ item.owner | default( docker_user_group ) }}"
mode: "{{ item.mode | default(omit) }}"
recurse: "{{ item.recurse | default( omit ) }}"
with_items:
- { path: "{{ script_dir }}", owner: "root", group: "root", recurse: yes }
- { path: "{{ fwbackups_dir }}", owner: "root", group: "root",
recurse: yes }
- { path: "{{ backups_dir }}", owner: "root", group: "root", recurse: yes }
- { path: "{{ backups_dir }}/records/daily", owner: "root", group: "root" }
- { path: "{{ backups_dir }}/records/monthly", owner: "root",
group: "root" }
- { path: "{{ backups_log_dir }}", owner: "root", group: "root" }
- { path: "{{ docker_container_dir }}/traefik" }
- { path: "{{ docker_container_dir }}/traefik/rules" }
- { path: "{{ docker_container_dir }}/traefik/certs" }
- { path: "{{ docker_container_dir }}/traefik/acme" }
- { path: "{{ docker_container_dir }}/shared" }
- { path: "{{ docker_container_dir }}/portainer" }
- { path: "{{ docker_container_dir }}/postfix" }
- { path: "{{ docker_container_dir }}/openldap" }
- { path: "{{ docker_container_dir }}/openldap/certs" }
- { path: "{{ docker_container_dir }}/openldap/ldif" }
- { path: "{{ docker_container_dir }}/openldap/schema" }
- { path: "{{ docker_container_dir }}/openldap/slapd" }
- { path: "{{ docker_container_dir }}/openldap/slapd/database" }
- { path: "{{ docker_container_dir }}/openldap/slapd/config" }
- { path: "{{ docker_container_dir }}/cobbler" }
- { path: "{{ docker_container_dir }}/cobbler/etc" }
- { path: "{{ docker_container_dir }}/cobbler/var/www/cobbler/images" }
- { path: "{{ docker_container_dir }}/cobbler/var/www/cobbler/ks_mirror" }
- { path: "{{ docker_container_dir }}/cobbler/var/www/cobbler/links" }
- { path: "{{ docker_container_dir }}/cobbler/var/lib/cobbler/config" }
- { path: "{{ docker_container_dir }}/cobbler/var/lib/tftpboot" }
- { path: "{{ docker_container_dir }}/cobbler/dist/centos" }
- name: Install admin_server app config settings
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "{{ item.mode | default('0664') }}"
owner: "{{ item.owner | default( docker_user_username ) }}"
group: "{{ item.owner | default( docker_user_group ) }}"
with_items:
- {src: 'ldap/sudo.schema', dest: "{{ docker_container_dir
}}/openldap/schema/sudo.schema"}
- {src: 'ldap/johnson.local.ldif', dest: "{{ docker_container_dir
}}/openldap/ldif/bootstrap.ldif"}
- {src: 'ldap/samba_indices.ldif', dest: "{{ docker_container_dir
}}/openldap/ldif/samba_indices.ldif"}
- {src: 'scripts/backups/job-backup-incremental.sh', dest: "{{
script_dir }}", mode: "0770", owner: "root", group: "root"}
- {src: 'scripts/backups/rsync-incremental-backup-local', dest:
"{{ script_dir }}", mode: "0770", owner: "root", group: "root"}
- {src: 'scripts/backups/fwbackup.sh', dest: "{{ script_dir }}",
mode: "0770", owner: "root", group: "root"}
- {src: 'scripts/docker/docker-utils.sh', dest: "{{
docker_container_dir }}", mode: "0770"}
- {src: 'scripts/reset-openldap.sh', dest: "{{
docker_container_dir }}", mode: "0770"}
- name: Install admin_server docker config settings
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "{{ item.mode | default('0664') }}"
owner: "{{ docker_user_username }}"
group: "{{ docker_user_group }}"
with_items:
- {src: 'config-traefik.toml.j2', dest: "{{ docker_container_dir
}}/traefik/traefik.toml"}
- {src: 'traefik/rules/nginx.toml.j2', dest: "{{
docker_container_dir }}/traefik/rules/nginx.toml"}
- {src: 'postfix/master.conf.j2', dest: "{{ docker_container_dir
}}/postfix/master.cf"}
- {src: 'postfix/main.conf.j2', dest: "{{ docker_container_dir
}}/postfix/main.cf"}
- {src: 'config-smbcredentials.conf.j2', dest:
"/root/.smbcredentials", mode: "0600" }
- {src: 'config-fwbackup.conf.j2', dest: "/root/.fwbackup.cfg",
mode: "0600" }
- {src: 'compose.env.j2', dest: "{{ docker_container_dir }}/.env" }
- {src: 'docker-compose.yml.j2', dest: "{{ docker_container_dir
}}/docker-compose.yml" }
- name: Ensure admin mount dirs exist
file:
path: "{{ item.name }}"
state: directory
with_items: "{{ admin_mounts }}"
- name: Add admin mounts to fstab
mount:
name: "{{ item.name }}"
src: "{{ item.src }}"
fstype: "{{ item.fstype }}"
opts: "{{ item.options }}"
state: mounted
with_items: "{{ admin_mounts }}"
## create acme.json for letsencrypt
- name: ensure acme.json file exists
copy:
content: ""
dest: "{{ docker_container_dir }}/traefik/acme/acme.json"
force: "{{ overwrite_configs }}"
owner: "{{ docker_user_username }}"
group: "{{ docker_user_group }}"
mode: 0600
## for some reason copy above does not enforce correct chmod perms
## set correct perms on acme.json
- name: ensure acme.json has correct perms
file:
path: "{{ docker_container_dir }}/traefik/acme/acme.json"
owner: "{{ docker_user_username }}"
group: "{{ docker_user_group }}"
mode: 0600
# BLOCK: run docker-compose to setup admin_server services
- block:
- name: Init a new swarm with default parameters
docker_swarm:
state: present
- name: Create docker networks
docker_network:
name: "{{ item }}"
with_items:
# - internal
- net
- traefik-public
## run docker compose
## ref: https://stackoverflow.com/questions/44962282/how-to-write-an-ansible-playbook-with-docker-compose
- name: run the services defined in docker-compose.yml
docker_compose:
project_src: "{{ docker_container_dir }}"
remove_orphans: "{{ docker_remove_orphans }}"
# BLOCK: Configure firewalld
- block:
- name: Installing Firewall Packages
package:
name: "{{ firewalld_packages }}"
state: present
# state: latest
# update_cache: yes
- name: Allow admin services through the firewall
firewalld:
service: "{{ item }}"
permanent: yes
state: enabled
immediate: yes
with_items:
"{{ exposed_services }}"
notify:
- reload firewalld
- name: Allow admin_server ports through the firewall
firewalld:
port: "{{ item }}"
zone: internal
permanent: true
state: enabled
immediate: yes
with_items:
"{{ exposed_ports }}"
notify:
- reload firewalld
## ref: https://forums.docker.com/t/no-route-to-host-network-request-from-container-to-host-ip-port-published-from-other-container/39063/5
- name: Add docker networks to firewall
firewalld:
source: 172.0.0.0/8
permanent: true
zone: internal
state: enabled
immediate: yes
notify:
- reload firewalld
tags:
- firewall
I hope this helps in understanding the motivation.
On Thu, Aug 1, 2019 at 8:36 AM Bertrand ***@***.***> wrote:
Hello,
thanks for the PR, can you describe the problem solved by this ?
Thanks
—
You are receiving this because you authored the thread.
Reply to this email directly, view it on GitHub
<#336?email_source=notifications&email_token=AAP3JD3M36WM7YHDCBZ4SATQCLKE3A5CNFSM4IEQ3GF2YY3PNVWWK3TUL52HS4DFVREXG43VMVBW63LNMVXHJKTDN5WW2ZLOORPWSZGOD3KODEI#issuecomment-517267857>,
or mute the thread
<https://github.com/notifications/unsubscribe-auth/AAP3JD2FK7BKKY23YWOZDFTQCLKE3ANCNFSM4IEQ3GFQ>
.
|
This is very common practice - e.g. all LinuxServer/ images have this setting, and others. Usually this is only needed when you want to use host-mounts, not named volumes, and thus need to align with the host uid/gid for other tooling or e.g. shared volume / smb / nfs permissions. |
Thanks for your feedback. This sounds good to me.
May the user adjustments be done in |
Hi Bertrand, I wasn't sure where the startup.sh was ultimately getting called from and did not want to adversely impact any of the existing code. In order to minimize the impact, I looked for the least invasive approach which seemed to me the approach taken by the jenkins docker implementation for the related feature of defining the user/group id at startup used here: https://github.com/sudo-bmitch/jenkins-docker/blob/master/entrypoint.sh If the startup.sh is simply getting kicked off at container start, then I imagine it should work fine with no adverse impact. I could rework the logic into the startup.sh and test for the specific configuration I am using. I would imagine testing for all of the existing configurations would be required to make sure that is the case. Is there test automation already setup to test PR changes to run/test each of the configurations available in order to determine if the relevant PR does not break anything? If so, and if the test automation is kicked off automatically upon submitting the PR, I can rework the logic into the startup.sh as mentioned above and push the changes through this PR. Just let me know if this approach is desired. -Lee |
Thanks Lee, startup.sh is called when the container starts. I guess group and user adjustments would fit great just before
Tests are run via travis, but you can run them locally. Make sure to have bats installed before: |
Hi Bertrand, As requested, I moved the logic from entrypoint.sh into the startup.sh and tested successfully:
|
@@ -10,8 +10,8 @@ ARG PQCHECKER_MD5=c005ce596e97d13e39485e711dcbc7e1 | |||
|
|||
# Add openldap user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added | |||
# If explicit uid or gid is given, use it. | |||
RUN if [ -z "${LDAP_OPENLDAP_GID}" ]; then groupadd -r openldap; else groupadd -r -g ${LDAP_OPENLDAP_GID} openldap; fi \ | |||
&& if [ -z "${LDAP_OPENLDAP_UID}" ]; then useradd -r -g openldap openldap; else useradd -r -g openldap -u ${LDAP_OPENLDAP_UID} openldap; fi | |||
RUN if [ -z "${LDAP_OPENLDAP_GID}" ]; then groupadd -g 911 -r openldap; else groupadd -r -g ${LDAP_OPENLDAP_GID} openldap; fi \ |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is not necessary since the startup.sh will check to see if the ID matches the defined or default.
If the id did not match, the startup.sh will update the id and make the necessary chown updates.
This saves time in the case where the default is to be expected/used in the startup.sh runtime since files impacted by the chown operations should be lesser and performance enhanced.
RUN if [ -z "${LDAP_OPENLDAP_GID}" ]; then groupadd -r openldap; else groupadd -r -g ${LDAP_OPENLDAP_GID} openldap; fi \ | ||
&& if [ -z "${LDAP_OPENLDAP_UID}" ]; then useradd -r -g openldap openldap; else useradd -r -g openldap -u ${LDAP_OPENLDAP_UID} openldap; fi | ||
RUN if [ -z "${LDAP_OPENLDAP_GID}" ]; then groupadd -g 911 -r openldap; else groupadd -r -g ${LDAP_OPENLDAP_GID} openldap; fi \ | ||
&& if [ -z "${LDAP_OPENLDAP_UID}" ]; then useradd -u 911 -r -g openldap openldap; else useradd -r -g openldap -u ${LDAP_OPENLDAP_UID} openldap; fi |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This is not necessary since the startup.sh will check to see if the ID matches the defined or default.
If the id did not match, the startup.sh will update the id and make the necessary chown updates.
This saves time in the case where the default is to be expected/used in the startup.sh runtime since files impacted by the chown operations should be lesser and performance enhanced.
HI! Any news on this PR? It looks promising! (missing documentation though) |
Hi - I just added a brief description for the new env vars in the environments section of the doc here: |
Thanks a lot ! |
No description provided.