
Commit d8a8274

Use almalinux as the Docker base image (#80524)
Closes #76681. Our approach of building Docker images from `scratch` has caused problems at Docker Hub. Fix this by removing the scratch-based build process entirely and instead basing the default distribution on `almalinux:8.4-minimal`. AlmaLinux is binary-compatible with RHEL, and therefore very similar to UBI.
1 parent 8380123 commit d8a8274
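
As a rough sketch of the outcome (illustrative only — the real Dockerfile is generated from the Groovy template changed below, and also handles user creation, labels and the entrypoint), the default image now has the same two-stage shape that the UBI image already used:

FROM almalinux:8.4-minimal AS builder
COPY tini /bin/tini
RUN mkdir /usr/share/elasticsearch
# ... fetch and extract the Elasticsearch archive into /usr/share/elasticsearch ...

FROM almalinux:8.4-minimal
# The -minimal base ships microdnf, hence the package_manager change in build.gradle below.
RUN microdnf update --setopt=tsflags=nodocs -y && \
    microdnf install --setopt=tsflags=nodocs -y \
      nc shadow-utils zip findutils unzip procps-ng && \
    microdnf clean all
ENV ELASTIC_CONTAINER true
WORKDIR /usr/share/elasticsearch
COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch
COPY --from=builder --chown=0:0 /bin/tini /bin/tini

All of the machinery that only existed to support `scratch` — statically building curl, assembling a rootfs with mknod and busybox, and the final `FROM scratch` stage — is what the Dockerfile diff below deletes.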

6 files changed: +53 −238 lines


build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/DockerBase.java

Lines changed: 2 additions & 2 deletions
@@ -12,7 +12,7 @@
  * This class models the different Docker base images that are used to build Docker distributions of Elasticsearch.
  */
 public enum DockerBase {
-    CENTOS("centos:8", ""),
+    DEFAULT("almalinux:8.4-minimal", ""),
 
     // "latest" here is intentional, since the image name specifies "8"
     UBI("docker.elastic.co/ubi8/ubi-minimal:latest", "-ubi8"),
@@ -21,7 +21,7 @@ public enum DockerBase {
     IRON_BANK("${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}", "-ironbank"),
 
     // Base image with extras for Cloud
-    CLOUD("centos:8", "-cloud"),
+    CLOUD("almalinux:8.4-minimal", "-cloud"),
 
     // Based on CLOUD above, with more extras. We don't set a base image because
     // we programmatically extend from the Cloud image.

distribution/docker/build.gradle

Lines changed: 4 additions & 4 deletions
@@ -96,7 +96,7 @@ ext.expansions = { Architecture architecture, DockerBase base ->
   'config_dir'         : base == DockerBase.IRON_BANK ? 'scripts' : 'config',
   'git_revision'       : BuildParams.gitRevision,
   'license'            : base == DockerBase.IRON_BANK ? 'Elastic License 2.0' : 'Elastic-License-2.0',
-  'package_manager'    : base == DockerBase.UBI ? 'microdnf' : 'yum',
+  'package_manager'    : base == DockerBase.IRON_BANK ? 'yum' : 'microdnf',
   'docker_base'        : base.name().toLowerCase(),
   'version'            : VersionProperties.elasticsearch,
   'major_minor_version': "${major}.${minor}",
@@ -121,7 +121,7 @@ private static String toCamel(String input) {
 private static String taskName(String prefix, Architecture architecture, DockerBase base, String suffix) {
   return prefix +
     (architecture == Architecture.AARCH64 ? 'Aarch64' : '') +
-    (base == DockerBase.CENTOS ? "" : toCamel(base.name())) +
+    (base == DockerBase.DEFAULT ? "" : toCamel(base.name())) +
     suffix
 }
 
@@ -361,7 +361,7 @@ void addBuildDockerImageTask(Architecture architecture, DockerBase base) {
 
     baseImages = [baseImage]
     buildArgs = buildArgsMap
-  } else if (base == DockerBase.CENTOS || base == DockerBase.CLOUD) {
+  } else if (base == DockerBase.DEFAULT || base == DockerBase.CLOUD) {
     baseImages = ['alpine:3.13', base.image]
   } else {
     baseImages = [base.image]
@@ -443,7 +443,7 @@ subprojects { Project subProject ->
   apply plugin: 'distribution'
 
   final Architecture architecture = subProject.name.contains('aarch64-') ? Architecture.AARCH64 : Architecture.X64
-  DockerBase base = DockerBase.CENTOS
+  DockerBase base = DockerBase.DEFAULT
   if (subProject.name.contains('ubi-')) {
     base = DockerBase.UBI
   } else if (subProject.name.contains('ironbank-')) {

distribution/docker/src/docker/Dockerfile

Lines changed: 12 additions & 187 deletions
@@ -19,7 +19,7 @@
    add as many newlines here as necessary to improve legibility.
 */ %>
 
-<% if (docker_base == "ubi") { %>
+<% if (docker_base == 'default' || docker_base == "ubi") { %>
 ################################################################################
 # Build stage 0 `builder`:
 # Extract Elasticsearch artifact
@@ -66,158 +66,6 @@ FROM ${base_image} AS builder
 COPY tini /bin/tini
 RUN chmod 0555 /bin/tini
 
-<% } else { %>
-
-<% /* CentOS builds are actaully a custom base image with a minimal set of dependencies */ %>
-
-################################################################################
-# Stage 1. Build curl statically. Installing it from RPM on CentOS pulls in too
-# many dependencies.
-################################################################################
-FROM alpine:3.13 AS curl
-
-ENV VERSION 7.71.0
-ENV TARBALL_URL https://curl.haxx.se/download/curl-\${VERSION}.tar.xz
-ENV TARBALL_PATH curl-\${VERSION}.tar.xz
-
-# Install dependencies
-RUN <%= retry.loop('apk', 'apk add gnupg gcc make musl-dev openssl-dev openssl-libs-static file') %>
-
-RUN mkdir /work
-WORKDIR /work
-
-# Fetch curl sources and files for validation. Note that alpine's `wget` doesn't have retry options.
-RUN function retry_wget() { \\
-      local URL="\$1" ; \\
-      local DEST="\$2" ; \\
-      <%= retry.loop('wget', 'wget "\$URL\" -O "\$DEST"', 6, 'return') %> ; \\
-    } ; \\
-    retry_wget "https://daniel.haxx.se/mykey.asc" "curl-gpg.pub" && \\
-    retry_wget "\${TARBALL_URL}.asc" "\${TARBALL_PATH}.asc" && \\
-    retry_wget "\${TARBALL_URL}" "\${TARBALL_PATH}"
-
-# Validate source
-RUN gpg --import --always-trust "curl-gpg.pub" && \\
-    gpg --verify "\${TARBALL_PATH}.asc" "\${TARBALL_PATH}"
-
-# Unpack and build
-RUN set -e ; \\
-    tar xfJ "\${TARBALL_PATH}" ; \\
-    cd "curl-\${VERSION}" ; \\
-    if ! ./configure --disable-shared --with-ca-fallback --with-ca-bundle=/etc/pki/tls/certs/ca-bundle.crt ; then \\
-      [[ -e config.log ]] && cat config.log ; \\
-      exit 1 ; \\
-    fi ; \\
-    make curl_LDFLAGS="-all-static" ; \\
-    cp src/curl /work/curl ; \\
-    strip /work/curl
-
-################################################################################
-# Step 2. Create a minimal root filesystem directory. This will form the basis
-# for our image.
-################################################################################
-FROM ${base_image} AS rootfs
-
-ENV TINI_VERSION 0.19.0
-
-# Start off with an up-to-date system
-RUN ${package_manager} update --setopt=tsflags=nodocs -y
-
-# Create a directory into which we will install files
-RUN mkdir /rootfs
-
-# Create required devices
-RUN mkdir -m 755 /rootfs/dev && \\
-    mknod -m 600 /rootfs/dev/console c 5 1 && \\
-    mknod -m 600 /rootfs/dev/initctl p && \\
-    mknod -m 666 /rootfs/dev/full c 1 7 && \\
-    mknod -m 666 /rootfs/dev/null c 1 3 && \\
-    mknod -m 666 /rootfs/dev/ptmx c 5 2 && \\
-    mknod -m 666 /rootfs/dev/random c 1 8 && \\
-    mknod -m 666 /rootfs/dev/tty c 5 0 && \\
-    mknod -m 666 /rootfs/dev/tty0 c 4 0 && \\
-    mknod -m 666 /rootfs/dev/urandom c 1 9 && \\
-    mknod -m 666 /rootfs/dev/zero c 1 5
-
-# Install a minimal set of dependencies, and some for Elasticsearch
-RUN ${package_manager} --installroot=/rootfs --releasever=/ --setopt=tsflags=nodocs \\
-    --setopt=group_package_types=mandatory -y \\
-    --skip-broken \\
-    install basesystem bash zip zlib
-
-# `tini` is a tiny but valid init for containers. This is used to cleanly
-# control how ES and any child processes are shut down.
-#
-# The tini GitHub page gives instructions for verifying the binary using
-# gpg, but the keyservers are slow to return the key and this can fail the
-# build. Instead, we check the binary against the published checksum.
-#
-# Also, we use busybox instead of installing utility RPMs, which pulls in
-# all kinds of stuff we don't want.
-RUN set -e ; \\
-    TINI_BIN="" ; \\
-    BUSYBOX_COMMIT="" ; \\
-    case "\$(arch)" in \\
-        aarch64) \\
-            BUSYBOX_COMMIT='8a500845daeaeb926b25f73089c0668cac676e97' ; \\
-            TINI_BIN='tini-arm64' ; \\
-            ;; \\
-        x86_64) \\
-            BUSYBOX_COMMIT='cc81bf8a3c979f596af2d811a3910aeaa230e8ef' ; \\
-            TINI_BIN='tini-amd64' ; \\
-            ;; \\
-        *) echo >&2 "Unsupported architecture \$(arch)" ; exit 1 ;; \\
-    esac ; \\
-    curl --retry 10 -S -L -O "https://github.com/krallin/tini/releases/download/v0.19.0/\${TINI_BIN}" ; \\
-    curl --retry 10 -S -L -O "https://github.com/krallin/tini/releases/download/v0.19.0/\${TINI_BIN}.sha256sum" ; \\
-    sha256sum -c "\${TINI_BIN}.sha256sum" ; \\
-    rm "\${TINI_BIN}.sha256sum" ; \\
-    mv "\${TINI_BIN}" /rootfs/bin/tini ; \\
-    chmod 0555 /rootfs/bin/tini ; \\
-    curl --retry 10 -L -O \\
-    # Here we're fetching the same binaries used for the official busybox docker image from their GtiHub repository
-      "https://github.com/docker-library/busybox/raw/\${BUSYBOX_COMMIT}/stable/musl/busybox.tar.xz" ; \\
-    tar -xf busybox.tar.xz -C /rootfs/bin --strip=2 ./bin ; \\
-    rm busybox.tar.xz ;
-
-# Curl needs files under here. More importantly, we change Elasticsearch's
-# bundled JDK to use /etc/pki/ca-trust/extracted/java/cacerts instead of
-# the bundled cacerts.
-RUN mkdir -p /rootfs/etc && \\
-    cp -a /etc/pki /rootfs/etc/
-
-# Cleanup the filesystem
-RUN ${package_manager} --installroot=/rootfs -y clean all && \\
-    cd /rootfs && \\
-    rm -rf \\
-        etc/{X11,centos-release*,csh*,profile*,skel*,yum*} \\
-        sbin/sln \\
-        usr/bin/rpm \\
-        {usr,var}/games \\
-        usr/lib/{dracut,systemd,udev} \\
-        usr/lib64/X11 \\
-        usr/local \\
-        usr/share/{awk,centos-release,cracklib,desktop-directories,gcc-*,i18n,icons,licenses,xsessions,zoneinfo} \\
-        usr/share/{man,doc,info,games,gdb,ghostscript,gnome,groff,icons,pixmaps,sounds,backgrounds,themes,X11} \\
-        usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} \\
-        var/cache/yum \\
-        var/lib/{rpm,yum} \\
-        var/log/yum.log
-
-# ldconfig
-RUN rm -rf /rootfs/etc/ld.so.cache /rootfs/var/cache/ldconfig && \\
-    mkdir -p --mode=0755 /rootfs/var/cache/ldconfig
-
-COPY --from=curl /work/curl /rootfs/usr/bin/curl
-
-# Ensure that there are no files with setuid or setgid, in order to mitigate "stackclash" attacks.
-RUN find /rootfs -xdev -perm -4000 -exec chmod ug-s {} +
-
-################################################################################
-# Step 3. Fetch the Elasticsearch distribution and configure it for Docker
-################################################################################
-FROM ${base_image} AS builder
-
 <% } %>
 
 RUN mkdir /usr/share/elasticsearch
@@ -285,8 +133,6 @@ RUN mkdir -p /opt/plugins/archive
 RUN chmod -R 0555 /opt/plugins
 <% } %>
 
-<% if (docker_base == "ubi" || docker_base == "iron_bank") { %>
-
 ################################################################################
 # Build stage 1 (the actual Elasticsearch image):
 #
@@ -296,7 +142,17 @@ RUN chmod -R 0555 /opt/plugins
 
 FROM ${base_image}
 
-<% if (docker_base == "ubi") { %>
+<% if (docker_base == "iron_bank") { %>
+<%
+/* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
+/* scripting so this version doesn't have the retry loop featured below. */
+%>
+RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
+    ${package_manager} install --setopt=tsflags=nodocs -y \\
+      nc shadow-utils zip findutils unzip procps-ng && \\
+    ${package_manager} clean all
+
+<% } else { %>
 
 RUN <%= retry.loop(
     package_manager,
@@ -306,49 +162,18 @@ RUN <%= retry.loop(
       "  ${package_manager} clean all"
   ) %>
 
-<% } else { %>
-
-<%
-/* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
-/* scripting so this version doesn't have the retry loop featured above. */
-%>
-RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
-    ${package_manager} install --setopt=tsflags=nodocs -y \\
-      nc shadow-utils zip findutils unzip procps-ng && \\
-    ${package_manager} clean all
-
 <% } %>
 
 RUN groupadd -g 1000 elasticsearch && \\
     adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \\
     chown -R 0:0 /usr/share/elasticsearch
 
-<% } else { %>
-
-################################################################################
-# Stage 4. Build the final image, using the rootfs above as the basis, and
-# copying in the Elasticsearch distribution
-################################################################################
-FROM scratch
-
-# Setup the initial filesystem.
-COPY --from=rootfs /rootfs /
-
-RUN addgroup -g 1000 elasticsearch && \\
-    adduser -D -u 1000 -G elasticsearch -g elasticsearch -h /usr/share/elasticsearch elasticsearch && \\
-    addgroup elasticsearch root && \\
-    chown -R 0:0 /usr/share/elasticsearch
-
-<% } %>
-
 ENV ELASTIC_CONTAINER true
 
 WORKDIR /usr/share/elasticsearch
 COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch
 
-<% if (docker_base == "ubi" || docker_base == "iron_bank") { %>
 COPY --from=builder --chown=0:0 /bin/tini /bin/tini
-<% } %>
 
 <% if (docker_base == 'cloud') { %>
 COPY --from=builder --chown=0:0 /opt /opt
docs/changelog/80524.yaml

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+pr: 80524
+summary: Use almalinux as the Docker base image
+area: Packaging
+type: enhancement
+issues:
+ - 76681
