19
19
add as many newlines here as necessary to improve legibility.
20
20
*/ %>
21
21
22
- <% if (docker_base == "ubi") { %>
22
+ <% if (docker_base == 'default' || docker_base == "ubi") { %>
23
23
################################################################################
24
24
# Build stage 0 `builder`:
25
25
# Extract Elasticsearch artifact
@@ -66,158 +66,6 @@ FROM ${base_image} AS builder
66
66
COPY tini /bin/tini
67
67
RUN chmod 0555 /bin/tini
68
68
69
- <% } else { %>
70
-
71
- <% /* CentOS builds are actually a custom base image with a minimal set of dependencies */ %>
72
-
73
- ################################################################################
74
- # Stage 1. Build curl statically. Installing it from RPM on CentOS pulls in too
75
- # many dependencies.
76
- ################################################################################
77
- FROM alpine:3.13 AS curl
78
-
79
- ENV VERSION 7.71.0
80
- ENV TARBALL_URL https://curl.haxx.se/download/curl-\$ {VERSION}.tar.xz
81
- ENV TARBALL_PATH curl-\$ {VERSION}.tar.xz
82
-
83
- # Install dependencies
84
- RUN <%= retry.loop(' apk', ' apk add gnupg gcc make musl-dev openssl-dev openssl-libs-static file') %>
85
-
86
- RUN mkdir /work
87
- WORKDIR /work
88
-
89
- # Fetch curl sources and files for validation. Note that alpine' s `wget` doesn't have retry options.
90
- RUN function retry_wget() { \\
91
- local URL="\$ 1" ; \\
92
- local DEST="\$ 2" ; \\
93
- <%= retry.loop(' wget', ' wget "\$ URL\" -O " \$ DEST"', 6, 'return') %> ; \\
94
- } ; \\
95
- retry_wget " https://daniel.haxx.se/mykey.asc" " curl-gpg.pub" && \\
96
- retry_wget " \$ {TARBALL_URL}.asc" " \$ {TARBALL_PATH}.asc" && \\
97
- retry_wget " \$ {TARBALL_URL}" " \$ {TARBALL_PATH}"
98
-
99
- # Validate source
100
- RUN gpg --import --always-trust " curl-gpg.pub" && \\
101
- gpg --verify " \$ {TARBALL_PATH}.asc" " \$ {TARBALL_PATH}"
102
-
103
- # Unpack and build
104
- RUN set -e ; \\
105
- tar xfJ " \$ {TARBALL_PATH}" ; \\
106
- cd " curl-\$ {VERSION}" ; \\
107
- if ! ./configure --disable-shared --with-ca-fallback --with-ca-bundle=/etc/pki/tls/certs/ca-bundle.crt ; then \\
108
- [[ -e config.log ]] && cat config.log ; \\
109
- exit 1 ; \\
110
- fi ; \\
111
- make curl_LDFLAGS=" -all-static" ; \\
112
- cp src/curl /work/curl ; \\
113
- strip /work/curl
114
-
115
- ################################################################################
116
- # Step 2. Create a minimal root filesystem directory. This will form the basis
117
- # for our image.
118
- ################################################################################
119
- FROM ${base_image} AS rootfs
120
-
121
- ENV TINI_VERSION 0.19.0
122
-
123
- # Start off with an up-to-date system
124
- RUN ${package_manager} update --setopt=tsflags=nodocs -y
125
-
126
- # Create a directory into which we will install files
127
- RUN mkdir /rootfs
128
-
129
- # Create required devices
130
- RUN mkdir -m 755 /rootfs/dev && \\
131
- mknod -m 600 /rootfs/dev/console c 5 1 && \\
132
- mknod -m 600 /rootfs/dev/initctl p && \\
133
- mknod -m 666 /rootfs/dev/full c 1 7 && \\
134
- mknod -m 666 /rootfs/dev/null c 1 3 && \\
135
- mknod -m 666 /rootfs/dev/ptmx c 5 2 && \\
136
- mknod -m 666 /rootfs/dev/random c 1 8 && \\
137
- mknod -m 666 /rootfs/dev/tty c 5 0 && \\
138
- mknod -m 666 /rootfs/dev/tty0 c 4 0 && \\
139
- mknod -m 666 /rootfs/dev/urandom c 1 9 && \\
140
- mknod -m 666 /rootfs/dev/zero c 1 5
141
-
142
- # Install a minimal set of dependencies, and some for Elasticsearch
143
- RUN ${package_manager} --installroot=/rootfs --releasever=/ --setopt=tsflags=nodocs \\
144
- --setopt=group_package_types=mandatory -y \\
145
- --skip-broken \\
146
- install basesystem bash zip zlib
147
-
148
- # `tini` is a tiny but valid init for containers. This is used to cleanly
149
- # control how ES and any child processes are shut down.
150
- #
151
- # The tini GitHub page gives instructions for verifying the binary using
152
- # gpg, but the keyservers are slow to return the key and this can fail the
153
- # build. Instead, we check the binary against the published checksum.
154
- #
155
- # Also, we use busybox instead of installing utility RPMs, which pulls in
156
- # all kinds of stuff we don't want.
157
- RUN set -e ; \\
158
- TINI_BIN="" ; \\
159
- BUSYBOX_COMMIT="" ; \\
160
- case " \$ (arch)" in \\
161
- aarch64) \\
162
- BUSYBOX_COMMIT='8a500845daeaeb926b25f73089c0668cac676e97' ; \\
163
- TINI_BIN='tini-arm64' ; \\
164
- ;; \\
165
- x86_64) \\
166
- BUSYBOX_COMMIT='cc81bf8a3c979f596af2d811a3910aeaa230e8ef' ; \\
167
- TINI_BIN='tini-amd64' ; \\
168
- ;; \\
169
- *) echo >&2 " Unsupported architecture \$ (arch)" ; exit 1 ;; \\
170
- esac ; \\
171
- curl --retry 10 -S -L -O " https://github.com/krallin/tini/releases/download/v0.19.0/\$ {TINI_BIN}" ; \\
172
- curl --retry 10 -S -L -O " https://github.com/krallin/tini/releases/download/v0.19.0/\$ {TINI_BIN}.sha256sum" ; \\
173
- sha256sum -c " \$ {TINI_BIN}.sha256sum" ; \\
174
- rm " \$ {TINI_BIN}.sha256sum" ; \\
175
- mv " \$ {TINI_BIN}" /rootfs/bin/tini ; \\
176
- chmod 0555 /rootfs/bin/tini ; \\
177
- curl --retry 10 -L -O \\
178
- # Here we're fetching the same binaries used for the official busybox docker image from their GitHub repository
179
- " https://github.com/docker-library/busybox/raw/\$ {BUSYBOX_COMMIT}/stable/musl/busybox.tar.xz" ; \\
180
- tar -xf busybox.tar.xz -C /rootfs/bin --strip=2 ./bin ; \\
181
- rm busybox.tar.xz ;
182
-
183
- # Curl needs files under here. More importantly, we change Elasticsearch's
184
- # bundled JDK to use /etc/pki/ca-trust/extracted/java/cacerts instead of
185
- # the bundled cacerts.
186
- RUN mkdir -p /rootfs/etc && \\
187
- cp -a /etc/pki /rootfs/etc/
188
-
189
- # Cleanup the filesystem
190
- RUN ${package_manager} --installroot=/rootfs -y clean all && \\
191
- cd /rootfs && \\
192
- rm -rf \\
193
- etc/{X11,centos-release*,csh*,profile*,skel*,yum*} \\
194
- sbin/sln \\
195
- usr/bin/rpm \\
196
- {usr,var}/games \\
197
- usr/lib/{dracut,systemd,udev} \\
198
- usr/lib64/X11 \\
199
- usr/local \\
200
- usr/share/{awk,centos-release,cracklib,desktop-directories,gcc-*,i18n,icons,licenses,xsessions,zoneinfo} \\
201
- usr/share/{man,doc,info,games,gdb,ghostscript,gnome,groff,icons,pixmaps,sounds,backgrounds,themes,X11} \\
202
- usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} \\
203
- var/cache/yum \\
204
- var/lib/{rpm,yum} \\
205
- var/log/yum.log
206
-
207
- # ldconfig
208
- RUN rm -rf /rootfs/etc/ld.so.cache /rootfs/var/cache/ldconfig && \\
209
- mkdir -p --mode=0755 /rootfs/var/cache/ldconfig
210
-
211
- COPY --from=curl /work/curl /rootfs/usr/bin/curl
212
-
213
- # Ensure that there are no files with setuid or setgid, in order to mitigate " stackclash" attacks.
214
- RUN find /rootfs -xdev -perm -4000 -exec chmod ug-s {} +
215
-
216
- ################################################################################
217
- # Step 3. Fetch the Elasticsearch distribution and configure it for Docker
218
- ################################################################################
219
- FROM ${base_image} AS builder
220
-
221
69
<% } %>
222
70
223
71
RUN mkdir /usr/share/elasticsearch
@@ -285,8 +133,6 @@ RUN mkdir -p /opt/plugins/archive
285
133
RUN chmod -R 0555 /opt/plugins
286
134
<% } %>
287
135
288
- <% if (docker_base == " ubi" || docker_base == " iron_bank") { %>
289
-
290
136
################################################################################
291
137
# Build stage 1 (the actual Elasticsearch image):
292
138
#
@@ -296,7 +142,17 @@ RUN chmod -R 0555 /opt/plugins
296
142
297
143
FROM ${base_image}
298
144
299
- <% if (docker_base == " ubi") { %>
145
+ <% if (docker_base == "iron_bank") { %>
146
+ <%
147
+ /* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
148
+ /* scripting so this version doesn' t have the retry loop featured below. */
149
+ %>
150
+ RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
151
+ ${package_manager} install --setopt=tsflags=nodocs -y \\
152
+ nc shadow-utils zip findutils unzip procps-ng && \\
153
+ ${package_manager} clean all
154
+
155
+ <% } else { %>
300
156
301
157
RUN <%= retry.loop(
302
158
package_manager,
@@ -306,49 +162,18 @@ RUN <%= retry.loop(
306
162
" ${package_manager} clean all"
307
163
) %>
308
164
309
- <% } else { %>
310
-
311
- <%
312
- /* Reviews of the Iron Bank Dockerfile said that they preferred simpler */
313
- /* scripting so this version doesn't have the retry loop featured above. */
314
- %>
315
- RUN ${package_manager} update --setopt=tsflags=nodocs -y && \\
316
- ${package_manager} install --setopt=tsflags=nodocs -y \\
317
- nc shadow-utils zip findutils unzip procps-ng && \\
318
- ${package_manager} clean all
319
-
320
165
<% } %>
321
166
322
167
RUN groupadd -g 1000 elasticsearch && \\
323
168
adduser -u 1000 -g 1000 -G 0 -d /usr/share/elasticsearch elasticsearch && \\
324
169
chown -R 0:0 /usr/share/elasticsearch
325
170
326
- <% } else { %>
327
-
328
- ################################################################################
329
- # Stage 4. Build the final image, using the rootfs above as the basis, and
330
- # copying in the Elasticsearch distribution
331
- ################################################################################
332
- FROM scratch
333
-
334
- # Setup the initial filesystem.
335
- COPY --from=rootfs /rootfs /
336
-
337
- RUN addgroup -g 1000 elasticsearch && \\
338
- adduser -D -u 1000 -G elasticsearch -g elasticsearch -h /usr/share/elasticsearch elasticsearch && \\
339
- addgroup elasticsearch root && \\
340
- chown -R 0:0 /usr/share/elasticsearch
341
-
342
- <% } %>
343
-
344
171
ENV ELASTIC_CONTAINER true
345
172
346
173
WORKDIR /usr/share/elasticsearch
347
174
COPY --from=builder --chown=0:0 /usr/share/elasticsearch /usr/share/elasticsearch
348
175
349
- <% if (docker_base == " ubi" || docker_base == " iron_bank") { %>
350
176
COPY --from=builder --chown=0:0 /bin/tini /bin/tini
351
- <% } %>
352
177
353
178
<% if (docker_base == 'cloud' ) { %>
354
179
COPY --from=builder --chown=0:0 /opt /opt
0 commit comments