feat(openshift): container Build action (#4726)
* chore(openshift): rename container.ts -> deploy.ts

* feat(openshift): container Build action, buildmode local-docker

* chore: fix lint on openshift/build.ts

* feat(openshift): in-cluster container Build using kaniko

* chore(openshift): add openshift logo to example web page

* chore(openshift): update demo-project readme
Walther authored Jun 30, 2023
1 parent 0773b57 commit bdf9e0f
Showing 15 changed files with 333 additions and 51 deletions.
65 changes: 34 additions & 31 deletions core/src/plugins/kubernetes/config.ts
@@ -627,37 +627,7 @@ export const kubernetesConfigBase = () =>
imagePullSecrets: imagePullSecretsSchema(),
copySecrets: copySecretsSchema(),
// TODO: invert the resources and storage config schemas
resources: joi
.object()
.keys({
builder: resourceSchema(defaultResources.builder, false).description(dedent`
Resource requests and limits for the in-cluster builder. It's important to consider which build mode you're using when configuring this.
When \`buildMode\` is \`kaniko\`, this refers to _each Kaniko pod_, i.e. each individual build, so you'll want to consider the requirements for your individual image builds, with your most expensive/heavy images in mind.
When \`buildMode\` is \`cluster-buildkit\`, this applies to the BuildKit deployment created in _each project namespace_. So think of this as the resource spec for each individual user or project namespace.
`),
util: resourceSchema(defaultResources.util, false).description(dedent`
Resource requests and limits for the util pod for in-cluster builders.
This pod is used to get, start, stop, and query the status of builds.
This pod is created in each garden namespace.
`),
sync: resourceSchema(defaultResources.sync, true)
.description(
dedent`
Resource requests and limits for the code sync service, which we use to sync build contexts to the cluster
ahead of building images. This generally is not resource intensive, but you might want to adjust the
defaults if you have many concurrent users.
`
)
.meta({
deprecated: "The sync service is only used for the cluster-docker build mode, which is being deprecated.",
}),
})
.default(defaultResources).description(deline`
Resource requests and limits for the in-cluster builder.
`),
resources: resourcesSchema(),
tlsCertificates: joiSparseArray(tlsCertificateSchema())
.unique("name")
.description("One or more certificates to use for ingress."),
@@ -1001,3 +971,36 @@ export const namespaceNameSchema = () =>
joiIdentifier()
.max(63) // Max length of a DNS label, and by extension max k8s namespace length
.description("A valid Kubernetes namespace name. Must be a " + joiIdentifierDescription)

export const resourcesSchema = () =>
joi
.object()
.keys({
builder: resourceSchema(defaultResources.builder, false).description(dedent`
Resource requests and limits for the in-cluster builder. It's important to consider which build mode you're using when configuring this.
When \`buildMode\` is \`kaniko\`, this refers to _each Kaniko pod_, i.e. each individual build, so you'll want to consider the requirements for your individual image builds, with your most expensive/heavy images in mind.
When \`buildMode\` is \`cluster-buildkit\`, this applies to the BuildKit deployment created in _each project namespace_. So think of this as the resource spec for each individual user or project namespace.
`),
util: resourceSchema(defaultResources.util, false).description(dedent`
Resource requests and limits for the util pod for in-cluster builders.
This pod is used to get, start, stop, and query the status of builds.
This pod is created in each garden namespace.
`),
sync: resourceSchema(defaultResources.sync, true)
.description(
dedent`
Resource requests and limits for the code sync service, which we use to sync build contexts to the cluster
ahead of building images. This generally is not resource intensive, but you might want to adjust the
defaults if you have many concurrent users.
`
)
.meta({
deprecated: "The sync service is only used for the cluster-docker build mode, which is being deprecated.",
}),
})
.default(defaultResources).description(deline`
Resource requests and limits for the in-cluster builder.
`)
19 changes: 12 additions & 7 deletions core/src/plugins/kubernetes/container/build/common.ts
@@ -233,6 +233,8 @@ export async function skopeoBuildStatus({
if (res.exitCode !== 0 && !skopeoManifestUnknown(res.stderr)) {
const output = res.allLogs || err.message

// TODO: if a registry does not have an image with the name at all, we throw here
// This isn't a great first-time-use experience (or after you've reset a registry)
throw new RuntimeError({
message: `Unable to query registry for image status: ${output}`,
detail: {
@@ -354,7 +356,13 @@ export async function ensureUtilDeployment({

export async function getManifestInspectArgs(remoteId: string, deploymentRegistry: ContainerRegistryConfig) {
const dockerArgs = ["manifest", "inspect", remoteId]
if (isLocalHostname(deploymentRegistry.hostname)) {
const { hostname } = deploymentRegistry
// Allow insecure connections on local registry
if (
hostname === "localhost" ||
hostname.startsWith("127.") ||
hostname === "default-route-openshift-image-registry.apps-crc.testing"
) {
dockerArgs.push("--insecure")
}

@@ -399,10 +407,6 @@ export async function ensureBuilderSecret({
return { authSecret, updated }
}

function isLocalHostname(hostname: string) {
return hostname === "localhost" || hostname.startsWith("127.")
}

export function getUtilContainer(authSecretName: string, provider: KubernetesProvider): V1Container {
return {
name: utilContainerName,
@@ -459,7 +463,7 @@ export function getUtilContainer(authSecretName: string, provider: KubernetesPro
},
},
},
resources: stringifyResources(provider.config.resources.util),
resources: stringifyResources(provider.config?.resources?.util),
securityContext: {
runAsUser: 1000,
runAsGroup: 1000,
@@ -477,6 +481,7 @@ export function getUtilManifests(
builderToleration,
]
const kanikoAnnotations = provider.config.kaniko?.util?.annotations || provider.config.kaniko?.annotations
const utilContainer = getUtilContainer(authSecretName, provider)
const deployment: KubernetesDeployment = {
apiVersion: "apps/v1",
kind: "Deployment",
Expand All @@ -502,7 +507,7 @@ export function getUtilManifests(
annotations: kanikoAnnotations,
},
spec: {
containers: [getUtilContainer(authSecretName, provider)],
containers: [utilContainer],
imagePullSecrets,
volumes: [
{
11 changes: 10 additions & 1 deletion core/src/plugins/kubernetes/container/build/kaniko.ts
@@ -143,7 +143,16 @@ export const kanikoBuild: BuildHandler = async (params) => {
...getKanikoFlags(spec.extraFlags, provider.config.kaniko?.extraFlags),
]

if (provider.config.deploymentRegistry?.insecure === true) {
const isOpenShiftLocal =
provider.config.deploymentRegistry?.hostname === "default-route-openshift-image-registry.apps-crc.testing"

if (isOpenShiftLocal) {
// The registry in OpenShift Local requires TLS and comes with a self-signed certificate
args.push("--skip-tls-verify")
}

// TODO: do we support the garden-provided in-cluster registry anymore, or could this be deleted?
if (provider.config.deploymentRegistry?.insecure === true && !isOpenShiftLocal) {
// The in-cluster registry is not exposed, so we don't configure TLS on it.
args.push("--insecure")
}
12 changes: 10 additions & 2 deletions core/src/plugins/kubernetes/namespace.ts
@@ -71,9 +71,17 @@ export async function ensureNamespace(
cache[namespace.name] = { status: "pending" }

// Get the latest remote namespace list
const namespacesStatus = await api.core.listNamespace()
let namespaces: KubernetesServerResource<V1Namespace>[] = []
try {
const namespacesStatus = await api.core.listNamespace()
namespaces = namespacesStatus.items
} catch (error) {
log.warn("Unable to list all namespaces. If you are using OpenShift, ignore this warning.")
const namespaceStatus = await api.core.readNamespace(namespace.name)
namespaces = [namespaceStatus]
}

for (const n of namespacesStatus.items) {
for (const n of namespaces) {
if (n.status.phase === "Active") {
cache[n.metadata.name] = { status: "created", resource: n }
}
66 changes: 66 additions & 0 deletions core/src/plugins/openshift/build.ts
@@ -0,0 +1,66 @@
/*
* Copyright (C) 2018-2023 Garden Technologies, Inc. <info@garden.io>
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

import { DeepPrimitiveMap } from "@garden-io/platform-api-types"
import { BuildActionExtension } from "../../plugin/action-types"
import { ContainerBuildAction } from "../container/config"
import { ContainerBuildMode, KubernetesProvider } from "../kubernetes/config"
import { k8sGetContainerBuildActionOutputs } from "../kubernetes/container/handlers"
import { k8sPublishContainerBuild } from "../kubernetes/container/publish"
import { BuildHandler, BuildStatusHandler } from "../kubernetes/container/build/common"
import { getLocalBuildStatus, localBuild } from "../kubernetes/container/build/local"
import { getKanikoBuildStatus, kanikoBuild } from "../kubernetes/container/build/kaniko"

export const openshiftContainerBuildExtension = (): BuildActionExtension<ContainerBuildAction> => ({
name: "container",
handlers: {
async getOutputs({ ctx, action }) {
const provider = ctx.provider as KubernetesProvider
return {
outputs: k8sGetContainerBuildActionOutputs({ action, provider }) as unknown as DeepPrimitiveMap,
}
},

build: async (params) => {
const { ctx } = params

const provider = ctx.provider as KubernetesProvider
const buildMode = provider.config.buildMode || "local-docker"
const handler = buildHandlers[buildMode]

return handler(params)
},

getStatus: async (params) => {
const { ctx } = params
const provider = ctx.provider as KubernetesProvider

const buildMode = provider.config.buildMode || "local-docker"
const handler = buildStatusHandlers[buildMode]
return handler(params)
},

publish: k8sPublishContainerBuild,
},
})

const unimplemented = () => {
throw new Error("Unimplemented handler called in OpenShift Build")
}

const buildStatusHandlers: { [mode in ContainerBuildMode]: BuildStatusHandler } = {
"local-docker": getLocalBuildStatus,
"cluster-buildkit": unimplemented,
"kaniko": getKanikoBuildStatus,
}

const buildHandlers: { [mode in ContainerBuildMode]: BuildHandler } = {
"local-docker": localBuild,
"cluster-buildkit": unimplemented,
"kaniko": kanikoBuild,
}
2 changes: 2 additions & 0 deletions core/src/plugins/openshift/config.ts
@@ -9,6 +9,7 @@
import { joiProviderName } from "../../config/common"
import { BaseProviderConfig, Provider, providerConfigBaseSchema } from "../../config/provider"
import { PluginContext } from "../../plugin-context"
import { resourcesSchema } from "../kubernetes/config"

export interface OpenShiftConfig extends BaseProviderConfig {}
export type OpenShiftProvider = Provider<OpenShiftConfig>
@@ -20,5 +21,6 @@ export const configSchema = () =>
providerConfigBaseSchema()
.keys({
name: joiProviderName("openshift"),
resources: resourcesSchema(),
})
.description("The provider configuration for the openshift plugin")
core/src/plugins/openshift/container.ts → core/src/plugins/openshift/deploy.ts: File renamed without changes.
4 changes: 3 additions & 1 deletion core/src/plugins/openshift/openshift.ts
@@ -8,8 +8,9 @@

import { ConfigureProviderParams } from "../../plugin/handlers/Provider/configureProvider"
import { createGardenPlugin } from "../../plugin/plugin"
import { openshiftContainerBuildExtension } from "./build"
import { OpenShiftConfig, configSchema } from "./config"
import { openshiftContainerDeployExtension } from "./container"
import { openshiftContainerDeployExtension } from "./deploy"

export async function configureProvider({ config }: ConfigureProviderParams<OpenShiftConfig>) {
return { config }
@@ -26,6 +27,7 @@ export const gardenPlugin = () => {

extendActionTypes: {
Deploy: [openshiftContainerDeployExtension()],
Build: [openshiftContainerBuildExtension()],
},
})
}
75 changes: 72 additions & 3 deletions core/test/data/openshift/demo-project/README.md
@@ -6,6 +6,24 @@ There are no guarantees for feature support, correctness, stability, or ice crea

This is mostly internal documentation while we work through adding support for OpenShift.

This document has been written using the following:

```
CRC version: 2.20.0+f3a947
OpenShift version: 4.13.0
```

## Preparation

If you are using Docker Desktop and its builtin Kubernetes support, you need to do the following cleanup steps first:

- Go to Docker Desktop settings, Kubernetes tab, and disable it with the checkbox
- Quit Docker Desktop from the menu - using the Restart button is not enough
- Start Docker Desktop
- Verify that Docker Desktop is no longer binding the port for the Kubernetes API server: `lsof -i :6443`
- If the port is still active, you may need to quit and start Docker Desktop again
- Move `~/.kube/config` to another location, both as a backup and to give OpenShift room to create its own (see the sketch below)
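For example, the port check and kubeconfig backup could look like this (a minimal sketch; the backup filename is just an illustration):

```bash
# Confirm nothing is listening on the Kubernetes API port anymore
lsof -i :6443 || echo "port 6443 is free"
# Keep the old kubeconfig out of OpenShift's way, but recoverable
mv ~/.kube/config ~/.kube/config.docker-desktop.bak
```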

## Setup

- Download OpenShift Local from RedHat's website
Expand All @@ -14,8 +32,7 @@ This is mostly internal documentation while we work through adding support for O
- downloading/copying the pull secret
- running `crc setup`
- pasting the secret in when prompted
- save the kubeadmin password to your password manager
- add the kubeadmin password to your `.zshrc` or similar as `export OPENSHIFT_ADMIN_PASSWORD=password`
- take note of the generated kubeadmin password
- Run `oc login -u developer -p developer https://api.crc.testing:6443` once and accept the certificate
- Run `oc new-project demo-project` to create the project (the full sequence is sketched below)
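Condensed into one run, the setup might look roughly like this (a sketch; prompts can vary by CRC version):

```bash
crc setup
crc start    # paste the pull secret when prompted; note the generated kubeadmin password
oc login -u developer -p developer https://api.crc.testing:6443
oc new-project demo-project
```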

@@ -26,13 +43,65 @@ brew install socat
socat TCP6-LISTEN:6443,fork,reuseaddr TCP4:127.0.0.1:6443
```

This needs to be kept running in the background.
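If you would rather not dedicate a terminal to it, one option is to detach it (an illustration; any process supervisor would do, and the log path is arbitrary):

```bash
# Run socat detached from the terminal; output goes to socat.log
nohup socat TCP6-LISTEN:6443,fork,reuseaddr TCP4:127.0.0.1:6443 > socat.log 2>&1 &
# Stop it later by matching the listen address
pkill -f 'TCP6-LISTEN:6443'
```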

## Optional: enabling in-cluster builds

Allow `anyuid` so that the `kaniko` builder can work on the cluster:

_TODO: we should fix this properly by editing the `garden-util` image, and make this section obsolete_

```bash
oc login -u kubeadmin https://api.crc.testing:6443
oc adm policy add-scc-to-user anyuid -z default --namespace demo-project
oc logout
oc login -u developer -p developer
```

Optionally, increase the resources on the VM to make sure the builder does not run into limits:

```bash
crc config set memory 10240
crc config set cpus 4
crc stop
crc start
```

Configure the image pull secrets:

```bash
oc registry login --to auth.json
oc create secret docker-registry imagepullsecrets --from-file=.dockerconfigjson=auth.json
rm auth.json
```
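To double-check that the secret holds the registry credentials, you can decode it (a sketch; older macOS `base64` builds use `-D` instead of `-d`):

```bash
# Print the docker config JSON stored in the secret
oc get secret imagepullsecrets -o jsonpath='{.data.\.dockerconfigjson}' | base64 -d
```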

## Deploy

Let's make sure your terminal has fresh credentials in the environment:

```bash
oc login -u developer -p developer https://api.crc.testing:6443
oc registry login --insecure=true
```

Ideally, at this point this should work:

```bash
garden deploy
open http://hello.local.demo.garden/ # NOTE: this will return 403 as there is no content in the served directory and directory listing is forbidden
open http://hello.local.demo.garden/
garden logs nginx-hello # NOTE: this will be empty due to https://github.com/sclorg/nginx-container/issues/94
garden delete deploy
```

## Cleanup

If you want to reset your environment, you can run:

```bash
crc delete
```

This will delete the VM running OpenShift, including any configuration done within it. Remember to also delete or move the `~/.kube/config` file.

You can create a new VM by running `crc start` and repeating the setup steps for the project, permissions, registry, etc.
Note that creating a new VM will generate a new kubeadmin password.
18 changes: 18 additions & 0 deletions core/test/data/openshift/demo-project/garden.yml
@@ -11,3 +11,21 @@ providers:
    name: demo-project
    context: demo-project/api-crc-testing:6443/developer
    cluster: api-crc-testing:6443
    deploymentRegistry:
      hostname: default-route-openshift-image-registry.apps-crc.testing
      namespace: demo-project
      # FIXME: this config option is overloaded:
      # in different places it is used both to force HTTP and to skip TLS verification on HTTPS
      insecure: true
    imagePullSecrets:
      - name: imagepullsecrets
        namespace: demo-project
    buildMode:
      local-docker
      # kaniko
    kaniko:
      extraFlags:
        # TODO: possibly figure out better non-user-facing workarounds?
        - "--ignore-path=/usr/bin/newuidmap"
        - "--ignore-path=/usr/bin/newgidmap"
        - "--ignore-path=/usr/sbin/suexec"