Merge branch 'main' into feat/single-instance-client
rafalgalaw authored Dec 28, 2023
2 parents cb1dd78 + d9084dd commit 2e1cc29
Showing 23 changed files with 1,015 additions and 475 deletions.
32 changes: 16 additions & 16 deletions .github/workflows/release.yml
@@ -31,22 +31,22 @@ jobs:
with:
go-version: '1.21.5'

# - name: Synopsys Detect
# run: |
# GITHUB_REF="$(echo $GITHUB_REF_NAME | tr ':/' '_')"
# BLACKDUCK_SCAN_VERSION_NAME="${GITHUB_REF}_${GITHUB_SHA}"
# export BLACKDUCK_SCAN_VERSION_NAME

# # create the tmp directory as we also do during the release process
# mkdir -p tmp

# ./hack/foss-scan.sh

# mv tmp/Black_Duck_Notices_Report.txt tmp/3RD_PARTY_LICENSES.txt
# env:
# BLACKDUCK_URL: ${{ secrets.BLACKDUCK_URL }}
# BLACKDUCK_PROJECT_NAME: ${{ secrets.BLACKDUCK_PROJECT_NAME }}
# BLACKDUCK_TOKEN: ${{ secrets.BLACKDUCK_TOKEN }}
- name: Synopsys Detect
run: |
GITHUB_REF="$(echo $GITHUB_REF_NAME | tr ':/' '_')"
BLACKDUCK_SCAN_VERSION_NAME="${GITHUB_REF}_${GITHUB_SHA}"
export BLACKDUCK_SCAN_VERSION_NAME
# create the tmp directory as we also do during the release process
mkdir -p tmp
./hack/foss-scan.sh
mv tmp/Black_Duck_Notices_Report.txt tmp/3RD_PARTY_LICENSES.txt
env:
BLACKDUCK_URL: ${{ secrets.BLACKDUCK_URL }}
BLACKDUCK_PROJECT_NAME: ${{ secrets.BLACKDUCK_PROJECT_NAME }}
BLACKDUCK_TOKEN: ${{ secrets.BLACKDUCK_TOKEN }}

- name: release
run: make release
4 changes: 2 additions & 2 deletions .goreleaser.yaml
@@ -61,8 +61,8 @@ release:
- glob: tmp/garm_operator_all.yaml
- glob: tmp/garm_operator_crds.yaml
- glob: tmp/garm_operator.yaml
# - glob: tmp/3RD_PARTY_LICENSES.txt
# - glob: tmp/BlackDuck_RiskReport.pdf
- glob: tmp/3RD_PARTY_LICENSES.txt
- glob: tmp/BlackDuck_RiskReport.pdf
header: |
Container image is available at `ghcr.io/mercedes-benz/garm-operator/{{ .ProjectName }}:v{{ .Version }}`
4 changes: 4 additions & 0 deletions api/v1alpha1/enterprise_types.go
@@ -46,6 +46,10 @@ func (e *Enterprise) GetID() string {
return e.Status.ID
}

func (e *Enterprise) GetName() string {
return e.ObjectMeta.Name
}

func (e *Enterprise) GetPoolManagerIsRunning() bool {
return e.Status.PoolManagerIsRunning
}
4 changes: 4 additions & 0 deletions api/v1alpha1/organization_types.go
@@ -46,6 +46,10 @@ func (o *Organization) GetID() string {
return o.Status.ID
}

func (o *Organization) GetName() string {
return o.ObjectMeta.Name
}

func (o *Organization) GetPoolManagerIsRunning() bool {
return o.Status.PoolManagerIsRunning
}
31 changes: 20 additions & 11 deletions api/v1alpha1/pool_types.go
@@ -13,18 +13,22 @@ import (

// PoolSpec defines the desired state of Pool
// See: https://github.com/cloudbase/garm/blob/main/params/requests.go#L142

// +kubebuilder:validation:Required
// +kubebuilder:validation:XValidation:rule="self.minIdleRunners <= self.maxRunners",message="minIdleRunners must be less than or equal to maxRunners"
type PoolSpec struct {
// Defines in which Scope Runners are registered. Has a reference to either an Enterprise, Org or Repo CRD
GitHubScopeRef corev1.TypedLocalObjectReference `json:"githubScopeRef"`
ProviderName string `json:"providerName"`
MaxRunners uint `json:"maxRunners"`
MinIdleRunners uint `json:"minIdleRunners"`
Flavor string `json:"flavor"`
OSType commonParams.OSType `json:"osType"`
OSArch commonParams.OSArch `json:"osArch"`
Tags []string `json:"tags"`
Enabled bool `json:"enabled"`
RunnerBootstrapTimeout uint `json:"runnerBootstrapTimeout"`
GitHubScopeRef corev1.TypedLocalObjectReference `json:"githubScopeRef"`
ProviderName string `json:"providerName"`
MaxRunners uint `json:"maxRunners"`
// +kubebuilder:default=0
MinIdleRunners uint `json:"minIdleRunners"`
Flavor string `json:"flavor"`
OSType commonParams.OSType `json:"osType"`
OSArch commonParams.OSArch `json:"osArch"`
Tags []string `json:"tags"`
Enabled bool `json:"enabled"`
RunnerBootstrapTimeout uint `json:"runnerBootstrapTimeout"`

// The name of the image resource, this image resource must exists in the same namespace as the pool
ImageName string `json:"imageName"`
@@ -41,12 +45,17 @@ type PoolSpec struct {

// PoolStatus defines the observed state of Pool
type PoolStatus struct {
ID string `json:"id"`
ID string `json:"id"`
IdleRunners uint `json:"idleRunners"`
Runners uint `json:"runners"`
Selector string `json:"selector"`

LastSyncError string `json:"lastSyncError,omitempty"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
//+kubebuilder:subresource:scale:specpath=.spec.minIdleRunners,statuspath=.status.idleRunners,selectorpath=.status.selector
//+kubebuilder:resource:path=pools,scope=Namespaced,categories=garm
//+kubebuilder:printcolumn:name="ID",type=string,JSONPath=`.status.id`
//+kubebuilder:printcolumn:name="MinIdleRunners",type=string,JSONPath=`.spec.minIdleRunners`
4 changes: 4 additions & 0 deletions api/v1alpha1/repository_types.go
@@ -47,6 +47,10 @@ func (r *Repository) GetID() string {
return r.Status.ID
}

func (r *Repository) GetName() string {
return r.ObjectMeta.Name
}

func (r *Repository) GetPoolManagerIsRunning() bool {
return r.Status.PoolManagerIsRunning
}
1 change: 1 addition & 0 deletions api/v1alpha1/shared.go
@@ -19,6 +19,7 @@ type GitHubScope interface {
GetKind() string
GetCredentialsName() string
GetID() string
GetName() string
GetPoolManagerIsRunning() bool
GetPoolManagerFailureReason() string
}
18 changes: 17 additions & 1 deletion config/crd/bases/garm-operator.mercedes-benz.com_pools.yaml
@@ -75,7 +75,6 @@ spec:
metadata:
type: object
spec:
description: 'PoolSpec defines the desired state of Pool See: https://github.com/cloudbase/garm/blob/main/params/requests.go#L142'
properties:
enabled:
type: boolean
@@ -113,6 +112,7 @@ spec:
maxRunners:
type: integer
minIdleRunners:
default: 0
type: integer
osArch:
type: string
@@ -141,18 +141,34 @@ spec:
- runnerBootstrapTimeout
- tags
type: object
x-kubernetes-validations:
- message: minIdleRunners must be less than or equal to maxRunners
rule: self.minIdleRunners <= self.maxRunners
status:
description: PoolStatus defines the observed state of Pool
properties:
id:
type: string
idleRunners:
type: integer
lastSyncError:
type: string
runners:
type: integer
selector:
type: string
required:
- id
- idleRunners
- runners
- selector
type: object
type: object
served: true
storage: true
subresources:
scale:
labelSelectorPath: .status.selector
specReplicasPath: .spec.minIdleRunners
statusReplicasPath: .status.idleRunners
status: {}
Binary file added docs/assets/scaling.gif
138 changes: 138 additions & 0 deletions docs/quickstart.md
@@ -0,0 +1,138 @@
<!-- SPDX-License-Identifier: MIT -->

# Quickstart

To get started, you need to have the following prerequisites in place:
1. A running `garm-server` instance, either in your kubernetes cluster or elsewhere (it needs to be reachable by the `garm-operator`)
2. A running `garm-operator` instance in your kubernetes cluster
3. A configured Enterprise, Organization or Repository `webhook` on your GitHub instance. [See official Garm Docs](https://github.com/cloudbase/garm/blob/main/doc/webhooks.md)

Each `garm-operator` is tied to one `garm-server`. Make sure to apply the following `CustomResources (CRs)` to the same `namespace` your `garm-operator` is running in.
In the following examples, our `garm-operator` is deployed in the namespace `garm-operator-system`.
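
Before applying any of the CRs below, you can double-check which namespace your `garm-operator` is actually running in — a quick sanity check, assuming the default `garm-operator-system` namespace used throughout this guide:

```bash
# confirm the operator namespace; all CRs below must be applied to this namespace
$ kubectl get pods -n garm-operator-system
```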

<!-- toc -->
- [1. Webhook Secret](#1-webhook-secret)
- [2. Enterprise / Organization / Repository CR](#2-enterprise--organization--repository-cr)
- [3. Spin up a <code>Pool</code> with runners](#3-spin-up-a-pool-with-runners)
<!-- /toc -->

## 1. Webhook Secret
First, apply a regular [kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) which holds the secret you previously configured in your `GitHub webhook` as a `base64`-encoded value:
```bash
$ cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Secret
metadata:
name: enterprise-webhook-secret
namespace: garm-operator-system
data:
webhookSecret: bXlzdXBlcnNlY3JldHdlYmhvb2tzZWNyZXQ= #mysupersecretwebhooksecret
EOF
```
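
The `webhookSecret` value is just the plain-text secret, `base64`-encoded. If you want to produce the encoded value yourself, a standard shell one-liner does it (shown here with the placeholder secret from the example above):

```bash
# -n prevents a trailing newline from being included in the encoded value
$ echo -n 'mysupersecretwebhooksecret' | base64
bXlzdXBlcnNlY3JldHdlYmhvb2tzZWNyZXQ=
```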

## 2. Enterprise / Organization / Repository CR
Depending on the `GitHub scope` in which you registered your `webhook` and want to spin up runners, apply one of the following `Enterprise / Organization / Repository CRs`.
See [/config/samples/](../config/samples) for more example `CustomResources`.

In the following, we configure our `garm-server` so it can spin up runners on an `Enterprise scope` in GitHub.
1. `.spec.credentialsName` must match the name of the GitHub credentials configured in the [config.toml](https://github.com/cloudbase/garm/blob/main/doc/github_credentials.md?plain=1#L25) of your `garm-server`.
```bash
$ cat <<EOF | kubectl apply -f -
---
apiVersion: garm-operator.mercedes-benz.com/v1alpha1
kind: Enterprise
metadata:
labels:
app.kubernetes.io/name: enterprise
app.kubernetes.io/instance: enterprise-sample
app.kubernetes.io/part-of: garm-operator
name: enterprise-sample
namespace: garm-operator-system
spec:
credentialsName: GitHub-Actions
webhookSecretRef:
key: "webhookSecret"
name: "enterprise-webhook-secret"
EOF
```

After applying your `Enterprise / Organization / Repository CR`, you should see a populated `.status.id` field when querying with `kubectl`.
This `ID` is assigned by the `garm-server` after it syncs the `Enterprise` object into its internal database, and it gets reflected back to the applied `Enterprise CR`.
```bash
$ kubectl get enterprise -o wide

NAME ID READY ERROR AGE
enterprise-sample d6afb512-77d0-45d2-b8b3-b94f3dc62511 true 1m
```

## 3. Spin up a `Pool` with runners
To spin up a Pool, you need to apply an `Image CR` first. One `Image CR` can be referenced by multiple `Pool CRs`. Each `Image CR` holds an image tag from which
the `Provider` associated with the `Pool` can create `Runner Instances`.

```bash
$ cat <<EOF | kubectl apply -f -
---
apiVersion: garm-operator.mercedes-benz.com/v1alpha1
kind: Image
metadata:
labels:
app.kubernetes.io/name: image
app.kubernetes.io/instance: image-sample
app.kubernetes.io/part-of: garm-operator
name: runner-default
namespace: garm-operator-system
spec:
tag: linux-ubuntu-22.04
EOF
```

Next apply a `Pool CR`:
```bash
$ cat <<EOF | kubectl apply -f -
---
apiVersion: garm-operator.mercedes-benz.com/v1alpha1
kind: Pool
metadata:
labels:
app.kubernetes.io/instance: pool-sample
app.kubernetes.io/name: pool
app.kubernetes.io/part-of: garm-operator
name: openstack-small-pool-enterprise
namespace: garm-operator-system
spec:
githubScopeRef:
apiGroup: garm-operator.mercedes-benz.com
kind: Enterprise
name: enterprise-sample
enabled: true
extraSpecs: '{}'
flavor: small
githubRunnerGroup: ""
imageName: runner-default
maxRunners: 4
minIdleRunners: 2
osArch: amd64
osType: linux
providerName: openstack
runnerBootstrapTimeout: 20
runnerPrefix: ""
tags:
- linux
- small
- ubuntu
EOF
```
Take care of the following:
1. `.spec.githubScopeRef.name` and `.spec.githubScopeRef.kind` should reference the previously applied `Enterprise / Organization / Repository CR`, so its `Runners` get registered in the correct scope.
2. `.spec.providerName` should match the name of your desired provider configured in the [config.toml](https://github.com/cloudbase/garm/blob/main/doc/providers.md?plain=1#L26) of your `garm-server`.
3. `.spec.imageName` should reference the `.metadata.name` field of the previously applied `Image CR`.

After that you should see the following output, where `ID` gets reflected back from `garm-server` to the `.status.id` field of your `Pool CR`:

```bash
$ kubectl get pool

NAME ID MINIDLERUNNERS MAXRUNNERS AGE
openstack-small-pool-enterprise 0ff3f052-5901-46ac-902c-28f2f38a64ec 2 4 1m
```
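
This merge also wires a `scale` subresource into the `Pool` CRD (backed by `.spec.minIdleRunners` and `.status.idleRunners`), so `kubectl scale` should work against a `Pool` as well — a minimal sketch, using the pool name from the example above:

```bash
# bump the number of idle runners via the scale subresource
$ kubectl scale pool openstack-small-pool-enterprise -n garm-operator-system --replicas=3

# the MINIDLERUNNERS column should now show the new value
$ kubectl get pool openstack-small-pool-enterprise -n garm-operator-system
```

Note that the new CEL validation on the `PoolSpec` rejects values where `minIdleRunners` would exceed `maxRunners`.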
