From 57300ff48bd3f0cd58098f1de566b5f34cfb8506 Mon Sep 17 00:00:00 2001 From: Pipelines as Code CI Robot Date: Mon, 26 May 2025 15:06:27 +0000 Subject: [PATCH 01/20] Release yaml generated from https://github.com/openshift-pipelines/pipelines-as-code/commit/d67421772b62b8d5e4ba46233165b7cf2ab67c0a for release v0.35.0 --- docs/content/ALLVERSIONS | 1 + docs/content/VERSION | 1 + pkg/params/version/version.txt | 2 +- release.k8s.yaml | 1658 +++++++++++++++++++++++++++++ release.yaml | 1792 ++++++++++++++++++++++++++++++++ 5 files changed, 3453 insertions(+), 1 deletion(-) create mode 100644 docs/content/ALLVERSIONS create mode 100644 docs/content/VERSION create mode 100644 release.k8s.yaml create mode 100644 release.yaml diff --git a/docs/content/ALLVERSIONS b/docs/content/ALLVERSIONS new file mode 100644 index 000000000..eefd238e7 --- /dev/null +++ b/docs/content/ALLVERSIONS @@ -0,0 +1 @@ +nightly,stable,v0.35.0,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 diff --git a/docs/content/VERSION b/docs/content/VERSION new file mode 100644 index 000000000..027d0d8ce --- /dev/null +++ b/docs/content/VERSION @@ -0,0 +1 @@ +v0.35.x diff --git a/pkg/params/version/version.txt b/pkg/params/version/version.txt index bf867e0ae..ab4e51c67 100644 --- a/pkg/params/version/version.txt +++ b/pkg/params/version/version.txt @@ -1 +1 @@ -nightly +v0.35.0 diff --git a/release.k8s.yaml b/release.k8s.yaml new file mode 100644 index 000000000..abcf71d72 --- /dev/null +++ b/release.k8s.yaml @@ -0,0 +1,1658 @@ +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: Namespace +metadata: + name: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + openshift.io/cluster-monitoring: "true" +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + # All system:authenticated users needs to have access + # of the pipelines-as-code-info ConfigMap even if they don't + # have access to the other resources present in the + # installed namespace. 
+ - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["pipelines-as-code-info"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-info +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipelines-as-code-aggregate + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - pipelinesascode.tekton.dev + resources: + - repositories + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-controller-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-controller-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-controller-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-controller-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["create"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "update", "delete"] + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "create", "list"] + - apiGroups: ["tekton.dev"] + resources: ["pipelineruns"] + verbs: ["get", "list", "create", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-controller-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-controller + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-controller-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-watcher-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-watcher-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-watcher +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-watcher-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-watcher-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "delete"] + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "list", "update", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["pipelineruns"] + verbs: ["get", "delete", "list", "watch", "update", "patch"] + - apiGroups: ["tekton.dev"] + resources: ["taskruns"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-watcher-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: +- kind: ServiceAccount + name: pipelines-as-code-watcher + namespace: pipelines-as-code +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-watcher-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-webhook-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update"] + resourceNames: ["pipelines-as-code-webhook-certs"] + # The webhook daemon makes a reconciliation loop on webhook-certs. Whenever + # the secret changes it updates the webhook configurations with the certificates + # stored in the secret. + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + # webhook uses leases for leader election +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-webhook-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-webhook-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-webhook-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "list", "watch"] + # The webhook performs a reconciliation on this resource and continuously + # updates configuration. + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["list", "watch"] + # When there are changes to the configs or secrets, knative updates the validating webhook config + # with the updated certificates or the refreshed set of rules. 
+ - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "update", "delete"] + resourceNames: ["validation.pipelinesascode.tekton.dev"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-webhook-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: +- kind: ServiceAccount + name: pipelines-as-code-webhook + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-webhook-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: repositories.pipelinesascode.tekton.dev + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +spec: + group: pipelinesascode.tekton.dev + versions: + - name: v1alpha1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - name: Succeeded + type: string + jsonPath: '.pipelinerun_status[-1].conditions[?(@.type=="Succeeded")].status' + - name: Reason + type: string + jsonPath: '.pipelinerun_status[-1].conditions[?(@.type=="Succeeded")].reason' + - name: StartTime + type: date + jsonPath: ".pipelinerun_status[-1].startTime" + - name: CompletionTime + type: date + jsonPath: ".pipelinerun_status[-1].completionTime" + served: true + storage: true + schema: + openAPIV3Schema: + description: Repository is the representation of a Git repository from a Git provider platform. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + pipelinerun_status: + items: + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is additional Status fields for the Resource to save some + additional State as well as convey more information to the user. This is + roughly akin to Annotations on any k8s resource, just the reconciler conveying + richer information outwards. + type: object + completionTime: + description: CompletionTime is the time the PipelineRun completed. 
+ format: date-time + type: string + conditions: + description: Conditions the latest available observations of a resource's current state. + items: + description: |- + Condition defines a readiness condition for a Knative resource. + See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time the condition transitioned from one status to another. + We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic + differences (all other things held constant). + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + severity: + description: |- + Severity with which to treat failures of this type of condition. + When this is not specified, it defaults to Error. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + event_type: + description: EventType is the event type of that run + type: string + failure_reason: + additionalProperties: + description: TaskInfos contains information about a task. + properties: + completion_time: + format: date-time + type: string + display_name: + type: string + log_snippet: + type: string + message: + type: string + name: + type: string + reason: + type: string + required: + - name + type: object + description: CollectedTaskInfos is the information about tasks + type: object + logurl: + description: LogURL is the full URL to the log for this run. + type: string + observedGeneration: + description: |- + ObservedGeneration is the 'Generation' of the Service that + was last processed by the controller. + format: int64 + type: integer + pipelineRunName: + description: PipelineRunName is the name of the PipelineRun + type: string + sha: + description: SHA is the name of the SHA that has been tested + type: string + sha_url: + description: SHA the URL of the SHA to view it + type: string + startTime: + description: StartTime is the time the PipelineRun is actually started. + format: date-time + type: string + target_branch: + description: TargetBranch is the target branch of that run + type: string + title: + description: Title is the title of the commit SHA that has been tested + type: string + type: object + type: array + spec: + description: |- + RepositorySpec defines the desired state of a Repository, including its URL, + Git provider configuration, and operational settings. + properties: + concurrency_limit: + description: |- + ConcurrencyLimit defines the maximum number of concurrent pipelineruns that can + run for this repository. This helps prevent resource exhaustion when many events trigger + pipelines simultaneously. + minimum: 1 + type: integer + git_provider: + description: |- + GitProvider details specific to a git provider configuration. Contains authentication, + API endpoints, and provider type information needed to interact with the Git service. + properties: + secret: + description: |- + Secret reference for authentication with the Git provider. Contains the token, + password, or private key used to authenticate requests to the Git provider API. 
+ properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + type: + description: |- + Type of git provider. Determines which Git provider API and authentication flow to use. + Supported values: + - 'github': GitHub.com or GitHub Enterprise + - 'gitlab': GitLab.com or self-hosted GitLab + - 'bitbucket-datacenter': Bitbucket Data Center (self-hosted) + - 'bitbucket-cloud': Bitbucket Cloud (bitbucket.org) + - 'gitea': Gitea instances + enum: + - github + - gitlab + - bitbucket-datacenter + - bitbucket-cloud + - gitea + type: string + url: + description: |- + URL of the git provider API endpoint. This is the base URL for API requests to the + Git provider (e.g., 'https://api.github.com' for GitHub or a custom GitLab instance URL). + type: string + user: + description: |- + User of the git provider. Username to use for authentication when using basic auth + or token-based authentication methods. Not used for GitHub Apps authentication. + type: string + webhook_secret: + description: |- + WebhookSecret reference for webhook validation. Contains the shared secret used to + validate that incoming webhooks are legitimate and coming from the Git provider. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + type: object + incoming: + description: |- + Incomings defines incoming webhook configurations. Each configuration specifies how to + handle external webhook requests that don't come directly from the primary Git provider. + items: + properties: + params: + description: |- + Params defines parameter names to extract from the webhook payload. These parameters + will be made available to the PipelineRuns triggered by this webhook. + items: + type: string + type: array + secret: + description: |- + Secret for the incoming webhook authentication. This secret is used to validate + that webhook requests are coming from authorized sources. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + targets: + description: |- + Targets defines target branches for this webhook. When specified, only webhook + events targeting these branches will trigger PipelineRuns. + items: + type: string + type: array + type: + description: |- + Type of the incoming webhook. Currently only 'webhook-url' is supported, which allows + external systems to trigger PipelineRuns via generic webhook requests. + enum: + - webhook-url + type: string + required: + - secret + - type + type: object + type: array + params: + description: |- + Params defines repository level parameters that can be referenced in PipelineRuns. + These parameters can be used as default values or configured for specific events. + items: + properties: + filter: + description: |- + Filter defines when this parameter applies. It can be used to conditionally + apply parameters based on the event type, branch name, or other attributes. + type: string + name: + description: |- + Name of the parameter. This is the key that will be used to reference this parameter + in PipelineRun definitions through via the {{ name }} syntax. + type: string + secret_ref: + description: |- + SecretRef references a secret for the parameter value. Use this when the parameter + contains sensitive information that should not be stored directly in the Repository CR. 
+ This field is mutually exclusive with Value. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + value: + description: |- + Value of the parameter. The literal value to be provided to the PipelineRun. + This field is mutually exclusive with SecretRef. + type: string + required: + - name + type: object + type: array + settings: + description: |- + Settings contains the configuration settings for the repository, including + authorization policies, provider-specific configuration, and provenance settings. + properties: + github_app_token_scope_repos: + description: |- + GithubAppTokenScopeRepos lists repositories that can access the GitHub App token when using the + GitHub App authentication method. This allows specific repositories to use tokens generated for + the GitHub App installation, useful for cross-repository access. + items: + type: string + type: array + gitlab: + description: Gitlab contains GitLab-specific settings for repositories hosted on GitLab. + properties: + comment_strategy: + description: |- + CommentStrategy defines how GitLab comments are handled for pipeline results. + Options: + - 'status-comment': Posts a single comment and updates it with pipeline results + - 'pipeline-runs-comment': Creates a new comment for each PipelineRun + - 'disable_all': Disables all comments on merge requests + enum: + - status-comment + - pipeline-runs-comment + - disable_all + type: string + type: object + pipelinerun_provenance: + description: |- + PipelineRunProvenance configures how PipelineRun definitions are fetched. + Options: + - 'source': Fetch definitions from the event source branch/SHA (default) + - 'default_branch': Fetch definitions from the repository default branch + enum: + - source + - default_branch + type: string + policy: + description: |- + Policy defines authorization policies for the repository, controlling who can + trigger PipelineRuns under different conditions. + properties: + ok_to_test: + description: |- + OkToTest defines a list of usernames that are allowed to trigger pipeline runs on pull requests + from external contributors by commenting "/ok-to-test" on the PR. These users are typically + repository maintainers or trusted contributors who can vouch for external contributions. + items: + type: string + type: array + pull_request: + description: |- + PullRequest defines a list of usernames that are explicitly allowed to execute + pipelines on their pull requests, even if they wouldn't normally have permission. + This is useful for allowing specific external contributors to trigger pipeline runs. + items: + type: string + type: array + type: object + type: object + url: + description: |- + URL of the repository we are building. Must be a valid HTTP/HTTPS Git repository URL + that PAC will use to clone and fetch pipeline definitions from. 
+                  type: string
+              type: object
+          required:
+            - spec
+          type: object
+  scope: Namespaced
+  names:
+    plural: repositories
+    singular: repository
+    kind: Repository
+    shortNames:
+      - repo
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: pac-config-logging
+  namespace: pipelines-as-code
+  labels:
+    app.kubernetes.io/instance: default
+    app.kubernetes.io/part-of: pipelines-as-code
+data:
+  zap-logger-config: |
+    {
+      "level": "info",
+      "development": false,
+      "sampling": {
+        "initial": 100,
+        "thereafter": 100
+      },
+      "outputPaths": ["stdout"],
+      "errorOutputPaths": ["stderr"],
+      "encoding": "json",
+      "encoderConfig": {
+        "timeKey": "ts",
+        "levelKey": "level",
+        "nameKey": "logger",
+        "callerKey": "caller",
+        "messageKey": "msg",
+        "stacktraceKey": "stacktrace",
+        "lineEnding": "",
+        "levelEncoder": "",
+        "timeEncoder": "iso8601",
+        "durationEncoder": "",
+        "callerEncoder": ""
+      }
+    }
+  # Log level overrides
+  loglevel.pipelinesascode: "info"
+  loglevel.pac-watcher: "info"
+  loglevel.pipelines-as-code-webhook: "info"
+---
+
+# Copyright 2025 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# See https://pipelinesascode.com/docs/install/settings/ for the complete
+# documentation of all settings.
+
+apiVersion: v1
+data:
+  # The application name; you can customize this label. If you are using the GitHub App, you will need to customize the label in the GitHub App settings as well.
+  application-name: "Pipelines as Code CI"
+
+  # Whether to automatically create a secret with the token to be used by git-clone
+  secret-auto-create: "true"
+
+  # By default we only generate a token scoped to the repository the payload
+  # comes from.
+  # We do this because the GitHub App may be installed on a GitHub organisation
+  # with a mix of public and private repositories, where some users of that
+  # org do not have access to every repository.
+  #
+  # If you trust every user in your organisation to access any repository there,
+  # or you are not planning to install your GitHub App globally on a GitHub
+  # organisation, then you can safely set this option to false.
+  secret-github-app-token-scoped: "true"
+
+  # If you don't want to completely disable the scoping of the token, but still
+  # want some other repos (on the same installation ID) available from the
+  # token, then you can add an extra owner/repo here.
+  #
+  # You can have multiple owner/repositories separated by commas,
+  # i.e.: "owner/private-repo1, org/repo2"
+  secret-github-app-scope-extra-repos: ""
+
+  # Tekton Hub API URL
+  hub-url: "https://api.hub.tekton.dev/v1"
+
+  # Tekton Hub catalog name
+  hub-catalog-name: "tekton"
+
+  # Additional Hub catalogs are supported, for example:
+  #
+  # catalog-1-id: anotherhub
+  # catalog-1-name: tekton
+  # catalog-1-url: https://api.other.com/v1
+  #
+  # This configuration adds a new catalog named anotherhub, served from the
+  # https://api.other.com/v1 endpoint with the catalog name tekton, which a
+  # user can reference in their templates like this:
+  # pipelinesascode.tekton.dev/task: "anotherhub://task"
+  #
+  # Increment the catalog number to add more of them.
+
+  # Allow fetching remote tasks
+  remote-tasks: "true"
+
+  # Using the URL of the Tekton dashboard, Pipelines-as-Code generates a URL to the
+  # PipelineRun on the Tekton dashboard
+  tekton-dashboard-url: ""
+
+  # Enable or disable the feature to show a log snippet of the failed task when there is
+  # an error in a Pipeline.
+  #
+  # It will show the last 3 lines of the first container of the first task
+  # that has an error in the pipeline.
+  #
+  # You may want to disable this if you think your pipeline may leak some value.
+  error-log-snippet: "true"
+
+  # Enable or disable the inspection of container logs to detect error messages
+  # and expose them as annotations on the Pull Request. Only GitHub Apps are supported.
+  error-detection-from-container-logs: "true"
+
+  # How many lines to grab from the container when inspecting the
+  # logs for error-detection. Increasing this value may increase the watcher
+  # memory usage. Use -1 for unlimited lines.
+  error-detection-max-number-of-lines: "50"
+
+  # The default regexp used for the simple error detection
+  error-detection-simple-regexp: |-
+    ^(?P<filename>[^:]*):(?P<line>[0-9]+):(?P<column>[0-9]+)?([ ]*)?(?P<error>.*)
+
+  # Global setting to control whether Pipelines-as-Code should automatically cancel
+  # any in-progress PipelineRuns associated with a pull request when that pull request is updated.
+  # This helps prevent multiple redundant runs from executing simultaneously.
+  # Default value: false.
+  enable-cancel-in-progress-on-pull-requests: "false"
+
+  # Global setting to determine whether Pipelines-as-Code should automatically cancel
+  # in-progress PipelineRuns triggered by a push event, if a new push occurs on the same branch.
+  # This prevents overlapping or redundant runs for the same branch.
+  # Default value: false.
+  enable-cancel-in-progress-on-push: "false"
+
+  # Since public Bitbucket doesn't have the concept of a Secret, we need to
+  # secure the request by querying https://ip-ranges.atlassian.com/; this only
+  # happens for public Bitbucket (i.e. when provider.url is not set in the
+  # repository spec). If you want to override this, bear in mind that it could be
+  # a security issue: a malicious user can send a PR to your repo with a
+  # modification to your PipelineRun that would grab secrets, open a tunnel or
+  # similar, and then send a malicious webhook payload to the controller that
+  # looks like an authorized owner sent the PR to run it.
+  bitbucket-cloud-check-source-ip: "true"
+
+  # Add extra IPs (e.g. 127.0.0.1) or networks (e.g. 127.0.0.0/16) separated by commas.
+  bitbucket-cloud-additional-source-ip: ""
+
+  # max-keep-run-upper-limit defines the upper limit for the max-keep-run annotation
+  # value which a user can set on a PipelineRun. The value set on the annotation
+  # should be less than or equal to the upper limit, otherwise the upper limit
+  # will be used during cleanup.
+  max-keep-run-upper-limit: ""
+
+  # If defined, applies to all PipelineRuns that don't have the max-keep-runs annotation.
+  default-max-keep-runs: ""
+
+  # Whether to auto configure newly created repositories; this will create a new
+  # namespace and Repository CR. Supported only with the GitHub App.
+  auto-configure-new-github-repo: "false"
+
+  # Add a template to generate the namespace name for your auto-configured
+  # GitHub repo. Supported fields are repo_owner and repo_name, e.g. if defined as
+  # `{{repo_owner}}-{{repo_name}}-ci`, then the namespace generated for repository
+  # https://github.com/owner/repo will be `owner-repo-ci`
+  auto-configure-repo-namespace-template: ""
+
+  # Enable or disable the feature to rerun the CI if a push event happens on
+  # a pull request.
+  #
+  # By default it is true and CI will be re-run in case of a push/amend on the
+  # pull request once ok-to-test has been done.
+  #
+  # You may want to disable this if ok-to-test should be done on each iteration.
+  remember-ok-to-test: "false"
+
+  # When enabled, this option prevents duplicate pipeline runs when a commit appears in
+  # both a push event and a pull request. If a push event comes from a commit that is
+  # part of an open pull request, the push event will be skipped as it would create
+  # a duplicate pipeline run.
+  # Default: true
+  skip-push-event-for-pr-commits: "true"
+
+  # Configure a custom console here; the driver supports custom parameters from
+  # the Repo CR along with a few other template variables, see the documentation
+  # for more details.
+  #
+  # custom-console-name: Console Name
+  # custom-console-url: https://url
+  # custom-console-url-pr-details: https://url/ns/{{ namespace }}/{{ pr }}
+  # custom-console-url-pr-tasklog: https://url/ns/{{ namespace }}/{{ pr }}/logs/{{ task }}
+
+kind: ConfigMap
+metadata:
+  name: pipelines-as-code
+  namespace: pipelines-as-code
+  labels:
+    app.kubernetes.io/version: "v0.35.0"
+    app.kubernetes.io/part-of: pipelines-as-code
+---
+
+# Copyright 2025 Red Hat
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This ConfigMap is filled in by the bootstrap command.
+# GitHub App is added as the provider, and later this is checked
+# before configuring a new GitHub App so that we don't
+# configure more than one App.
+
+apiVersion: v1
+data:
+  # pipelines as code controller version
+  version: "v0.35.0"
+
+  # controller URL to be used for configuring the webhook using the CLI
+  controller-url: ""
+
+  # display the configured provider on the platform
+  # only one provider type is to be configured at a time
+  # e.g.
if GitHub App is configured, then webhooks should not be configured + provider: "" + +kind: ConfigMap +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-as-code-webhook-certs + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +# The data is populated at install time +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.pipelinesascode.tekton.dev + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + failurePolicy: Fail + sideEffects: None + name: validation.pipelinesascode.tekton.dev +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-as-code-config-observability + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using Stackdriver will incur additional charges. + metrics.backend-destination: prometheus + # metrics.stackdriver-project-id field specifies the Stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used and metrics will be sent to the cluster's project if this field is + # not provided. 
+ metrics.stackdriver-project-id: "" + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed + # to send metrics to Stackdriver using "global" resource type and custom + # metric type. Setting this flag to "true" could cause extra Stackdriver + # charge. If metrics.backend-destination is not Stackdriver, this is + # ignored. + metrics.allow-stackdriver-custom-metrics: "false" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-watcher-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. + buckets: "1" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-webhook-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. + buckets: "1" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app: pipelines-as-code-controller + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-controller + containers: + - name: pac-controller + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-controller:v0.35.x" + imagePullPolicy: Always + ports: + - name: api + containerPort: 8082 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + readinessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: TLS_KEY + value: "key" + - name: TLS_CERT + value: "cert" + - name: TLS_SECRET_NAME + value: "pipelines-as-code-tls-secret" + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K_METRICS_CONFIG + value: '{"Domain":"pipelinesascode.tekton.dev/controller","Component":"pac_controller","PrometheusPort":9090,"ConfigMap":{"name":"pipelines-as-code-config-observability"}}' + - name: K_TRACING_CONFIG + value: '{"backend":"prometheus","debug":"false","sample-rate":"0"}' + - name: K_SINK_TIMEOUT + value: "30" + - name: PAC_CONTROLLER_LABEL + value: "default" + - name: PAC_CONTROLLER_SECRET + value: "pipelines-as-code-secret" + - name: PAC_CONTROLLER_CONFIGMAP + value: "pipelines-as-code" + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + volumeMounts: + - mountPath: "/etc/pipelines-as-code/tls" + readOnly: true + name: tls + volumes: + - name: tls + secret: + secretName: pipelines-as-code-tls-secret + optional: true +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app: pipelines-as-code-controller + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: http-listener + port: 8080 + protocol: TCP + targetPort: 8082 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + app: pipelines-as-code-watcher + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-watcher + containers: + - name: pac-watcher + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-watcher:v0.35.x" + imagePullPolicy: Always + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: METRICS_DOMAIN + value: tekton.dev/pipelinesascode + - name: CONFIG_OBSERVABILITY_NAME + value: pipelines-as-code-config-observability + - name: CONFIG_LEADERELECTION_NAME + value: pac-watcher-config-leader-election + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + ports: + - name: probes + containerPort: 8080 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + readinessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + livenessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code + app: pipelines-as-code-watcher +spec: + ports: + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-webhook + containers: + - name: pac-webhook + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-webhook:v0.35.x" + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEBHOOK_SERVICE_NAME + value: pipelines-as-code-webhook + - name: WEBHOOK_SECRET_NAME + value: pipelines-as-code-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipelinesascode + - name: CONFIG_LEADERELECTION_NAME + value: pac-webhook-config-leader-election + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + ports: + - name: https-webhook + containerPort: 8443 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code diff --git a/release.yaml b/release.yaml new file mode 100644 index 000000000..59a9eb1ae --- /dev/null +++ b/release.yaml @@ -0,0 +1,1792 @@ +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: Namespace +metadata: + name: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + openshift.io/cluster-monitoring: "true" +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + # All system:authenticated users needs to have access + # of the pipelines-as-code-info ConfigMap even if they don't + # have access to the other resources present in the + # installed namespace. 
+ - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["pipelines-as-code-info"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: Group + name: system:authenticated + apiGroup: rbac.authorization.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-info +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pipelines-as-code-aggregate + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - pipelinesascode.tekton.dev + resources: + - repositories + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-controller-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-controller-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-controller-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-controller-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["create"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "create", "update", "delete"] + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "create", "list"] + - apiGroups: ["tekton.dev"] + resources: ["pipelineruns"] + verbs: ["get", "list", "create", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create"] + - apiGroups: ["route.openshift.io"] + 
resources: ["routes"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-controller-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-controller + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-controller-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-watcher-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-watcher-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-watcher +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-watcher-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-watcher-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "delete"] + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "list", "update", "watch"] + - apiGroups: ["tekton.dev"] + resources: ["pipelineruns"] + verbs: ["get", "delete", "list", "watch", "update", "patch"] + - apiGroups: ["tekton.dev"] + resources: ["taskruns"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: ["route.openshift.io"] + resources: ["routes"] + verbs: ["get"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-watcher-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: 
pipelines-as-code +subjects: +- kind: ServiceAccount + name: pipelines-as-code-watcher + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-watcher-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipelines-as-code-webhook-role + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "update"] + resourceNames: ["pipelines-as-code-webhook-certs"] + # The webhook daemon makes a reconciliation loop on webhook-certs. Whenever + # the secret changes it updates the webhook configurations with the certificates + # stored in the secret. + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "list", "create", "update", "delete", "patch", "watch"] + # webhook uses leases for leader election +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-webhook-binding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: + - kind: ServiceAccount + name: pipelines-as-code-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-webhook-role +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: pipeline-as-code-webhook-clusterrole + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +rules: + - apiGroups: ["pipelinesascode.tekton.dev"] + resources: ["repositories"] + verbs: ["get", "list", "watch"] + # The webhook performs a reconciliation on this resource and continuously + # updates configuration. + - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["list", "watch"] + # When there are changes to the configs or secrets, knative updates the validating webhook config + # with the updated certificates or the refreshed set of rules. 
+ - apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "update", "delete"] + resourceNames: ["validation.pipelinesascode.tekton.dev"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: pipelines-as-code-webhook-clusterbinding + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +subjects: +- kind: ServiceAccount + name: pipelines-as-code-webhook + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: pipeline-as-code-webhook-clusterrole +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: repositories.pipelinesascode.tekton.dev + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +spec: + group: pipelinesascode.tekton.dev + versions: + - name: v1alpha1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - name: Succeeded + type: string + jsonPath: '.pipelinerun_status[-1].conditions[?(@.type=="Succeeded")].status' + - name: Reason + type: string + jsonPath: '.pipelinerun_status[-1].conditions[?(@.type=="Succeeded")].reason' + - name: StartTime + type: date + jsonPath: ".pipelinerun_status[-1].startTime" + - name: CompletionTime + type: date + jsonPath: ".pipelinerun_status[-1].completionTime" + served: true + storage: true + schema: + openAPIV3Schema: + description: Repository is the representation of a Git repository from a Git provider platform. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + pipelinerun_status: + items: + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is additional Status fields for the Resource to save some + additional State as well as convey more information to the user. This is + roughly akin to Annotations on any k8s resource, just the reconciler conveying + richer information outwards. + type: object + completionTime: + description: CompletionTime is the time the PipelineRun completed. 
+ format: date-time + type: string + conditions: + description: Conditions the latest available observations of a resource's current state. + items: + description: |- + Condition defines a readiness condition for a Knative resource. + See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + properties: + lastTransitionTime: + description: |- + LastTransitionTime is the last time the condition transitioned from one status to another. + We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic + differences (all other things held constant). + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + severity: + description: |- + Severity with which to treat failures of this type of condition. + When this is not specified, it defaults to Error. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + event_type: + description: EventType is the event type of that run + type: string + failure_reason: + additionalProperties: + description: TaskInfos contains information about a task. + properties: + completion_time: + format: date-time + type: string + display_name: + type: string + log_snippet: + type: string + message: + type: string + name: + type: string + reason: + type: string + required: + - name + type: object + description: CollectedTaskInfos is the information about tasks + type: object + logurl: + description: LogURL is the full URL to the log for this run. + type: string + observedGeneration: + description: |- + ObservedGeneration is the 'Generation' of the Service that + was last processed by the controller. + format: int64 + type: integer + pipelineRunName: + description: PipelineRunName is the name of the PipelineRun + type: string + sha: + description: SHA is the name of the SHA that has been tested + type: string + sha_url: + description: SHA the URL of the SHA to view it + type: string + startTime: + description: StartTime is the time the PipelineRun is actually started. + format: date-time + type: string + target_branch: + description: TargetBranch is the target branch of that run + type: string + title: + description: Title is the title of the commit SHA that has been tested + type: string + type: object + type: array + spec: + description: |- + RepositorySpec defines the desired state of a Repository, including its URL, + Git provider configuration, and operational settings. + properties: + concurrency_limit: + description: |- + ConcurrencyLimit defines the maximum number of concurrent pipelineruns that can + run for this repository. This helps prevent resource exhaustion when many events trigger + pipelines simultaneously. + minimum: 1 + type: integer + git_provider: + description: |- + GitProvider details specific to a git provider configuration. Contains authentication, + API endpoints, and provider type information needed to interact with the Git service. + properties: + secret: + description: |- + Secret reference for authentication with the Git provider. Contains the token, + password, or private key used to authenticate requests to the Git provider API. 
+ properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + type: + description: |- + Type of git provider. Determines which Git provider API and authentication flow to use. + Supported values: + - 'github': GitHub.com or GitHub Enterprise + - 'gitlab': GitLab.com or self-hosted GitLab + - 'bitbucket-datacenter': Bitbucket Data Center (self-hosted) + - 'bitbucket-cloud': Bitbucket Cloud (bitbucket.org) + - 'gitea': Gitea instances + enum: + - github + - gitlab + - bitbucket-datacenter + - bitbucket-cloud + - gitea + type: string + url: + description: |- + URL of the git provider API endpoint. This is the base URL for API requests to the + Git provider (e.g., 'https://api.github.com' for GitHub or a custom GitLab instance URL). + type: string + user: + description: |- + User of the git provider. Username to use for authentication when using basic auth + or token-based authentication methods. Not used for GitHub Apps authentication. + type: string + webhook_secret: + description: |- + WebhookSecret reference for webhook validation. Contains the shared secret used to + validate that incoming webhooks are legitimate and coming from the Git provider. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + type: object + incoming: + description: |- + Incomings defines incoming webhook configurations. Each configuration specifies how to + handle external webhook requests that don't come directly from the primary Git provider. + items: + properties: + params: + description: |- + Params defines parameter names to extract from the webhook payload. These parameters + will be made available to the PipelineRuns triggered by this webhook. + items: + type: string + type: array + secret: + description: |- + Secret for the incoming webhook authentication. This secret is used to validate + that webhook requests are coming from authorized sources. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + targets: + description: |- + Targets defines target branches for this webhook. When specified, only webhook + events targeting these branches will trigger PipelineRuns. + items: + type: string + type: array + type: + description: |- + Type of the incoming webhook. Currently only 'webhook-url' is supported, which allows + external systems to trigger PipelineRuns via generic webhook requests. + enum: + - webhook-url + type: string + required: + - secret + - type + type: object + type: array + params: + description: |- + Params defines repository level parameters that can be referenced in PipelineRuns. + These parameters can be used as default values or configured for specific events. + items: + properties: + filter: + description: |- + Filter defines when this parameter applies. It can be used to conditionally + apply parameters based on the event type, branch name, or other attributes. + type: string + name: + description: |- + Name of the parameter. This is the key that will be used to reference this parameter + in PipelineRun definitions through via the {{ name }} syntax. + type: string + secret_ref: + description: |- + SecretRef references a secret for the parameter value. Use this when the parameter + contains sensitive information that should not be stored directly in the Repository CR. 
+ This field is mutually exclusive with Value. + properties: + key: + description: Key in the secret + type: string + name: + description: Name of the secret + type: string + required: + - name + type: object + value: + description: |- + Value of the parameter. The literal value to be provided to the PipelineRun. + This field is mutually exclusive with SecretRef. + type: string + required: + - name + type: object + type: array + settings: + description: |- + Settings contains the configuration settings for the repository, including + authorization policies, provider-specific configuration, and provenance settings. + properties: + github_app_token_scope_repos: + description: |- + GithubAppTokenScopeRepos lists repositories that can access the GitHub App token when using the + GitHub App authentication method. This allows specific repositories to use tokens generated for + the GitHub App installation, useful for cross-repository access. + items: + type: string + type: array + gitlab: + description: Gitlab contains GitLab-specific settings for repositories hosted on GitLab. + properties: + comment_strategy: + description: |- + CommentStrategy defines how GitLab comments are handled for pipeline results. + Options: + - 'status-comment': Posts a single comment and updates it with pipeline results + - 'pipeline-runs-comment': Creates a new comment for each PipelineRun + - 'disable_all': Disables all comments on merge requests + enum: + - status-comment + - pipeline-runs-comment + - disable_all + type: string + type: object + pipelinerun_provenance: + description: |- + PipelineRunProvenance configures how PipelineRun definitions are fetched. + Options: + - 'source': Fetch definitions from the event source branch/SHA (default) + - 'default_branch': Fetch definitions from the repository default branch + enum: + - source + - default_branch + type: string + policy: + description: |- + Policy defines authorization policies for the repository, controlling who can + trigger PipelineRuns under different conditions. + properties: + ok_to_test: + description: |- + OkToTest defines a list of usernames that are allowed to trigger pipeline runs on pull requests + from external contributors by commenting "/ok-to-test" on the PR. These users are typically + repository maintainers or trusted contributors who can vouch for external contributions. + items: + type: string + type: array + pull_request: + description: |- + PullRequest defines a list of usernames that are explicitly allowed to execute + pipelines on their pull requests, even if they wouldn't normally have permission. + This is useful for allowing specific external contributors to trigger pipeline runs. + items: + type: string + type: array + type: object + type: object + url: + description: |- + URL of the repository we are building. Must be a valid HTTP/HTTPS Git repository URL + that PAC will use to clone and fetch pipeline definitions from. 
+ type: string + type: object + required: + - spec + type: object + scope: Namespaced + names: + plural: repositories + singular: repository + kind: Repository + shortNames: + - repo +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-config-logging + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + zap-logger-config: | + { + "level": "info", + "development": false, + "sampling": { + "initial": 100, + "thereafter": 100 + }, + "outputPaths": ["stdout"], + "errorOutputPaths": ["stderr"], + "encoding": "json", + "encoderConfig": { + "timeKey": "ts", + "levelKey": "level", + "nameKey": "logger", + "callerKey": "caller", + "messageKey": "msg", + "stacktraceKey": "stacktrace", + "lineEnding": "", + "levelEncoder": "", + "timeEncoder": "iso8601", + "durationEncoder": "", + "callerEncoder": "" + } + } + # Log level overrides + loglevel.pipelinesascode: "info" + loglevel.pac-watcher: "info" + loglevel.pipelines-as-code-webhook: "info" +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# See https://pipelinesascode.com/docs/install/settings/ for the complete +# documentation of all settings. + +apiVersion: v1 +data: + # The application name, you can customize this label. If using the Github App you will need to customize the label on the github app setting as well. + application-name: "Pipelines as Code CI" + + # Whether to automatically create a secret with the token to be use by git-clone + secret-auto-create: "true" + + # By default we only generate token scoped to the repository from where the + # payload come from. + # We do this because if the github apps is installed on an github organisation + # + # and there is a mix of public and private repositories in there + # where some users on that org does not have access. + # + # If you trust every users on your organisations to access any repos there or + # not planning to install your github application globally on a Github Organisation + # then you can safely set this option to false. + secret-github-app-token-scoped: "true" + + # If you don't want to completely disable the scoping of the token, but still + # wants some other repos (on the same installation id) available from the + # token, then you can add an extra owner/repo here. 
+ # + # You can have multiple owner/repositories separated by commas: + # i.e: "owner/private-repo1, org/repo2" + secret-github-app-scope-extra-repos: "" + + # Tekton HUB API urls + hub-url: "https://api.hub.tekton.dev/v1" + + # Tekton HUB catalog name + hub-catalog-name: "tekton" + + # Additional Hub Catalogs is supported, for example: + # + # catalog-1-id: anotherhub + # catalog-1-name: tekton + # catalog-1-url: https://api.other.com/v1 + # + # this configuration will have a new catalog named anotherhub on https://api.other.com/v1 endpoint and catalog name tekton + # to be used by a user in their templates like this: + # pipelinesascode.tekton.dev/task: "anotherhub://task" + # + # Increase the number of the catalog to add more of them + + # Allow fetching remote tasks + remote-tasks: "true" + + # Using the URL of the Tekton dashboard, Pipelines-as-Code generates a URL to the + # PipelineRun on the Tekton dashboard + tekton-dashboard-url: "" + + # Enable or disable the feature to show a log snippet of the failed task when there is + # an error in a Pipeline + # + # It will show the last 3 lines of the first container of the first task + # that has error in the pipeline. + # + # you may want to disable this if you think your pipeline may leak some value + error-log-snippet: "true" + + # Enable or disable the inspection of container logs to detect error message + # and expose them as annotations on Pull Request. Only Github apps is supported + error-detection-from-container-logs: "true" + + # How many lines to grab from the container when inspecting the + # logs for error-detection. Increasing this value may increase the watcher + # memory usage. Use -1 for unlimited lines. + error-detection-max-number-of-lines: "50" + + # The default regexp used when we use the simple error detection + error-detection-simple-regexp: |- + ^(?P[^:]*):(?P[0-9]+):(?P[0-9]+)?([ ]*)?(?P.*) + + # Global setting to control whether Pipelines-as-Code should automatically cancel + # any in-progress PipelineRuns associated with a pull request when that pull request is updated. + # This helps prevent multiple redundant runs from executing simultaneously. + # Default value: false. + enable-cancel-in-progress-on-pull-requests: "false" + + # Global setting to determine whether Pipelines-as-Code should automatically cancel + # in-progress PipelineRuns triggered by a push event, if a new push occurs on the same branch. + # This prevents overlapping or redundant runs for the same branch. + # Default value: false. + enable-cancel-in-progress-on-push: "false" + + # Since public bitbucket doesn't have the concept of Secret, we need to be + # able to secure the request by querying https://ip-ranges.atlassian.com/, + # this only happen for public bitbucket (ie: when provider.url is not set in + # repository spec). If you want to override this, you need to bear in mind + # this could be a security issue, a malicious user can send a PR to your repo + # with a modification to your PipelineRun that would grab secrets, tunnel or + # others and then send a malicious webhook payload to the controller which + # look like a authorized owner has send the PR to run it.. + bitbucket-cloud-check-source-ip: "true" + + # Add extra IPS (ie: 127.0.0.1) or networks (127.0.0.0/16) separated by commas. + bitbucket-cloud-additional-source-ip: "" + + # max-keep-run-upper-limit defines the upper limit for max-keep-run annotation + # value which a user can set on pipelineRun. 
the value set on annotation + # should be less than or equal to the upper limit otherwise the upper limit + # will be used while cleaning up + max-keep-run-upper-limit: "" + + # if defined then applies to all pipelineRun who doesn't have max-keep-runs annotation + default-max-keep-runs: "" + + # Whether to auto configure newly created repositories, this will create a new + # namespace and repository CR, supported only with GitHub App + auto-configure-new-github-repo: "false" + + # add a template to generate name for namespace for your auto configured + # github repo supported fields are repo_owner, repo_name eg. if defined as + # `{{repo_owner}}-{{repo_name}}-ci`, then namespace generated for repository + # https://github.com/owner/repo will be `owner-repo-ci` + auto-configure-repo-namespace-template: "" + + # Enable or disable the feature to rerun the CI if push event happens on + # a pull request + # + # By default it is true and CI will be re-run in case of push/amend on the + # pull request if ok-to-test is done once + # + # you may want to disable this if ok-to-test should be done on each iteration + remember-ok-to-test: "false" + + # When enabled, this option prevents duplicate pipeline runs when a commit appears in + # both a push event and a pull request. If a push event comes from a commit that is + # part of an open pull request, the push event will be skipped as it would create + # a duplicate pipeline run. + # Default: true + skip-push-event-for-pr-commits: "true" + + # Configure a custom console here, the driver support custom parameters from + # Repo CR along a few other template variable, see documentation for more + # details + # + # custom-console-name: Console Name + # custom-console-url: https://url + # custom-console-url-pr-details: https://url/ns/{{ namespace }}/{{ pr }} + # custom-console-url-pr-tasklog: https://url/ns/{{ namespace }}/{{ pr }}/logs/{{ task }} + +kind: ConfigMap +metadata: + name: pipelines-as-code + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This configmap is filled by bootstrap command +# GitHub App is added as provider and later this is checked +# before configuring a new GitHub App so that we don't +# configure more than one App + +apiVersion: v1 +data: + # pipelines as code controller version + version: "v0.35.0" + + # controller url to be used for configuring webhook using cli + controller-url: "" + + # display the configured provider on the platform + # only one provider type to be configured at a time + # eg. 
if GitHub App is configured, then webhooks should not be configured + provider: "" + +kind: ConfigMap +metadata: + name: pipelines-as-code-info + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +apiVersion: v1 +kind: Secret +metadata: + name: pipelines-as-code-webhook-certs + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +# The data is populated at install time +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: validation.pipelinesascode.tekton.dev + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +webhooks: + - admissionReviewVersions: ["v1"] + clientConfig: + service: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + failurePolicy: Fail + sideEffects: None + name: validation.pipelinesascode.tekton.dev +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: pipelines-as-code-config-observability + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # metrics.backend-destination field specifies the system metrics destination. + # It supports either prometheus (the default) or stackdriver. + # Note: Using Stackdriver will incur additional charges. + metrics.backend-destination: prometheus + # metrics.stackdriver-project-id field specifies the Stackdriver project ID. This + # field is optional. When running on GCE, application default credentials will be + # used and metrics will be sent to the cluster's project if this field is + # not provided. 
+ metrics.stackdriver-project-id: "" + # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed + # to send metrics to Stackdriver using "global" resource type and custom + # metric type. Setting this flag to "true" could cause extra Stackdriver + # charge. If metrics.backend-destination is not Stackdriver, this is + # ignored. + metrics.allow-stackdriver-custom-metrics: "false" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-watcher-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. + buckets: "1" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
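+
+# The ConfigMap that follows mirrors pac-watcher-config-leader-election above,
+# but is the one read by the webhook deployment through its
+# CONFIG_LEADERELECTION_NAME environment variable. As with the watcher copy,
+# everything under _example is documentation only; to change a value it has to
+# be copied into the data block, for instance (illustrative values taken from
+# the _example block itself):
+#
+#   data:
+#     lease-duration: "60s"
+#     renew-deadline: "40s"
+#     retry-period: "10s"
+#     buckets: "1"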
+--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: pac-webhook-config-leader-election + namespace: pipelines-as-code + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +data: + _example: | + ################################ + # # + # EXAMPLE CONFIGURATION # + # # + ################################ + # This block is not actually functional configuration, + # but serves to illustrate the available configuration + # options and document them in a way that is accessible + # to users that `kubectl edit` this config map. + # + # These sample configuration options may be copied out of + # this example block and unindented to be in the data block + # to actually change the configuration. + # lease-duration is how long non-leaders will wait to try to acquire the + # lock; 15 seconds is the value used by core kubernetes controllers. + lease-duration: "60s" + # renew-deadline is how long a leader will try to renew the lease before + # giving up; 10 seconds is the value used by core kubernetes controllers. + renew-deadline: "40s" + # retry-period is how long the leader election client waits between tries of + # actions; 2 seconds is the value used by core kubernetes controllers. + retry-period: "10s" + # buckets is the number of buckets used to partition key space of each + # Reconciler. If this number is M and the replica number of the controller + # is N, the N replicas will compete for the M buckets. The owner of a + # bucket will take care of the reconciling for the keys partitioned into + # that bucket. + buckets: "1" +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
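+
+# The Deployment that follows runs the controller which handles incoming
+# webhook events from the Git provider. It reads its settings from the
+# pipelines-as-code ConfigMap (PAC_CONTROLLER_CONFIGMAP) and the Secret named
+# by PAC_CONTROLLER_SECRET, and it operates under the
+# pipeline-as-code-controller-clusterrole defined earlier, which lets it
+# create namespaces, secrets, Repositories and PipelineRuns. On OpenShift its
+# http-listener port is published by the pipelines-as-code-controller Route
+# defined towards the end of this file.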
+--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app: pipelines-as-code-controller + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-controller + containers: + - name: pac-controller + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-controller:v0.35.x" + imagePullPolicy: Always + ports: + - name: api + containerPort: 8082 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + readinessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /live + port: api + scheme: HTTP + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: TLS_KEY + value: "key" + - name: TLS_CERT + value: "cert" + - name: TLS_SECRET_NAME + value: "pipelines-as-code-tls-secret" + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K_METRICS_CONFIG + value: '{"Domain":"pipelinesascode.tekton.dev/controller","Component":"pac_controller","PrometheusPort":9090,"ConfigMap":{"name":"pipelines-as-code-config-observability"}}' + - name: K_TRACING_CONFIG + value: '{"backend":"prometheus","debug":"false","sample-rate":"0"}' + - name: K_SINK_TIMEOUT + value: "30" + - name: PAC_CONTROLLER_LABEL + value: "default" + - name: PAC_CONTROLLER_SECRET + value: "pipelines-as-code-secret" + - name: PAC_CONTROLLER_CONFIGMAP + value: "pipelines-as-code" + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + volumeMounts: + - mountPath: "/etc/pipelines-as-code/tls" + readOnly: true + name: tls + volumes: + - name: tls + secret: + secretName: pipelines-as-code-tls-secret + optional: true +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
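+
+# The Service that follows maps port 8080 (http-listener) to the controller's
+# api container port 8082 and port 9090 (http-metrics) to its metrics port.
+# The pipelines-as-code-controller Route below targets http-listener, and the
+# pipelines-as-code-controller-monitor ServiceMonitor scrapes http-metrics by
+# selecting the app: pipelines-as-code-controller label.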
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-controller + namespace: pipelines-as-code + labels: + app: pipelines-as-code-controller + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: http-listener + port: 8080 + protocol: TCP + targetPort: 8082 + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + app: pipelines-as-code-watcher + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-watcher + containers: + - name: pac-watcher + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-watcher:v0.35.x" + imagePullPolicy: Always + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: METRICS_DOMAIN + value: tekton.dev/pipelinesascode + - name: CONFIG_OBSERVABILITY_NAME + value: pipelines-as-code-config-observability + - name: CONFIG_LEADERELECTION_NAME + value: pac-watcher-config-leader-election + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + ports: + - name: probes + containerPort: 8080 + - name: metrics + containerPort: 9090 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + readinessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + livenessProbe: + httpGet: + path: /live + port: probes + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-watcher + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code + app: pipelines-as-code-watcher +spec: + ports: + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/name: watcher + app.kubernetes.io/component: watcher + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + template: + metadata: + labels: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: pipelines-as-code-webhook + containers: + - name: pac-webhook + image: "ghcr.io/openshift-pipelines/pipelines-as-code/pipelines-as-code-webhook:v0.35.x" + env: + - name: CONFIG_LOGGING_NAME + value: pac-config-logging + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: WEBHOOK_SERVICE_NAME + value: pipelines-as-code-webhook + - name: WEBHOOK_SECRET_NAME + value: pipelines-as-code-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipelinesascode + - name: CONFIG_LEADERELECTION_NAME + value: pac-webhook-config-leader-election + - name: KUBERNETES_MIN_VERSION + value: "v1.28.0" + ports: + - name: https-webhook + containerPort: 8443 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
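+
+# The Service that follows is the target of the
+# validation.pipelinesascode.tekton.dev ValidatingWebhookConfiguration defined
+# earlier in this file: the API server reaches it on port 443, which forwards
+# to the webhook container's https-webhook port (8443). The webhook keeps that
+# configuration updated with the certificates stored in the
+# pipelines-as-code-webhook-certs Secret.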
+--- +apiVersion: v1 +kind: Service +metadata: + name: pipelines-as-code-webhook + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code +spec: + ports: + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + annotations: + haproxy.router.openshift.io/timeout: 600s + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: pipelines-as-code + app.kubernetes.io/version: "v0.35.0" + pipelines-as-code/route: controller + name: pipelines-as-code-controller + namespace: pipelines-as-code +spec: + port: + targetPort: http-listener + tls: + insecureEdgeTerminationPolicy: Redirect + termination: edge + to: + kind: Service + name: pipelines-as-code-controller + weight: 100 + wildcardPolicy: None +--- + +# Copyright 2025 Red Hat +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
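+
+# The remaining objects hook the release into Prometheus-based monitoring: the
+# pipelines-as-code-monitoring Role and RoleBinding let the prometheus-k8s
+# ServiceAccount discover services, endpoints and pods in this namespace, and
+# the two ServiceMonitors scrape the http-metrics port (9090) of the watcher
+# and controller Services, selected through their app labels.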
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipelines-as-code-monitoring + namespace: pipelines-as-code +rules: +- apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelines-as-code-monitoring + namespace: pipelines-as-code +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelines-as-code-monitoring +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: pipelines-as-code +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: pipelines-as-code-monitor + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code + annotations: + networkoperator.openshift.io/ignore-errors: "" +spec: + endpoints: + - interval: 10s + port: http-metrics + jobLabel: app + namespaceSelector: + matchNames: + - pipelines-as-code + selector: + matchLabels: + app: pipelines-as-code-watcher +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: pipelines-as-code-controller-monitor + namespace: pipelines-as-code + labels: + app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/part-of: pipelines-as-code + annotations: + networkoperator.openshift.io/ignore-errors: "" +spec: + endpoints: + - interval: 10s + port: http-metrics + jobLabel: app + namespaceSelector: + matchNames: + - pipelines-as-code + selector: + matchLabels: + app: pipelines-as-code-controller From 0b6cda803552f179101710dae77a73e162ceaee6 Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Wed, 4 Jun 2025 15:28:40 +0530 Subject: [PATCH 02/20] DNM: downgrade go version to 1.23.6 downgraded go version to 1.23.6 and downgraded cert-manager dependency version as well. 
Signed-off-by: Zaki Shaikh --- go.mod | 6 ++---- go.sum | 4 ++-- pkg/pipelineascode/pipelineascode.go | 2 +- .../cert-manager/cert-manager/LICENSES | 20 +++++++++---------- vendor/modules.txt | 4 ++-- 5 files changed, 17 insertions(+), 19 deletions(-) diff --git a/go.mod b/go.mod index 0e9c75b2b..7af8428ee 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,6 @@ module github.com/openshift-pipelines/pipelines-as-code -go 1.23.8 - -toolchain go1.24.2 +go 1.23.6 require ( code.gitea.io/gitea v1.23.7 @@ -51,7 +49,7 @@ require ( cel.dev/expr v0.23.1 // indirect github.com/42wim/httpsig v1.2.3 // indirect github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect - github.com/cert-manager/cert-manager v1.17.2 // indirect + github.com/cert-manager/cert-manager v1.17.1 // indirect github.com/cloudevents/sdk-go/sql/v2 v2.0.0-20240712172937-3ce6b2f1f011 // indirect github.com/coreos/go-oidc/v3 v3.14.1 // indirect github.com/fxamacker/cbor/v2 v2.8.0 // indirect diff --git a/go.sum b/go.sum index fd54357b0..63ab74fb0 100644 --- a/go.sum +++ b/go.sum @@ -81,8 +81,8 @@ github.com/bradleyfalzon/ghinstallation/v2 v2.15.0/go.mod h1:PoH9Vhy82OeRFZfxsVr github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cert-manager/cert-manager v1.17.2 h1:QQYTEOsHf/Z3BFzKH2sIILHJwZA5Ut0LYZlHyNViupg= -github.com/cert-manager/cert-manager v1.17.2/go.mod h1:2TmjsTQF8GZqc8fgLhXWCfbA6YwWCUHKxerJNbFh9eU= +github.com/cert-manager/cert-manager v1.17.1 h1:Aig+lWMoLsmpGd9TOlTvO4t0Ah3D+/vGB37x/f+ZKt0= +github.com/cert-manager/cert-manager v1.17.1/go.mod h1:zeG4D+AdzqA7hFMNpYCJgcQ2VOfFNBa+Jzm3kAwiDU4= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= diff --git a/pkg/pipelineascode/pipelineascode.go b/pkg/pipelineascode/pipelineascode.go index 6baa732c8..58e9a1c5b 100644 --- a/pkg/pipelineascode/pipelineascode.go +++ b/pkg/pipelineascode/pipelineascode.go @@ -59,7 +59,7 @@ func (p *PacRun) Run(ctx context.Context) error { matchedPRs, repo, err := p.matchRepoPR(ctx) if repo != nil && p.event.TriggerTarget == triggertype.PullRequestClosed { if err := p.cancelAllInProgressBelongingToClosedPullRequest(ctx, repo); err != nil { - return fmt.Errorf("error cancelling in progress pipelineRuns belonging to pull request %d: %w", p.event.PullRequestNumber, err) + return fmt.Errorf("error cancelling in progress PipelineRuns belonging to pull request %d: %w", p.event.PullRequestNumber, err) } return nil } diff --git a/vendor/github.com/cert-manager/cert-manager/LICENSES b/vendor/github.com/cert-manager/cert-manager/LICENSES index baf80451d..5cf5755c0 100644 --- a/vendor/github.com/cert-manager/cert-manager/LICENSES +++ b/vendor/github.com/cert-manager/cert-manager/LICENSES @@ -53,8 +53,8 @@ github.com/fsnotify/fsnotify,https://github.com/fsnotify/fsnotify/blob/v1.8.0/LI github.com/fxamacker/cbor/v2,https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE,MIT github.com/go-asn1-ber/asn1-ber,https://github.com/go-asn1-ber/asn1-ber/blob/v1.5.6/LICENSE,MIT github.com/go-http-utils/headers,https://github.com/go-http-utils/headers/blob/fed159eddc2a/LICENSE,MIT 
-github.com/go-jose/go-jose/v4,https://github.com/go-jose/go-jose/blob/v4.0.5/LICENSE,Apache-2.0 -github.com/go-jose/go-jose/v4/json,https://github.com/go-jose/go-jose/blob/v4.0.5/json/LICENSE,BSD-3-Clause +github.com/go-jose/go-jose/v4,https://github.com/go-jose/go-jose/blob/v4.0.2/LICENSE,Apache-2.0 +github.com/go-jose/go-jose/v4/json,https://github.com/go-jose/go-jose/blob/v4.0.2/json/LICENSE,BSD-3-Clause github.com/go-ldap/ldap/v3,https://github.com/go-ldap/ldap/blob/v3.4.8/v3/LICENSE,MIT github.com/go-logr/logr,https://github.com/go-logr/logr/blob/v1.4.2/LICENSE,Apache-2.0 github.com/go-logr/stdr,https://github.com/go-logr/stdr/blob/v1.2.2/LICENSE,Apache-2.0 @@ -63,7 +63,7 @@ github.com/go-openapi/jsonpointer,https://github.com/go-openapi/jsonpointer/blob github.com/go-openapi/jsonreference,https://github.com/go-openapi/jsonreference/blob/v0.21.0/LICENSE,Apache-2.0 github.com/go-openapi/swag,https://github.com/go-openapi/swag/blob/v0.23.0/LICENSE,Apache-2.0 github.com/gogo/protobuf,https://github.com/gogo/protobuf/blob/v1.3.2/LICENSE,BSD-3-Clause -github.com/golang-jwt/jwt/v5,https://github.com/golang-jwt/jwt/blob/v5.2.2/LICENSE,MIT +github.com/golang-jwt/jwt/v5,https://github.com/golang-jwt/jwt/blob/v5.2.1/LICENSE,MIT github.com/golang/groupcache/lru,https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE,Apache-2.0 github.com/golang/protobuf/proto,https://github.com/golang/protobuf/blob/v1.5.4/LICENSE,BSD-3-Clause github.com/golang/snappy,https://github.com/golang/snappy/blob/v0.0.4/LICENSE,BSD-3-Clause @@ -146,14 +146,14 @@ go.opentelemetry.io/otel/trace,https://github.com/open-telemetry/opentelemetry-g go.opentelemetry.io/proto/otlp,https://github.com/open-telemetry/opentelemetry-proto-go/blob/otlp/v1.4.0/otlp/LICENSE,Apache-2.0 go.uber.org/multierr,https://github.com/uber-go/multierr/blob/v1.11.0/LICENSE.txt,MIT go.uber.org/zap,https://github.com/uber-go/zap/blob/v1.27.0/LICENSE,MIT -golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.36.0:LICENSE,BSD-3-Clause +golang.org/x/crypto,https://cs.opensource.google/go/x/crypto/+/v0.31.0:LICENSE,BSD-3-Clause golang.org/x/exp,https://cs.opensource.google/go/x/exp/+/b2144cdd:LICENSE,BSD-3-Clause -golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.38.0:LICENSE,BSD-3-Clause -golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.28.0:LICENSE,BSD-3-Clause -golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.12.0:LICENSE,BSD-3-Clause -golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.31.0:LICENSE,BSD-3-Clause -golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.30.0:LICENSE,BSD-3-Clause -golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.23.0:LICENSE,BSD-3-Clause +golang.org/x/net,https://cs.opensource.google/go/x/net/+/v0.33.0:LICENSE,BSD-3-Clause +golang.org/x/oauth2,https://cs.opensource.google/go/x/oauth2/+/v0.24.0:LICENSE,BSD-3-Clause +golang.org/x/sync,https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE,BSD-3-Clause +golang.org/x/sys,https://cs.opensource.google/go/x/sys/+/v0.28.0:LICENSE,BSD-3-Clause +golang.org/x/term,https://cs.opensource.google/go/x/term/+/v0.27.0:LICENSE,BSD-3-Clause +golang.org/x/text,https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE,BSD-3-Clause golang.org/x/time/rate,https://cs.opensource.google/go/x/time/+/v0.8.0:LICENSE,BSD-3-Clause gomodules.xyz/jsonpatch/v2,https://github.com/gomodules/jsonpatch/blob/v2.4.0/v2/LICENSE,Apache-2.0 
google.golang.org/api,https://github.com/googleapis/google-api-go-client/blob/v0.198.0/LICENSE,BSD-3-Clause diff --git a/vendor/modules.txt b/vendor/modules.txt index d46c004aa..7bd95056a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -57,8 +57,8 @@ github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1 github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1 github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1 github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1 -# github.com/cert-manager/cert-manager v1.17.2 -## explicit; go 1.23.8 +# github.com/cert-manager/cert-manager v1.17.1 +## explicit; go 1.23.0 github.com/cert-manager/cert-manager/pkg/apis/acme github.com/cert-manager/cert-manager/pkg/apis/acme/v1 github.com/cert-manager/cert-manager/pkg/apis/certmanager From ba5f90b4adb25c949f04a6e98b54bf75b7b92dae Mon Sep 17 00:00:00 2001 From: Pipelines as Code CI Robot Date: Wed, 4 Jun 2025 12:54:15 +0000 Subject: [PATCH 03/20] Release yaml generated from https://github.com/openshift-pipelines/pipelines-as-code/commit/0b6cda803552f179101710dae77a73e162ceaee6 for release v0.35.1 --- docs/content/ALLVERSIONS | 2 +- pkg/params/version/version.txt | 2 +- release.k8s.yaml | 70 +++++++++++++++---------------- release.yaml | 76 +++++++++++++++++----------------- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/docs/content/ALLVERSIONS b/docs/content/ALLVERSIONS index eefd238e7..8844cfc09 100644 --- a/docs/content/ALLVERSIONS +++ b/docs/content/ALLVERSIONS @@ -1 +1 @@ -nightly,stable,v0.35.0,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 +nightly,stable,v0.35.1,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 diff --git a/pkg/params/version/version.txt b/pkg/params/version/version.txt index ab4e51c67..6911254bc 100644 --- a/pkg/params/version/version.txt +++ b/pkg/params/version/version.txt @@ -1 +1 @@ -v0.35.0 +v0.35.1 diff --git a/release.k8s.yaml b/release.k8s.yaml index abcf71d72..fa2de2cc7 100644 --- a/release.k8s.yaml +++ b/release.k8s.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - 
app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -185,7 +185,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -217,7 +217,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -227,7 +227,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -244,7 +244,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -261,7 +261,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -290,7 +290,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -322,7 +322,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -332,7 +332,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -360,7 +360,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -377,7 +377,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code 
labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -402,7 +402,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -434,7 +434,7 @@ kind: CustomResourceDefinition metadata: name: repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1032,7 +1032,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1058,7 +1058,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.0" + version: "v0.35.1" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1073,7 +1073,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1096,7 +1096,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code # The data is populated at install time --- @@ -1105,7 +1105,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1138,7 +1138,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1294,7 +1294,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1312,7 +1312,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" spec: securityContext: runAsNonRoot: true @@ -1409,7 +1409,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1446,7 +1446,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1463,7 +1463,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app: pipelines-as-code-watcher spec: securityContext: @@ -1537,7 +1537,7 @@ metadata: name: pipelines-as-code-watcher 
namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1571,7 +1571,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1588,7 +1588,7 @@ spec: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" spec: securityContext: runAsNonRoot: true @@ -1644,7 +1644,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: ports: diff --git a/release.yaml b/release.yaml index 59a9eb1ae..fb2ec149a 100644 --- a/release.yaml +++ b/release.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -188,7 +188,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -220,7 +220,7 @@ 
metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -230,7 +230,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -247,7 +247,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -264,7 +264,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -296,7 +296,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -328,7 +328,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -338,7 +338,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -366,7 +366,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -383,7 +383,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -408,7 +408,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -440,7 +440,7 @@ kind: CustomResourceDefinition metadata: name: repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1038,7 +1038,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1064,7 +1064,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.0" + version: "v0.35.1" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1079,7 +1079,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - 
app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1102,7 +1102,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code # The data is populated at install time --- @@ -1111,7 +1111,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1144,7 +1144,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1300,7 +1300,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1318,7 +1318,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" spec: securityContext: runAsNonRoot: true @@ -1415,7 +1415,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1452,7 +1452,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1469,7 +1469,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app: pipelines-as-code-watcher spec: securityContext: @@ -1543,7 +1543,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1577,7 +1577,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1594,7 +1594,7 @@ spec: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" spec: securityContext: runAsNonRoot: true @@ -1650,7 +1650,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1685,7 +1685,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" pipelines-as-code/route: controller name: pipelines-as-code-controller namespace: pipelines-as-code @@ 
-1753,7 +1753,7 @@ metadata: name: pipelines-as-code-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code annotations: networkoperator.openshift.io/ignore-errors: "" @@ -1775,7 +1775,7 @@ metadata: name: pipelines-as-code-controller-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.0" + app.kubernetes.io/version: "v0.35.1" app.kubernetes.io/part-of: pipelines-as-code annotations: networkoperator.openshift.io/ignore-errors: "" From 6fcedc6b98470282f8cdf996e21f5803be9ada1c Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Mon, 26 May 2025 20:20:47 +0200 Subject: [PATCH 04/20] chore: Update controller finalizer name to include explicit part * The finalizer name used by the controller was updated. * A new constant `FinalizerName` was introduced for the explicit part of the name. * The finalizer name was updated to combine the group name and the new constant. * This change aligns the finalizer naming with common Kubernetes conventions. Fixes #1763 Signed-off-by: Chmouel Boudjnah Co-authored-by: zakisk --- pkg/action/patch_test.go | 4 ++-- pkg/apis/pipelinesascode/register.go | 1 + pkg/pipelineascode/pipelineascode_test.go | 6 +++--- pkg/provider/gitlab/gitlab.go | 3 ++- pkg/reconciler/controller.go | 3 ++- pkg/reconciler/controller_test.go | 3 ++- 6 files changed, 12 insertions(+), 8 deletions(-) diff --git a/pkg/action/patch_test.go b/pkg/action/patch_test.go index 6556cff35..1736429bb 100644 --- a/pkg/action/patch_test.go +++ b/pkg/action/patch_test.go @@ -1,7 +1,7 @@ package action import ( - "path/filepath" + "path" "testing" apipac "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode" @@ -40,7 +40,7 @@ func TestPatchPipelineRun(t *testing.T) { patchedPR, err := PatchPipelineRun(ctx, logger, "log URL", fakeClients.Tekton, testPR, getLogURLMergePatch(fakeClients, testPR)) assert.NilError(t, err) - assert.Equal(t, patchedPR.Annotations[filepath.Join(apipac.GroupName, "log-url")], "https://localhost.console/#/namespaces/namespace/pipelineruns/force-me") + assert.Equal(t, patchedPR.Annotations[path.Join(apipac.GroupName, "log-url")], "https://localhost.console/#/namespaces/namespace/pipelineruns/force-me") } func getLogURLMergePatch(clients clients.Clients, pr *pipelinev1.PipelineRun) map[string]any { diff --git a/pkg/apis/pipelinesascode/register.go b/pkg/apis/pipelinesascode/register.go index cc3f1e802..5012e3444 100644 --- a/pkg/apis/pipelinesascode/register.go +++ b/pkg/apis/pipelinesascode/register.go @@ -21,4 +21,5 @@ const ( GroupName = "pipelinesascode.tekton.dev" RepositoryKind = "Repository" V1alpha1Version = "v1alpha1" + FinalizerName = "finalizer" ) diff --git a/pkg/pipelineascode/pipelineascode_test.go b/pkg/pipelineascode/pipelineascode_test.go index 80211eaff..5be8479f8 100644 --- a/pkg/pipelineascode/pipelineascode_test.go +++ b/pkg/pipelineascode/pipelineascode_test.go @@ -8,7 +8,7 @@ import ( "fmt" "io" "net/http" - "path/filepath" + "path" "regexp" "strings" "sync" @@ -667,7 +667,7 @@ func TestRun(t *testing.T) { if pr.GetName() == "force-me" { continue } - logURL, ok := pr.Annotations[filepath.Join(apipac.GroupName, "log-url")] + logURL, ok := pr.Annotations[path.Join(apipac.GroupName, "log-url")] assert.Assert(t, ok, "failed to find log-url label on pipelinerun: %s/%s", pr.GetNamespace(), pr.GetGenerateName()) assert.Equal(t, logURL, cs.Clients.ConsoleUI().DetailURL(&pr)) @@ -700,5 +700,5 @@ func 
TestGetLogURLMergePatch(t *testing.T) { assert.Assert(t, ok) a, ok := m["annotations"].(map[string]string) assert.Assert(t, ok) - assert.Equal(t, a[filepath.Join(apipac.GroupName, "log-url")], con.URL()) + assert.Equal(t, a[path.Join(apipac.GroupName, "log-url")], con.URL()) } diff --git a/pkg/provider/gitlab/gitlab.go b/pkg/provider/gitlab/gitlab.go index 822e4097f..e4159a7fa 100644 --- a/pkg/provider/gitlab/gitlab.go +++ b/pkg/provider/gitlab/gitlab.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/url" + "path" "path/filepath" "regexp" "strings" @@ -212,7 +213,7 @@ func (v *Provider) SetClient(_ context.Context, run *params.Run, runevent *info. // if we don't have sourceProjectID (ie: incoming-webhook) then try to set // it ASAP if we can. if v.sourceProjectID == 0 && runevent.Organization != "" && runevent.Repository != "" { - projectSlug := filepath.Join(runevent.Organization, runevent.Repository) + projectSlug := path.Join(runevent.Organization, runevent.Repository) projectinfo, _, err := v.Client().Projects.GetProject(projectSlug, &gitlab.GetProjectOptions{}) if err != nil { return err diff --git a/pkg/reconciler/controller.go b/pkg/reconciler/controller.go index be12ac600..cc49770fc 100644 --- a/pkg/reconciler/controller.go +++ b/pkg/reconciler/controller.go @@ -2,6 +2,7 @@ package reconciler import ( "context" + "path" "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode" "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" @@ -88,7 +89,7 @@ func checkStateAndEnqueue(impl *controller.Impl) func(obj any) { func ctrlOpts() func(impl *controller.Impl) controller.Options { return func(_ *controller.Impl) controller.Options { return controller.Options{ - FinalizerName: pipelinesascode.GroupName, + FinalizerName: path.Join(pipelinesascode.GroupName, pipelinesascode.FinalizerName), PromoteFilterFunc: func(obj any) bool { _, exist := obj.(*tektonv1.PipelineRun).GetAnnotations()[keys.State] return exist diff --git a/pkg/reconciler/controller_test.go b/pkg/reconciler/controller_test.go index 7c64de671..a667baf70 100644 --- a/pkg/reconciler/controller_test.go +++ b/pkg/reconciler/controller_test.go @@ -2,6 +2,7 @@ package reconciler import ( "context" + "path" "testing" "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode" @@ -63,7 +64,7 @@ func TestCtrlOpts(t *testing.T) { opts := ctrlOpts()(impl) // Assert that the finalizer name is set correctly. - assert.Equal(t, pipelinesascode.GroupName, opts.FinalizerName) + assert.Equal(t, path.Join(pipelinesascode.GroupName, pipelinesascode.FinalizerName), opts.FinalizerName) // Create a new PipelineRun object with the "started" state label. 
pr := &pipelinev1.PipelineRun{

From 86da7d4a90d3a1e666978b0c71e9b1a76629a994 Mon Sep 17 00:00:00 2001
From: PuneetPunamiya
Date: Wed, 11 Jun 2025 13:09:39 +0530
Subject: [PATCH 05/20] fix(gitlab): trigger PipelineRun only on label change

Before this patch, the pipeline was triggered on any merge request
update, regardless of what changed, such as title, description or
assignee changes.

With this change, the pipeline is triggered only when labels are added
or removed.

Signed-off-by: PuneetPunamiya
---
 pkg/provider/gitlab/detect.go      | 32 +++++++++++++
 pkg/provider/gitlab/detect_test.go | 14 ++++++
 test/gitlab_merge_request_test.go  | 74 ++++++++++++++++++++++++++++++
 3 files changed, 120 insertions(+)

diff --git a/pkg/provider/gitlab/detect.go b/pkg/provider/gitlab/detect.go
index 7f4591ff8..3eed737fb 100644
--- a/pkg/provider/gitlab/detect.go
+++ b/pkg/provider/gitlab/detect.go
@@ -38,6 +38,14 @@ func (v *Provider) Detect(req *http.Request, payload string, logger *zap.Sugared

 	switch gitEvent := eventInt.(type) {
 	case *gitlab.MergeEvent:
+		// on a MR update, react only if OldRev is empty (no new commits pushed).
+		// If OldRev is empty, it's a metadata-only update (e.g., label changes).
+		if gitEvent.ObjectAttributes.Action == "update" && gitEvent.ObjectAttributes.OldRev == "" {
+			if !hasOnlyLabelsChanged(gitEvent) {
+				return setLoggerAndProceed(false, "this 'Merge Request' update event changes are not supported; cannot proceed", nil)
+			}
+		}
+
 		if provider.Valid(gitEvent.ObjectAttributes.Action, []string{"open", "reopen", "update"}) {
 			return setLoggerAndProceed(true, "", nil)
 		}
@@ -76,3 +84,27 @@ func (v *Provider) Detect(req *http.Request, payload string, logger *zap.Sugared
 		return setLoggerAndProceed(false, "", fmt.Errorf("gitlab: event \"%s\" is not supported", event))
 	}
 }
+
+// hasOnlyLabelsChanged checks if the only change in the merge request is to its labels.
+// This function ensures that other fields remain unchanged.
+func hasOnlyLabelsChanged(gitEvent *gitlab.MergeEvent) bool { + changes := gitEvent.Changes + + labelsChanged := len(changes.Labels.Previous) > 0 || len(changes.Labels.Current) > 0 + + // Only Labels can change — everything else must be zero or nil + onlyUpdatedAtOrLabels := labelsChanged && + changes.Assignees.Previous == nil && changes.Assignees.Current == nil && + changes.Reviewers.Previous == nil && changes.Reviewers.Current == nil && + changes.Description.Previous == "" && changes.Description.Current == "" && + changes.MergeStatus.Previous == "" && changes.MergeStatus.Current == "" && + changes.MilestoneID.Previous == 0 && changes.MilestoneID.Current == 0 && + changes.SourceBranch.Previous == "" && changes.SourceBranch.Current == "" && + changes.SourceProjectID.Previous == 0 && changes.SourceProjectID.Current == 0 && + changes.StateID.Previous == 0 && changes.StateID.Current == 0 && + changes.TargetBranch.Previous == "" && changes.TargetBranch.Current == "" && + changes.TargetProjectID.Previous == 0 && changes.TargetProjectID.Current == 0 && + changes.Title.Previous == "" && changes.Title.Current == "" + + return onlyUpdatedAtOrLabels +} diff --git a/pkg/provider/gitlab/detect_test.go b/pkg/provider/gitlab/detect_test.go index ccefcaff0..95adb28e0 100644 --- a/pkg/provider/gitlab/detect_test.go +++ b/pkg/provider/gitlab/detect_test.go @@ -72,6 +72,20 @@ func TestProvider_Detect(t *testing.T) { isGL: true, processReq: true, }, + { + name: "good/mergeRequest update Event with title", + event: sample.MREventAsJSON("update", `"title": "test"`), + eventType: gitlab.EventTypeMergeRequest, + isGL: true, + processReq: false, + }, + { + name: "good/mergeRequest update Event with description", + event: sample.MREventAsJSON("update", `"description": "test pac"`), + eventType: gitlab.EventTypeMergeRequest, + isGL: true, + processReq: false, + }, { name: "good/note event", event: sample.NoteEventAsJSON("abc"), diff --git a/test/gitlab_merge_request_test.go b/test/gitlab_merge_request_test.go index be8fda458..46550aafc 100644 --- a/test/gitlab_merge_request_test.go +++ b/test/gitlab_merge_request_test.go @@ -616,6 +616,80 @@ func TestGitlabDisableCommentsOnMR(t *testing.T) { assert.Equal(t, 2, successCommentsPost) } +func TestGitlabMergeRequestOnUpdateAtAndLabelChange(t *testing.T) { + targetNS := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pac-e2e-ns") + ctx := context.Background() + runcnx, opts, glprovider, err := tgitlab.Setup(ctx) + assert.NilError(t, err) + ctx, err = cctx.GetControllerCtxInfo(ctx, runcnx) + assert.NilError(t, err) + runcnx.Clients.Log.Info("Testing with Gitlab") + + projectinfo, resp, err := glprovider.Client().Projects.GetProject(opts.ProjectID, nil) + assert.NilError(t, err) + if resp != nil && resp.StatusCode == http.StatusNotFound { + t.Errorf("Repository %s not found in %s", opts.Organization, opts.Repo) + } + + err = tgitlab.CreateCRD(ctx, projectinfo, runcnx, opts, targetNS, nil) + assert.NilError(t, err) + + entries, err := payload.GetEntries(map[string]string{ + ".tekton/pipelinerun.yaml": "testdata/pipelinerun.yaml", + ".tekton/pipelinerun-clone.yaml": "testdata/pipelinerun-clone.yaml", + }, targetNS, projectinfo.DefaultBranch, + triggertype.PullRequest.String(), map[string]string{}) + assert.NilError(t, err) + + targetRefName := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pac-e2e-test") + + gitCloneURL, err := scm.MakeGitCloneURL(projectinfo.WebURL, opts.UserName, opts.Password) + assert.NilError(t, err) + commitTitle := "Committing 
files from test on " + targetRefName + scmOpts := &scm.Opts{ + GitURL: gitCloneURL, + CommitTitle: commitTitle, + Log: runcnx.Clients.Log, + WebURL: projectinfo.WebURL, + TargetRefName: targetRefName, + BaseRefName: projectinfo.DefaultBranch, + } + _ = scm.PushFilesToRefGit(t, scmOpts, entries) + + runcnx.Clients.Log.Infof("Branch %s has been created and pushed with files", targetRefName) + mrTitle := "TestMergeRequest - " + targetRefName + mrID, err := tgitlab.CreateMR(glprovider.Client(), opts.ProjectID, targetRefName, projectinfo.DefaultBranch, mrTitle) + assert.NilError(t, err) + runcnx.Clients.Log.Infof("MergeRequest %s/-/merge_requests/%d has been created", projectinfo.WebURL, mrID) + defer tgitlab.TearDown(ctx, t, runcnx, glprovider, mrID, targetRefName, targetNS, opts.ProjectID) + + sopt := twait.SuccessOpt{ + Title: commitTitle, + OnEvent: "Merge Request", + TargetNS: targetNS, + NumberofPRMatch: 2, + SHA: "", + } + twait.Succeeded(ctx, t, runcnx, opts, sopt) + prsNew, err := runcnx.Clients.Tekton.TektonV1().PipelineRuns(targetNS).List(ctx, metav1.ListOptions{}) + assert.NilError(t, err) + assert.Assert(t, len(prsNew.Items) == 2) + + runcnx.Clients.Log.Infof("Changing Title on MergeRequest %s/-/merge_requests/%d", projectinfo.WebURL, mrID) + _, _, err = glprovider.Client().MergeRequests.UpdateMergeRequest(opts.ProjectID, mrID, &clientGitlab.UpdateMergeRequestOptions{ + Title: clientGitlab.Ptr("test"), + }) + assert.NilError(t, err) + + // let's wait 10 secs and check every second that a PipelineRun is created or not. + for i := 0; i < 10; i++ { + prs, err := runcnx.Clients.Tekton.TektonV1().PipelineRuns(targetNS).List(ctx, metav1.ListOptions{}) + assert.NilError(t, err) + assert.Assert(t, len(prs.Items) == 2) + time.Sleep(1 * time.Second) + } +} + // Local Variables: // compile-command: "go test -tags=e2e -v -run ^TestGitlabMergeRequest$" // End: From ad8fea153132881f9e18c10d66cb38c0c8e55bc2 Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Fri, 30 May 2025 12:23:18 +0530 Subject: [PATCH 06/20] fix: Split error message of yaml validation on push When a PipelineRun template in the .tekton/ directory contains a YAML syntax error, the error message was prefixed with a confusing "cannot locate templates in .tekton directory" message. This change ensures the error accurately reflects that a file was found but failed to parse, avoiding confusion between empty .tekton directory and a malformed one. 
Signed-off-by: Zaki Shaikh --- pkg/pipelineascode/match.go | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/pkg/pipelineascode/match.go b/pkg/pipelineascode/match.go index a4926b054..75753538f 100644 --- a/pkg/pipelineascode/match.go +++ b/pkg/pipelineascode/match.go @@ -192,12 +192,23 @@ func (p *PacRun) getPipelineRunsFromRepo(ctx context.Context, repo *v1alpha1.Rep return nil, err } + + // This is for push event error logging because we can't create comment for yaml validation errors on push if err != nil || rawTemplates == "" { - msg := fmt.Sprintf("cannot locate templates in %s/ directory for this repository in %s", tektonDir, p.event.HeadBranch) + msg := "" + reason := "RepositoryPipelineRunNotFound" + logLevel := zap.InfoLevel if err != nil { + reason = "RepositoryInvalidPipelineRunTemplate" + logLevel = zap.ErrorLevel + if strings.Contains(err.Error(), "error unmarshalling yaml file") { + msg = "PipelineRun YAML validation" + } msg += fmt.Sprintf(" err: %s", err.Error()) + } else { + msg = fmt.Sprintf("cannot locate templates in %s/ directory for this repository in %s", tektonDir, p.event.HeadBranch) } - p.eventEmitter.EmitMessage(nil, zap.InfoLevel, "RepositoryPipelineRunNotFound", msg) + p.eventEmitter.EmitMessage(nil, logLevel, reason, msg) return nil, nil } From f780e96b6119e2cdbec9902894f52c00929b5f65 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 6 Jun 2025 11:19:01 +0200 Subject: [PATCH 07/20] fix: add timeouts to HTTP server - Added ReadHeaderTimeout, ReadTimeout, and IdleTimeout to HTTP server for improved security and reliability --- pkg/adapter/adapter.go | 4 +++- pkg/cmd/tknpac/bootstrap/web.go | 9 ++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/adapter/adapter.go b/pkg/adapter/adapter.go index 3844f04a8..d1a404339 100644 --- a/pkg/adapter/adapter.go +++ b/pkg/adapter/adapter.go @@ -93,11 +93,13 @@ func (l *listener) Start(ctx context.Context) error { mux.HandleFunc("/", l.handleEvent(ctx)) - //nolint: gosec srv := &http.Server{ Addr: ":" + adapterPort, Handler: http.TimeoutHandler(mux, httpTimeoutHandler, "Listener Timeout!\n"), + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 10 * time.Second, + IdleTimeout: 30 * time.Second, } enabled, tlsCertFile, tlsKeyFile := l.isTLSEnabled() diff --git a/pkg/cmd/tknpac/bootstrap/web.go b/pkg/cmd/tknpac/bootstrap/web.go index 11230657c..42dca7a22 100644 --- a/pkg/cmd/tknpac/bootstrap/web.go +++ b/pkg/cmd/tknpac/bootstrap/web.go @@ -7,6 +7,7 @@ import ( "log" "net/http" "path/filepath" + "time" "github.com/openshift-pipelines/pipelines-as-code/pkg/cli/browser" "github.com/openshift-pipelines/pipelines-as-code/pkg/cli/info" @@ -19,7 +20,13 @@ import ( func startWebServer(ctx context.Context, opts *bootstrapOpts, run *params.Run, jeez string) error { m := http.NewServeMux() //nolint: gosec - s := http.Server{Addr: fmt.Sprintf(":%d", opts.webserverPort), Handler: m} + s := http.Server{ + Addr: fmt.Sprintf(":%d", opts.webserverPort), + Handler: m, + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 10 * time.Second, + IdleTimeout: 30 * time.Second, + } codeCh := make(chan string) m.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { code := r.URL.Query().Get("code") From bea1de2fa54544c4a6893f44dbe54a11959283c8 Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Tue, 24 Jun 2025 11:26:33 +0200 Subject: [PATCH 08/20] fix: avoid reporting errors for non-Tekton res The handling of YAML validation errors was refactored to be more intelligent and 
less noisy. It now only reports errors for resources that are identified as Tekton resources or are fundamentally invalid YAML. - Refactored validation errors from a simple map to a structured type that includes the resource name, schema, and error object. - Updated YAML parsing to extract the resource's `apiVersion` (schema) even when the document fails to decode fully as a Tekton object. - Implemented filtering to report validation errors only for resources with a `tekton.dev` API group or for generic YAML syntax errors. - This prevents creating error comments on pull requests for other valid, non-Tekton YAML files located in the `.tekton` directory. Jira: https://issues.redhat.com/browse/SRVKP-7906 Signed-off-by: Chmouel Boudjnah --- docs/content/docs/guide/running.md | 9 +- pkg/errors/errors.go | 9 + pkg/pipelineascode/match.go | 26 ++- pkg/resolve/resolve.go | 52 ++--- pkg/resolve/resolve_test.go | 207 ++++++++++++++++-- test/gitea_test.go | 16 +- .../TestGithubSecondPullRequestBadYaml.golden | 2 +- test/testdata/randomcrd.yaml | 6 + 8 files changed, 272 insertions(+), 55 deletions(-) create mode 100644 pkg/errors/errors.go create mode 100644 test/testdata/randomcrd.yaml diff --git a/docs/content/docs/guide/running.md b/docs/content/docs/guide/running.md index 2a08b27ae..f565a0b23 100644 --- a/docs/content/docs/guide/running.md +++ b/docs/content/docs/guide/running.md @@ -124,12 +124,11 @@ click on it and follow the pipeline execution directly there. ## Errors When Parsing PipelineRun YAML -When Pipelines-As-Code encounters an issue with the YAML formatting in the -repository, it will log the error in the user namespace events log and -the Pipelines-as-Code controller log. +If Pipelines-As-Code encounters an issue with the YAML formatting of Tekton resources in the repository, it will create a comment on +the pull request describing the error. The error will also be logged in the user namespace events log and in the Pipelines-as-Code controller log. -Despite the error, Pipelines-As-Code will continue to run other correctly parsed -and matched PipelineRuns. +Despite validation errors, Pipelines-as-Code continues to run other correctly parsed and matched PipelineRuns. +However, if a PipelineRun has YAML syntax error, it halts the execution of all PipelineRuns, even those that are syntactically correct. 
{{< support_matrix github_app="true" github_webhook="true" gitea="true" gitlab="true" bitbucket_cloud="false" bitbucket_server="false" >}} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go new file mode 100644 index 000000000..c2565670b --- /dev/null +++ b/pkg/errors/errors.go @@ -0,0 +1,9 @@ +package errors + +type PacYamlValidations struct { + Name string + Err error + Schema string +} + +const GenericBadYAMLValidation = "Generic bad YAML Validation" diff --git a/pkg/pipelineascode/match.go b/pkg/pipelineascode/match.go index 75753538f..6859dfe46 100644 --- a/pkg/pipelineascode/match.go +++ b/pkg/pipelineascode/match.go @@ -9,6 +9,7 @@ import ( apipac "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/v1alpha1" + pacerrors "github.com/openshift-pipelines/pipelines-as-code/pkg/errors" "github.com/openshift-pipelines/pipelines-as-code/pkg/matcher" "github.com/openshift-pipelines/pipelines-as-code/pkg/opscomments" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" @@ -186,7 +187,15 @@ func (p *PacRun) getPipelineRunsFromRepo(ctx context.Context, repo *v1alpha1.Rep reg := regexp.MustCompile(`error unmarshalling yaml file\s([^:]*):\s*(yaml:\s*)?(.*)`) matches := reg.FindStringSubmatch(err.Error()) if len(matches) == 4 { - p.reportValidationErrors(ctx, repo, map[string]string{matches[1]: matches[3]}) + p.reportValidationErrors(ctx, repo, + []*pacerrors.PacYamlValidations{ + { + Name: matches[1], + Err: fmt.Errorf("yaml validation error: %s", matches[3]), + Schema: pacerrors.GenericBadYAMLValidation, + }, + }, + ) return nil, nil } @@ -463,12 +472,19 @@ func (p *PacRun) createNeutralStatus(ctx context.Context) error { // 1. Creating error messages for each validation error // 2. Emitting error messages to the event system // 3. Creating a markdown formatted comment on the repository with all errors. -func (p *PacRun) reportValidationErrors(ctx context.Context, repo *v1alpha1.Repository, validationErrors map[string]string) { +func (p *PacRun) reportValidationErrors(ctx context.Context, repo *v1alpha1.Repository, validationErrors []*pacerrors.PacYamlValidations) { errorRows := make([]string, 0, len(validationErrors)) - for name, err := range validationErrors { - errorRows = append(errorRows, fmt.Sprintf("| %s | `%s` |", name, err)) + for _, err := range validationErrors { + // if the error is a TektonConversionError, we don't want to report it since it may be a file that is not a tekton resource + // and we don't want to report it as a validation error. 
+ if strings.HasPrefix(err.Schema, tektonv1.SchemeGroupVersion.Group) || err.Schema == pacerrors.GenericBadYAMLValidation { + errorRows = append(errorRows, fmt.Sprintf("| %s | `%s` |", err.Name, err.Err.Error())) + } p.eventEmitter.EmitMessage(repo, zap.ErrorLevel, "PipelineRunValidationErrors", - fmt.Sprintf("cannot read the PipelineRun: %s, error: %s", name, err)) + fmt.Sprintf("cannot read the PipelineRun: %s, error: %s", err.Name, err.Err.Error())) + } + if len(errorRows) == 0 { + return } markdownErrMessage := fmt.Sprintf(`%s %s`, validationErrorTemplate, strings.Join(errorRows, "\n")) diff --git a/pkg/resolve/resolve.go b/pkg/resolve/resolve.go index 7546bc17b..f8f853eef 100644 --- a/pkg/resolve/resolve.go +++ b/pkg/resolve/resolve.go @@ -4,9 +4,11 @@ import ( "context" "fmt" "regexp" + "slices" "strings" apipac "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" + pacerrors "github.com/openshift-pipelines/pipelines-as-code/pkg/errors" "github.com/openshift-pipelines/pipelines-as-code/pkg/formatting" "github.com/openshift-pipelines/pipelines-as-code/pkg/matcher" "github.com/openshift-pipelines/pipelines-as-code/pkg/params" @@ -15,7 +17,6 @@ import ( tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8scheme "k8s.io/client-go/kubernetes/scheme" yaml "sigs.k8s.io/yaml/goyaml.v2" ) @@ -26,7 +27,7 @@ type TektonTypes struct { Pipelines []*tektonv1.Pipeline TaskRuns []*tektonv1.TaskRun Tasks []*tektonv1.Task - ValidationErrors map[string]string + ValidationErrors []*pacerrors.PacYamlValidations } // Contains Fetched Resources for Event, with key equals to annotation value. @@ -43,31 +44,34 @@ type FetchedResourcesForRun struct { func NewTektonTypes() TektonTypes { return TektonTypes{ - ValidationErrors: map[string]string{}, + ValidationErrors: []*pacerrors.PacYamlValidations{}, } } var yamlDocSeparatorRe = regexp.MustCompile(`(?m)^---\s*$`) -// detectAtleastNameOrGenerateNameFromPipelineRun detects the name or +// detectAtleastNameOrGenerateNameAndSchemaFromPipelineRun detects the name or // generateName of a yaml files even if there is an error decoding it as tekton types. 
-func detectAtleastNameOrGenerateNameFromPipelineRun(data string) string { - var metadataName struct { - Metadata metav1.ObjectMeta +func detectAtleastNameOrGenerateNameAndSchemaFromPipelineRun(data string) (string, string) { + var genericKubeObj struct { + APIVersion string `yaml:"apiVersion"` + Metadata struct { + Name string `yaml:"name,omitempty"` + GenerateName string `yaml:"generateName,omitempty"` + } `yaml:"metadata"` } - err := yaml.Unmarshal([]byte(data), &metadataName) + err := yaml.Unmarshal([]byte(data), &genericKubeObj) if err != nil { - return "" + return "nokube", "" } - if metadataName.Metadata.Name != "" { - return metadataName.Metadata.Name + if genericKubeObj.Metadata.Name != "" { + return genericKubeObj.Metadata.Name, genericKubeObj.APIVersion } - // TODO: yaml Unmarshal don't want to parse generatename and i have no idea why - if metadataName.Metadata.GenerateName != "" { - return metadataName.Metadata.GenerateName + if genericKubeObj.Metadata.GenerateName != "" { + return genericKubeObj.Metadata.GenerateName, genericKubeObj.APIVersion } - return "unknown" + return "unknown", genericKubeObj.APIVersion } // getPipelineByName returns the Pipeline with the given name the first one found @@ -106,15 +110,6 @@ func pipelineRunsWithSameName(prs []*tektonv1.PipelineRun) error { return nil } -func skippingTask(taskName string, skippedTasks []string) bool { - for _, value := range skippedTasks { - if value == taskName { - return true - } - } - return false -} - func isTektonAPIVersion(apiVersion string) bool { return strings.HasPrefix(apiVersion, "tekton.dev/") || apiVersion == "" } @@ -126,7 +121,7 @@ func inlineTasks(tasks []tektonv1.PipelineTask, ropt *Opts, remoteResource Fetch task.TaskRef.Resolver == "" && isTektonAPIVersion(task.TaskRef.APIVersion) && string(task.TaskRef.Kind) != "ClusterTask" && - !skippingTask(task.TaskRef.Name, ropt.SkipInlining) { + !slices.Contains(ropt.SkipInlining, task.TaskRef.Name) { taskResolved, ok := remoteResource.Tasks[task.TaskRef.Name] if !ok { return nil, fmt.Errorf("cannot find referenced task %s. 
if it's a remote task make sure to add it in the annotations", task.TaskRef.Name) @@ -164,7 +159,12 @@ func ReadTektonTypes(ctx context.Context, log *zap.SugaredLogger, data string) ( obj, _, err := decoder.Decode([]byte(doc), nil, nil) if err != nil { - types.ValidationErrors[detectAtleastNameOrGenerateNameFromPipelineRun(doc)] = err.Error() + dt, dv := detectAtleastNameOrGenerateNameAndSchemaFromPipelineRun(doc) + types.ValidationErrors = append(types.ValidationErrors, &pacerrors.PacYamlValidations{ + Name: dt, + Err: fmt.Errorf("error decoding yaml document: %w", err), + Schema: dv, + }) continue } switch o := obj.(type) { diff --git a/pkg/resolve/resolve_test.go b/pkg/resolve/resolve_test.go index 76cd05fc8..fe2ad877b 100644 --- a/pkg/resolve/resolve_test.go +++ b/pkg/resolve/resolve_test.go @@ -9,6 +9,7 @@ import ( "testing" apipac "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" + pacerrors "github.com/openshift-pipelines/pipelines-as-code/pkg/errors" "github.com/openshift-pipelines/pipelines-as-code/pkg/params" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/clients" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/info" @@ -202,18 +203,21 @@ func TestReportBadTektonYaml(t *testing.T) { wantErr bool validError string validErrorName string + expectedSchema string }{ { name: "bad tekton yaml name", filename: "bad-tekton-yaml-name", validError: `json: cannot unmarshal object into Go struct field PipelineSpec.spec.pipelineSpec.tasks of type []v1beta1.PipelineTask`, validErrorName: "bad-name", + expectedSchema: "tekton.dev/v1beta1", // Assuming this is the schema in the test file }, { name: "bad tekton yaml generateName", filename: "bad-tekton-yaml-generate-name", validError: `json: cannot unmarshal object into Go struct field PipelineSpec.spec.pipelineSpec.tasks of type []v1beta1.PipelineTask`, - validErrorName: "unknown", + validErrorName: "bad-generate-name", + expectedSchema: "", // When name/generateName cannot be determined }, } for _, tt := range tests { @@ -222,19 +226,198 @@ func TestReportBadTektonYaml(t *testing.T) { assert.NilError(t, err) types, err := ReadTektonTypes(context.TODO(), nil, string(data)) assert.NilError(t, err) - if value, ok := types.ValidationErrors[tt.validErrorName]; ok { - assert.Equal(t, value, tt.validError, "error message mismatch") - } else { + + // Find the validation error by name + found := false + for _, validationError := range types.ValidationErrors { + if validationError.Name == tt.validErrorName { + // Test the structured error + assert.Assert(t, strings.Contains(validationError.Err.Error(), tt.validError), + "error message mismatch: expected %s to contain %s", validationError.Err.Error(), tt.validError) + + // Test that the error has the expected structure + assert.Assert(t, validationError.Name != "", "validation error should have a name") + assert.Assert(t, validationError.Err != nil, "validation error should have an error") + + // Test schema field if we expect one + if tt.expectedSchema != "" { + assert.Equal(t, validationError.Schema, tt.expectedSchema, "schema mismatch") + } + found = true + break + } + } + if !found { t.Errorf("could not find the task %s in the validation errors: %+v", tt.validErrorName, types.ValidationErrors) } }) } +} + +func TestDetectNameOrGenerateNameAndSchema(t *testing.T) { + tests := []struct { + name string + yamlContent string + expectedName string + expectedSchema string + }{ + { + name: "valid yaml with name and apiVersion", + yamlContent: 
`apiVersion: tekton.dev/v1 +metadata: + name: test-pipeline`, + expectedName: "test-pipeline", + expectedSchema: "tekton.dev/v1", + }, + { + name: "valid yaml with generateName and apiVersion", + yamlContent: `apiVersion: tekton.dev/v1beta1 +metadata: + generateName: test-pipeline- +`, + expectedName: "test-pipeline-", + expectedSchema: "tekton.dev/v1beta1", + }, + { + name: "invalid yaml", + yamlContent: `- babdakdja`, + expectedName: "nokube", + expectedSchema: "", + }, + { + name: "yaml without name or generateName", + yamlContent: `apiVersion: tekton.dev/v1 +metadata: + namespace: default`, + expectedName: "unknown", + expectedSchema: "tekton.dev/v1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + name, schema := detectAtleastNameOrGenerateNameAndSchemaFromPipelineRun(tt.yamlContent) + assert.Equal(t, name, tt.expectedName, "name mismatch") + assert.Equal(t, schema, tt.expectedSchema, "schema mismatch") + }) + } +} + +func TestValidationErrorStructure(t *testing.T) { + // Test that validation errors follow the new structure + testYaml := `apiVersion: tekton.dev/v1 +kind: PipelineRun +metadata: + name: test-pr +spec: + pipelineSpec: + tasks: + - name: invalid-task + taskSpec: + invalid-field: "this should cause an error"` + + types, err := ReadTektonTypes(context.TODO(), nil, testYaml) + assert.NilError(t, err) + + // Each validation error should have the proper structure + for _, validationError := range types.ValidationErrors { + assert.Assert(t, validationError != nil, "validation error should not be nil") + assert.Assert(t, validationError.Name != "", "validation error should have a name") + assert.Assert(t, validationError.Err != nil, "validation error should have an error") + // Schema field exists as a string (can be empty) + } +} + +func TestGenericBadYAMLValidation(t *testing.T) { + // Test that the GenericBadYAMLValidation constant is used properly + assert.Equal(t, pacerrors.GenericBadYAMLValidation, "Generic bad YAML Validation") +} + +func TestValidationErrorFiltering(t *testing.T) { + // Test the schema filtering logic that determines which errors should be reported + tests := []struct { + name string + validationErr *pacerrors.PacYamlValidations + shouldBeReported bool + }{ + { + name: "tekton resource error should be reported", + validationErr: &pacerrors.PacYamlValidations{ + Name: "test-pipeline", + Err: fmt.Errorf("some tekton error"), + Schema: "tekton.dev/v1", + }, + shouldBeReported: true, + }, + { + name: "tekton v1beta1 resource error should be reported", + validationErr: &pacerrors.PacYamlValidations{ + Name: "test-task", + Err: fmt.Errorf("some tekton error"), + Schema: "tekton.dev/v1beta1", + }, + shouldBeReported: true, + }, + { + name: "generic bad yaml error should be reported", + validationErr: &pacerrors.PacYamlValidations{ + Name: "bad-yaml", + Err: fmt.Errorf("yaml syntax error"), + Schema: pacerrors.GenericBadYAMLValidation, + }, + shouldBeReported: true, + }, + { + name: "non-tekton resource error should not be reported", + validationErr: &pacerrors.PacYamlValidations{ + Name: "some-config", + Err: fmt.Errorf("some other error"), + Schema: "v1", + }, + shouldBeReported: false, + }, + { + name: "empty schema error should not be reported", + validationErr: &pacerrors.PacYamlValidations{ + Name: "unknown-resource", + Err: fmt.Errorf("unknown error"), + Schema: "", + }, + shouldBeReported: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test the filtering logic that would be used 
in reportValidationErrors + shouldReport := strings.HasPrefix(tt.validationErr.Schema, tektonv1.SchemeGroupVersion.Group) || + tt.validationErr.Schema == pacerrors.GenericBadYAMLValidation + + assert.Equal(t, shouldReport, tt.shouldBeReported, + "filtering result mismatch for schema: %s", tt.validationErr.Schema) + }) + } +} + +func TestErrorMessageFormat(t *testing.T) { + // Test that error messages are properly formatted with "error decoding yaml document:" prefix + testYaml := `invalid yaml content: + - this: should + cause: [an error` - assert.Equal(t, "", detectAtleastNameOrGenerateNameFromPipelineRun("- babdakdja")) + types, err := ReadTektonTypes(context.TODO(), nil, testYaml) + assert.NilError(t, err) + + // Should have at least one validation error + assert.Assert(t, len(types.ValidationErrors) > 0, "should have validation errors") + + // Check that error messages have the proper format + for _, validationError := range types.ValidationErrors { + assert.Assert(t, strings.Contains(validationError.Err.Error(), "error decoding yaml document:"), + "error message should contain 'error decoding yaml document:' prefix, got: %s", validationError.Err.Error()) + } } -// test if we have the task in .tekton dir not referenced in annotations but taskRef in a task. -// should embed since in repo. func TestInRepoShouldNotEmbedIfNoAnnotations(t *testing.T) { resolved, _, err := readTDfile(t, "in-repo-in-ref-no-annotation", false, true) assert.NilError(t, err) @@ -496,16 +679,6 @@ func TestMetadataResolve(t *testing.T) { } } -func TestSkippingTask(t *testing.T) { - skippedTasks := []string{"task1", "task3"} - - // Test case where taskName is in skippedTasks - assert.Equal(t, skippingTask("task1", skippedTasks), true) - - // Test case where taskName is not in skippedTasks - assert.Equal(t, skippingTask("task2", skippedTasks), false) -} - func TestTaskRunPassMetadataAnnotations(t *testing.T) { resolved, _, err := readTDfile(t, "pipelinerun-pipelinespec-taskref-pass-annotations", false, true) assert.NilError(t, err) diff --git a/test/gitea_test.go b/test/gitea_test.go index 44fe3c5db..a6198473d 100644 --- a/test/gitea_test.go +++ b/test/gitea_test.go @@ -238,6 +238,20 @@ func TestGiteaBadYamlReportingOnPR(t *testing.T) { assert.Equal(t, len(comments), 1, "should have only one comment") } +func TestGiteaYamlReportingNotReportingNotTektonResources(t *testing.T) { + topts := &tgitea.TestOpts{ + TargetEvent: triggertype.PullRequest.String(), + YAMLFiles: map[string]string{".tekton/randomcrd.yaml": "testdata/randomcrd.yaml"}, + ExpectEvents: true, + } + + _, f := tgitea.TestPR(t, topts) + defer f() + comments, _, err := topts.GiteaCNX.Client().ListRepoIssueComments(topts.PullRequest.Base.Repository.Owner.UserName, topts.PullRequest.Base.Repository.Name, gitea.ListIssueCommentOptions{}) + assert.NilError(t, err) + assert.Equal(t, len(comments), 0, "should have zero comments") +} + // TestGiteaBadYaml we can't check pr status but this shows up in the // controller, so let's dig ourself in there.... TargetNS is a random string, so // it can only success if it matches it. 
@@ -252,7 +266,7 @@ func TestGiteaBadYamlValidation(t *testing.T) { defer f() maxLines := int64(20) assert.NilError(t, twait.RegexpMatchingInControllerLog(ctx, topts.ParamsRun, *regexp.MustCompile( - "cannot read the PipelineRun: pr-bad-format.yaml, error: line 3: could not find expected ':'"), + "cannot read the PipelineRun: pr-bad-format.yaml, error: yaml validation error: line 3: could not find expected ':'"), 10, "controller", &maxLines)) } diff --git a/test/testdata/TestGithubSecondPullRequestBadYaml.golden b/test/testdata/TestGithubSecondPullRequestBadYaml.golden index 946349c16..f38bcdf03 100644 --- a/test/testdata/TestGithubSecondPullRequestBadYaml.golden +++ b/test/testdata/TestGithubSecondPullRequestBadYaml.golden @@ -3,4 +3,4 @@ | PipelineRun | Error | |------|-------| -| bad-yaml.yaml | `line 3: could not find expected ':'` | \ No newline at end of file +| bad-yaml.yaml | `yaml validation error: line 3: could not find expected ':'` | \ No newline at end of file diff --git a/test/testdata/randomcrd.yaml b/test/testdata/randomcrd.yaml new file mode 100644 index 000000000..c620df381 --- /dev/null +++ b/test/testdata/randomcrd.yaml @@ -0,0 +1,6 @@ +apiVersion: blah.openshift.io/v1 +kind: Blahblah +metadata: + name: blahblah +spec: + blah: blah From 0510f33085e50980704b4ee81c1728bc12830e2d Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Fri, 6 Jun 2025 11:26:42 +0200 Subject: [PATCH 09/20] fix: handle response body close errors - Added error handling for closing HTTP response body to improve robustness - Changed list function to return result of writer flush for better error reporting Signed-off-by: Chmouel Boudjnah --- pkg/cmd/tknpac/bootstrap/route.go | 4 +++- pkg/cmd/tknpac/list/list.go | 3 +-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/cmd/tknpac/bootstrap/route.go b/pkg/cmd/tknpac/bootstrap/route.go index af760a756..5a88507e9 100644 --- a/pkg/cmd/tknpac/bootstrap/route.go +++ b/pkg/cmd/tknpac/bootstrap/route.go @@ -59,7 +59,9 @@ func detectSelfSignedCertificate(ctx context.Context, url string) string { } else if err != nil { return fmt.Sprintf("⚠️ could not connect to the route %s, make sure the pipelines-as-code controller is running", url) } - resp.Body.Close() + if err := resp.Body.Close(); err != nil { + return fmt.Sprintf("⚠️ could not close the response body: %v", err) + } return "" } diff --git a/pkg/cmd/tknpac/list/list.go b/pkg/cmd/tknpac/list/list.go index 7a07c3ee4..53b1d3fe9 100644 --- a/pkg/cmd/tknpac/list/list.go +++ b/pkg/cmd/tknpac/list/list.go @@ -188,6 +188,5 @@ func list(ctx context.Context, cs *params.Run, opts *cli.PacCliOpts, ioStreams * if err := t.Execute(w, data); err != nil { return err } - w.Flush() - return nil + return w.Flush() } From 3dc9d6351c8fc45543bdd0cc07a68806ba241d2f Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Mon, 16 Jun 2025 11:36:07 +0530 Subject: [PATCH 10/20] fix: inavlidate empty commits in push events in bitbucket dc invalidated empty commits in push in bitbucket data center as we may want to check commit field for commit SHA. 
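For illustration only (not part of the diff below), the intent of the guard reduces to the
following standalone sketch; the struct shapes are simplified stand-ins for the provider's
payload types, and the error strings follow the ones added in parse_payload.go:

    package main

    import (
        "errors"
        "fmt"
    )

    // pushEvent is a minimal stand-in for the Bitbucket Data Center
    // repo:refs_changed payload handled by this provider.
    type pushEvent struct {
        Changes []struct{ RefID, ToHash string }
        Commits []struct{ ID string }
    }

    // validatePush mirrors the two guards: both 'changes' and 'commits'
    // must be non-empty before a commit SHA can be read from the payload.
    func validatePush(e pushEvent) error {
        if len(e.Changes) == 0 {
            return errors.New("push event contains no commits under 'changes'; cannot proceed")
        }
        if len(e.Commits) == 0 {
            return errors.New("push event contains no commits; cannot proceed")
        }
        return nil
    }

    func main() {
        // An empty payload now fails fast instead of being processed further.
        fmt.Println(validatePush(pushEvent{}))
    }
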
Signed-off-by: Zaki Shaikh --- pkg/provider/bitbucketdatacenter/parse_payload.go | 4 ++++ .../bitbucketdatacenter/parse_payload_test.go | 11 +++++++++-- pkg/provider/bitbucketdatacenter/test/test.go | 3 ++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/pkg/provider/bitbucketdatacenter/parse_payload.go b/pkg/provider/bitbucketdatacenter/parse_payload.go index 45009e4c0..6d40f83d9 100644 --- a/pkg/provider/bitbucketdatacenter/parse_payload.go +++ b/pkg/provider/bitbucketdatacenter/parse_payload.go @@ -164,6 +164,10 @@ func (v *Provider) ParsePayload(_ context.Context, _ *params.Run, request *http. return nil, fmt.Errorf("push event contains no commits under 'changes'; cannot proceed") } + if len(e.Commits) == 0 { + return nil, fmt.Errorf("push event contains no commits; cannot proceed") + } + processedEvent.SHA = e.Changes[0].ToHash processedEvent.URL = e.Repository.Links.Self[0].Href processedEvent.BaseBranch = e.Changes[0].RefID diff --git a/pkg/provider/bitbucketdatacenter/parse_payload_test.go b/pkg/provider/bitbucketdatacenter/parse_payload_test.go index 790e1a809..e51bb73e0 100644 --- a/pkg/provider/bitbucketdatacenter/parse_payload_test.go +++ b/pkg/provider/bitbucketdatacenter/parse_payload_test.go @@ -611,16 +611,23 @@ func TestParsePayload(t *testing.T) { { name: "good/push", eventType: "repo:refs_changed", - payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{{ToHash: ev1.SHA, RefID: "base"}}), + payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{{ToHash: ev1.SHA, RefID: "base"}}, []types.Commit{{ID: ev1.SHA}}), expEvent: ev1, }, { name: "bad/changes are empty in push", eventType: "repo:refs_changed", - payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{}), + payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{}, []types.Commit{}), expEvent: ev1, wantErrSubstr: "push event contains no commits under 'changes'; cannot proceed", }, + { + name: "bad/commits are empty in push", + eventType: "repo:refs_changed", + payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{{ToHash: ev1.SHA, RefID: "base"}}, []types.Commit{}), + expEvent: ev1, + wantErrSubstr: "push event contains no commits; cannot proceed", + }, { name: "good/comment ok-to-test", eventType: "pr:comment:added", diff --git a/pkg/provider/bitbucketdatacenter/test/test.go b/pkg/provider/bitbucketdatacenter/test/test.go index f04e09d27..887d8944d 100644 --- a/pkg/provider/bitbucketdatacenter/test/test.go +++ b/pkg/provider/bitbucketdatacenter/test/test.go @@ -338,7 +338,7 @@ func MakePREvent(event *info.Event, comment string) *types.PullRequestEvent { return pr } -func MakePushEvent(event *info.Event, changes []types.PushRequestEventChange) *types.PushRequestEvent { +func MakePushEvent(event *info.Event, changes []types.PushRequestEventChange, commits []types.Commit) *types.PushRequestEvent { iii, _ := strconv.Atoi(event.AccountID) return &types.PushRequestEvent{ @@ -366,5 +366,6 @@ func MakePushEvent(event *info.Event, changes []types.PushRequestEventChange) *t }, }, Changes: changes, + Commits: commits, } } From 71027f940a37b85244d9b528a88c3160aebea74e Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Sat, 21 Jun 2025 18:49:54 +0530 Subject: [PATCH 11/20] fix: issue in on-path-change on pr merge in bitbucket data center fixed an issue where push pipeline run was not triggering on pr merge when pipeline run definition is having on-path-cahnge annotation. 
this was happening due to bitbucket data center api behavior that it creates merge commit on pr merge that is set as HEAD commit of the branch and when changes for that commit is retrieved, they're being received empty because merge commit in bitbucket data center doesn't contain code changes of its parent. https://issues.redhat.com/browse/SRVKP-7432 Signed-off-by: Zaki Shaikh --- .../bitbucketdatacenter/parse_payload.go | 23 +++++- .../bitbucketdatacenter/parse_payload_test.go | 53 +++++++++++++ .../bitbucket_datacenter_pull_request_test.go | 78 ++++++++++++++++++- test/bitbucket_datacenter_push_test.go | 2 +- test/pkg/bitbucketdatacenter/pr.go | 18 +++-- test/pkg/bitbucketdatacenter/setup.go | 9 ++- test/pkg/options/options.go | 1 + 7 files changed, 166 insertions(+), 18 deletions(-) diff --git a/pkg/provider/bitbucketdatacenter/parse_payload.go b/pkg/provider/bitbucketdatacenter/parse_payload.go index 6d40f83d9..bd7ede332 100644 --- a/pkg/provider/bitbucketdatacenter/parse_payload.go +++ b/pkg/provider/bitbucketdatacenter/parse_payload.go @@ -88,7 +88,7 @@ func sanitizeOwner(owner string) string { } // ParsePayload parses the payload from the event. -func (v *Provider) ParsePayload(_ context.Context, _ *params.Run, request *http.Request, +func (v *Provider) ParsePayload(_ context.Context, run *params.Run, request *http.Request, payload string, ) (*info.Event, error) { processedEvent := info.NewEvent() @@ -169,6 +169,27 @@ func (v *Provider) ParsePayload(_ context.Context, _ *params.Run, request *http. } processedEvent.SHA = e.Changes[0].ToHash + + // In Bitbucket Data Center, when a pull request is merged, it creates two commits in the repository: + // 1. A merge commit, which is represented by `changes[0].ToHash`. + // 2. The actual commit containing the changes from the source branch. + // + // However, the merge commit often does not contain any file changes itself, + // which can cause issues when determining whether file modifications should trigger PipelineRuns. + // + // Typically, a regular (non-merge) commit has a single parent, but a merge commit has two parents: + // - The first parent is the previous HEAD of the destination branch (the branch into which the PR was merged). + // - The second parent is the HEAD of the source branch (the branch being merged). + // + // To correctly identify the actual commit that contains the changes (i.e., the source branch's HEAD), + // we inspect `e.Commits[0]`, and if it has more than one parent, we take the second parent. + // This helps ensure we reference the correct commit. 
+ if len(e.Commits) > 1 && len(e.Commits[0].Parents) > 1 { + processedEvent.SHA = e.Commits[0].Parents[1].ID + run.Clients.Log.Infof("Detected a merge commit as HEAD; "+ + "using second parent commit SHA %s (source branch HEAD) to target push event", e.Commits[0].Parents[1].ID) + } + processedEvent.URL = e.Repository.Links.Self[0].Href processedEvent.BaseBranch = e.Changes[0].RefID processedEvent.HeadBranch = e.Changes[0].RefID diff --git a/pkg/provider/bitbucketdatacenter/parse_payload_test.go b/pkg/provider/bitbucketdatacenter/parse_payload_test.go index e51bb73e0..09c9b6ca4 100644 --- a/pkg/provider/bitbucketdatacenter/parse_payload_test.go +++ b/pkg/provider/bitbucketdatacenter/parse_payload_test.go @@ -9,6 +9,7 @@ import ( "github.com/openshift-pipelines/pipelines-as-code/pkg/params/info" bbv1test "github.com/openshift-pipelines/pipelines-as-code/pkg/provider/bitbucketdatacenter/test" "github.com/openshift-pipelines/pipelines-as-code/pkg/provider/bitbucketdatacenter/types" + "github.com/openshift-pipelines/pipelines-as-code/pkg/test/logger" "gotest.tools/v3/assert" rtesting "knative.dev/pkg/reconciler/testing" ) @@ -570,6 +571,7 @@ func TestParsePayload(t *testing.T) { payloadEvent any expEvent *info.Event eventType string + wantSHA string wantErrSubstr string rawStr string targetPipelinerun string @@ -607,12 +609,14 @@ func TestParsePayload(t *testing.T) { eventType: "pr:opened", payloadEvent: bbv1test.MakePREvent(ev1, ""), expEvent: ev1, + wantSHA: "abcd", }, { name: "good/push", eventType: "repo:refs_changed", payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{{ToHash: ev1.SHA, RefID: "base"}}, []types.Commit{{ID: ev1.SHA}}), expEvent: ev1, + wantSHA: "abcd", }, { name: "bad/changes are empty in push", @@ -628,17 +632,59 @@ func TestParsePayload(t *testing.T) { expEvent: ev1, wantErrSubstr: "push event contains no commits; cannot proceed", }, + { + name: "good/changes are empty in push", + eventType: "repo:refs_changed", + payloadEvent: bbv1test.MakePushEvent(ev1, []types.PushRequestEventChange{ + { + Ref: types.Ref{ID: "refs/heads/main"}, + ToHash: "abcd", + }, + }, []types.Commit{ + { + Parents: []struct { + ID string `json:"id"` + DisplayID string `json:"displayId"` + }{ + { + ID: "efghabcd", + DisplayID: "efgh", + }, + { + ID: "abcdefgh", + DisplayID: "abcd", + }, + }, + }, + { + ID: "abcdefgh", + Parents: []struct { + ID string `json:"id"` + DisplayID string `json:"displayId"` + }{ + { + ID: "weroiusf", + DisplayID: "wero", + }, + }, + }, + }), + expEvent: ev1, + wantSHA: "abcdefgh", + }, { name: "good/comment ok-to-test", eventType: "pr:comment:added", payloadEvent: bbv1test.MakePREvent(ev1, "/ok-to-test"), expEvent: ev1, + wantSHA: "abcd", }, { name: "good/comment test", eventType: "pr:comment:added", payloadEvent: bbv1test.MakePREvent(ev1, "/test"), expEvent: ev1, + wantSHA: "abcd", }, { name: "good/comment retest a pr", @@ -646,6 +692,7 @@ func TestParsePayload(t *testing.T) { payloadEvent: bbv1test.MakePREvent(ev1, "/retest dummy"), expEvent: ev1, targetPipelinerun: "dummy", + wantSHA: "abcd", }, { name: "good/comment cancel a pr", @@ -653,12 +700,14 @@ func TestParsePayload(t *testing.T) { payloadEvent: bbv1test.MakePREvent(ev1, "/cancel dummy"), expEvent: ev1, canceltargetPipelinerun: "dummy", + wantSHA: "abcd", }, { name: "good/comment cancel all", eventType: "pr:comment:added", payloadEvent: bbv1test.MakePREvent(ev1, "/cancel"), expEvent: ev1, + wantSHA: "abcd", }, } for _, tt := range tests { @@ -672,6 +721,7 @@ func TestParsePayload(t 
*testing.T) { run := ¶ms.Run{ Info: info.Info{}, } + run.Clients.Log, _ = logger.GetLogger() _b, err := json.Marshal(tt.payloadEvent) assert.NilError(t, err) payload := string(_b) @@ -686,6 +736,9 @@ func TestParsePayload(t *testing.T) { } assert.NilError(t, err) + // assert SHA ID + assert.Equal(t, tt.wantSHA, got.SHA) + assert.Equal(t, got.AccountID, tt.expEvent.AccountID) // test that we got slashed diff --git a/test/bitbucket_datacenter_pull_request_test.go b/test/bitbucket_datacenter_pull_request_test.go index f7acea18e..0afa3b206 100644 --- a/test/bitbucket_datacenter_pull_request_test.go +++ b/test/bitbucket_datacenter_pull_request_test.go @@ -9,12 +9,17 @@ import ( "os" "testing" + "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/keys" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" tbbdc "github.com/openshift-pipelines/pipelines-as-code/test/pkg/bitbucketdatacenter" + "github.com/openshift-pipelines/pipelines-as-code/test/pkg/options" + "github.com/openshift-pipelines/pipelines-as-code/test/pkg/payload" "github.com/openshift-pipelines/pipelines-as-code/test/pkg/wait" + "github.com/jenkins-x/go-scm/scm" "github.com/tektoncd/pipeline/pkg/names" "gotest.tools/v3/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestBitbucketDataCenterPullRequest(t *testing.T) { @@ -35,9 +40,11 @@ func TestBitbucketDataCenterPullRequest(t *testing.T) { files[fmt.Sprintf(".tekton/pipelinerun-%d.yaml", i)] = "testdata/pipelinerun.yaml" } + files, err = payload.GetEntries(files, targetNS, options.MainBranch, triggertype.PullRequest.String(), map[string]string{}) + assert.NilError(t, err) + pr := tbbdc.CreatePR(ctx, t, client, runcnx, opts, repo, files, bitbucketWSOwner, targetNS) - runcnx.Clients.Log.Infof("Pull Request with title '%s' is created", pr.Title) - defer tbbdc.TearDown(ctx, t, runcnx, client, pr.Number, bitbucketWSOwner, targetNS) + defer tbbdc.TearDown(ctx, t, runcnx, client, pr, bitbucketWSOwner, targetNS) successOpts := wait.SuccessOpt{ TargetNS: targetNS, @@ -64,9 +71,11 @@ func TestBitbucketDataCenterCELPathChangeInPullRequest(t *testing.T) { ".tekton/pipelinerun.yaml": "testdata/pipelinerun-cel-path-changed.yaml", } + files, err = payload.GetEntries(files, targetNS, options.MainBranch, triggertype.PullRequest.String(), map[string]string{}) + assert.NilError(t, err) + pr := tbbdc.CreatePR(ctx, t, client, runcnx, opts, repo, files, bitbucketWSOwner, targetNS) - runcnx.Clients.Log.Infof("Pull Request with title '%s' is created", pr.Title) - defer tbbdc.TearDown(ctx, t, runcnx, client, pr.Number, bitbucketWSOwner, targetNS) + defer tbbdc.TearDown(ctx, t, runcnx, client, pr, bitbucketWSOwner, targetNS) successOpts := wait.SuccessOpt{ TargetNS: targetNS, @@ -76,3 +85,64 @@ func TestBitbucketDataCenterCELPathChangeInPullRequest(t *testing.T) { } wait.Succeeded(ctx, t, runcnx, opts, successOpts) } + +func TestBitbucketDataCenterOnPathChangeAnnotationOnPRMerge(t *testing.T) { + targetNS := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pac-e2e-ns") + // this would be a temporary base branch for the pull request we're going to raise + // we need this because we're going to merge the pull request so that after test + // we can delete the temporary base branch and our main branch should not be affected + // by this merge because we run the E2E frequently. 
+ tempBaseBranch := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("pac-e2e-ns") + + ctx := context.Background() + bitbucketWSOwner := os.Getenv("TEST_BITBUCKET_SERVER_E2E_REPOSITORY") + + ctx, runcnx, opts, client, err := tbbdc.Setup(ctx) + assert.NilError(t, err) + + repo := tbbdc.CreateCRD(ctx, t, client, runcnx, bitbucketWSOwner, targetNS) + runcnx.Clients.Log.Infof("Repository %s has been created", repo.Name) + defer tbbdc.TearDownNs(ctx, t, runcnx, targetNS) + + branch, resp, err := client.Git.CreateRef(ctx, bitbucketWSOwner, tempBaseBranch, repo.Branch) + assert.NilError(t, err, "error creating branch: http status code: %d : %v", resp.Status, err) + runcnx.Clients.Log.Infof("Base branch %s has been created", branch.Name) + + opts.BaseBranch = branch.Name + + if os.Getenv("TEST_NOCLEANUP") != "true" { + defer func() { + _, err := client.Git.DeleteRef(ctx, bitbucketWSOwner, tempBaseBranch) + assert.NilError(t, err, "error deleting branch: http status code: %d : %v", resp.Status, err) + }() + } + + files := map[string]string{ + ".tekton/pr.yaml": "testdata/pipelinerun-on-path-change.yaml", + "doc/foo/bar/README.md": "README.md", + } + + files, err = payload.GetEntries(files, targetNS, tempBaseBranch, triggertype.Push.String(), map[string]string{}) + assert.NilError(t, err) + + pr := tbbdc.CreatePR(ctx, t, client, runcnx, opts, repo, files, bitbucketWSOwner, targetNS) + defer tbbdc.TearDown(ctx, t, runcnx, client, nil, bitbucketWSOwner, targetNS) + + // merge the pull request so that we can get push event. + _, err = client.PullRequests.Merge(ctx, bitbucketWSOwner, pr.Number, &scm.PullRequestMergeOptions{}) + assert.NilError(t, err) + + successOpts := wait.SuccessOpt{ + TargetNS: targetNS, + OnEvent: triggertype.Push.String(), + NumberofPRMatch: 1, + MinNumberStatus: 1, + } + wait.Succeeded(ctx, t, runcnx, opts, successOpts) + + pipelineRuns, err := runcnx.Clients.Tekton.TektonV1().PipelineRuns(targetNS).List(ctx, metav1.ListOptions{}) + assert.NilError(t, err) + assert.Equal(t, len(pipelineRuns.Items), 1) + // check that pipeline run contains on-path-change annotation. 
+ assert.Equal(t, pipelineRuns.Items[0].GetAnnotations()[keys.OnPathChange], "[doc/***.md]") +} diff --git a/test/bitbucket_datacenter_push_test.go b/test/bitbucket_datacenter_push_test.go index a8e442cc6..36d95f640 100644 --- a/test/bitbucket_datacenter_push_test.go +++ b/test/bitbucket_datacenter_push_test.go @@ -39,7 +39,7 @@ func TestBitbucketDataCenterCELPathChangeOnPush(t *testing.T) { branch, resp, err := client.Git.CreateRef(ctx, bitbucketWSOwner, targetNS, mainBranchRef) assert.NilError(t, err, "error creating branch: http status code: %d : %v", resp.Status, err) runcnx.Clients.Log.Infof("Branch %s has been created", branch.Name) - defer tbbs.TearDown(ctx, t, runcnx, client, -1, bitbucketWSOwner, branch.Name) + defer tbbs.TearDown(ctx, t, runcnx, client, nil, bitbucketWSOwner, branch.Name) files, err = payload.GetEntries(files, targetNS, branch.Name, triggertype.Push.String(), map[string]string{}) assert.NilError(t, err) diff --git a/test/pkg/bitbucketdatacenter/pr.go b/test/pkg/bitbucketdatacenter/pr.go index 1a5b1af15..c87615c62 100644 --- a/test/pkg/bitbucketdatacenter/pr.go +++ b/test/pkg/bitbucketdatacenter/pr.go @@ -6,9 +6,7 @@ import ( "testing" "github.com/openshift-pipelines/pipelines-as-code/pkg/params" - "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" "github.com/openshift-pipelines/pipelines-as-code/test/pkg/options" - "github.com/openshift-pipelines/pipelines-as-code/test/pkg/payload" "github.com/openshift-pipelines/pipelines-as-code/test/pkg/scm" goscm "github.com/jenkins-x/go-scm/scm" @@ -16,21 +14,24 @@ import ( ) func CreatePR(ctx context.Context, t *testing.T, client *goscm.Client, runcnx *params.Run, opts options.E2E, repo *goscm.Repository, files map[string]string, orgAndRepo, targetNS string) *goscm.PullRequest { - mainBranchRef := "refs/heads/main" - branch, resp, err := client.Git.CreateRef(ctx, orgAndRepo, targetNS, mainBranchRef) + baseBranchRef := repo.Branch + if opts.BaseBranch != "" { + baseBranchRef = opts.BaseBranch + } + + branch, resp, err := client.Git.CreateRef(ctx, orgAndRepo, targetNS, baseBranchRef) assert.NilError(t, err, "error creating branch: http status code: %d : %v", resp.Status, err) runcnx.Clients.Log.Infof("Branch %s has been created", branch.Name) - files, err = payload.GetEntries(files, targetNS, options.MainBranch, triggertype.PullRequest.String(), map[string]string{}) - assert.NilError(t, err) gitCloneURL, err := scm.MakeGitCloneURL(repo.Clone, opts.UserName, opts.Password) assert.NilError(t, err) + scmOpts := &scm.Opts{ GitURL: gitCloneURL, Log: runcnx.Clients.Log, WebURL: repo.Clone, TargetRefName: targetNS, - BaseRefName: repo.Branch, + BaseRefName: baseBranchRef, CommitTitle: fmt.Sprintf("commit %s", targetNS), } scm.PushFilesToRefGit(t, scmOpts, files) @@ -41,9 +42,10 @@ func CreatePR(ctx context.Context, t *testing.T, client *goscm.Client, runcnx *p Title: title, Body: "Test PAC on bitbucket data center", Head: targetNS, - Base: "main", + Base: baseBranchRef, } pr, resp, err := client.PullRequests.Create(ctx, orgAndRepo, prOpts) assert.NilError(t, err, "error creating pull request: http status code: %d : %v", resp.Status, err) + runcnx.Clients.Log.Infof("Created Pull Request with Title '%s'. 
Head branch '%s' ⮕ Base Branch '%s'", pr.Title, pr.Head.Ref, pr.Base.Ref) return pr } diff --git a/test/pkg/bitbucketdatacenter/setup.go b/test/pkg/bitbucketdatacenter/setup.go index 069594502..9e4b8a2e3 100644 --- a/test/pkg/bitbucketdatacenter/setup.go +++ b/test/pkg/bitbucketdatacenter/setup.go @@ -82,15 +82,16 @@ func TearDownNs(ctx context.Context, t *testing.T, runcnx *params.Run, targetNS repository.NSTearDown(ctx, t, runcnx, targetNS) } -func TearDown(ctx context.Context, t *testing.T, runcnx *params.Run, client *scm.Client, prID int, orgAndRepo, ref string) { +func TearDown(ctx context.Context, t *testing.T, runcnx *params.Run, client *scm.Client, pr *scm.PullRequest, orgAndRepo, ref string) { if os.Getenv("TEST_NOCLEANUP") == "true" { runcnx.Clients.Log.Infof("Not cleaning up and closing PR since TEST_NOCLEANUP is set") return } - if prID != -1 { - runcnx.Clients.Log.Infof("Deleting PR #%d", prID) - _, err := client.PullRequests.DeletePullRequest(ctx, orgAndRepo, prID) + // in Bitbucket Data Center, merged pull requests cannot be deleted. + if pr != nil && !pr.Merged { + runcnx.Clients.Log.Infof("Deleting PR #%d", pr.Number) + _, err := client.PullRequests.DeletePullRequest(ctx, orgAndRepo, pr.Number) assert.NilError(t, err) } diff --git a/test/pkg/options/options.go b/test/pkg/options/options.go index 283bacfc0..29f0b806e 100644 --- a/test/pkg/options/options.go +++ b/test/pkg/options/options.go @@ -4,6 +4,7 @@ import "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascod type E2E struct { Repo, Organization string + BaseBranch string DirectWebhook bool ProjectID int ControllerURL string From 66d16b3509480eb66a0032b3f9274fdf9fe18b8f Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Tue, 10 Jun 2025 17:05:58 +0530 Subject: [PATCH 12/20] fix: migrate from brews to homebrew_casks in goreleaser made changes to goreleaser to use homebrews_casks instead of brews Signed-off-by: Zaki Shaikh --- .goreleaser.yml | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 42ace2a3d..07c709b2d 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -74,25 +74,26 @@ release: https://release-{{ replace .Tag "." 
"-" }}.pipelines-as-code.pages.dev -brews: +homebrew_casks: - name: tektoncd-pac repository: owner: openshift-pipelines name: homebrew-pipelines-as-code directory: Formula dependencies: - - name: tektoncd-cli - type: optional - - name: git + - formula: tektoncd-cli + - formula: git homepage: "https://pipelinesascode.com" description: tkn-pac - A command line interface for interacting with Pipelines as Code - install: | - bin.install "tkn-pac" => "tkn-pac" - output = Utils.popen_read("SHELL=bash #{bin}/tkn-pac completion bash") - (bash_completion/"tkn-pac").write output - output = Utils.popen_read("SHELL=zsh #{bin}/tkn-pac completion zsh") - (zsh_completion/"_tkn-pac").write output - prefix.install_metafiles + hooks: + pre: + install: | + bin.install "tkn-pac" => "tkn-pac" + output = Utils.popen_read("SHELL=bash #{bin}/tkn-pac completion bash") + (bash_completion/"tkn-pac").write output + output = Utils.popen_read("SHELL=zsh #{bin}/tkn-pac completion zsh") + (zsh_completion/"_tkn-pac").write output + prefix.install_metafiles nfpms: - file_name_template: >- tkn-pac- From 3eac400bb9a678d89b90c6323c37d6e51a342b15 Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Wed, 18 Jun 2025 16:10:14 +0530 Subject: [PATCH 13/20] fix: use correct event type in log in GetTektonDir func this fixes logs message in GetTektonDir func for event type. Signed-off-by: Zaki Shaikh --- pkg/provider/bitbucketcloud/bitbucket.go | 2 +- pkg/provider/bitbucketcloud/bitbucket_test.go | 14 ++++++-- .../bitbucketdatacenter.go | 2 +- pkg/provider/gitea/gitea.go | 2 +- pkg/provider/github/github.go | 6 +++- pkg/provider/github/github_test.go | 36 ++++++++++++++----- pkg/provider/gitlab/gitlab.go | 6 +++- pkg/provider/gitlab/gitlab_test.go | 25 +++++++++++-- 8 files changed, 73 insertions(+), 20 deletions(-) diff --git a/pkg/provider/bitbucketcloud/bitbucket.go b/pkg/provider/bitbucketcloud/bitbucket.go index 28da230eb..fc4e5b0af 100644 --- a/pkg/provider/bitbucketcloud/bitbucket.go +++ b/pkg/provider/bitbucketcloud/bitbucket.go @@ -171,7 +171,7 @@ func (v *Provider) getDir(event *info.Event, path string) ([]bitbucket.Repositor revision = event.DefaultBranch v.Logger.Infof("Using PipelineRun definition from default_branch: %s", event.DefaultBranch) } else { - v.Logger.Infof("Using PipelineRun definition from source pull request SHA: %s", event.SHA) + v.Logger.Infof("Using PipelineRun definition from source %s commit SHA: %s", event.TriggerTarget.String(), event.SHA) } repoFileOpts := &bitbucket.RepositoryFilesOptions{ Owner: event.Organization, diff --git a/pkg/provider/bitbucketcloud/bitbucket_test.go b/pkg/provider/bitbucketcloud/bitbucket_test.go index 0e0ccafa5..8641cee5a 100644 --- a/pkg/provider/bitbucketcloud/bitbucket_test.go +++ b/pkg/provider/bitbucketcloud/bitbucket_test.go @@ -8,6 +8,7 @@ import ( "github.com/openshift-pipelines/pipelines-as-code/pkg/params" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/info" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings" + "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" "github.com/openshift-pipelines/pipelines-as-code/pkg/provider" bbcloudtest "github.com/openshift-pipelines/pipelines-as-code/pkg/provider/bitbucketcloud/test" "github.com/openshift-pipelines/pipelines-as-code/pkg/provider/bitbucketcloud/types" @@ -35,11 +36,18 @@ func TestGetTektonDir(t *testing.T) { filterMessageSnippet string }{ { - name: "Get Tekton Directory", - event: bbcloudtest.MakeEvent(nil), + name: "Get Tekton Directory on pull request", + 
event: bbcloudtest.MakeEvent(&info.Event{TriggerTarget: triggertype.PullRequest}), testDirPath: "../../pipelineascode/testdata/pull_request/.tekton", contentContains: "kind: PipelineRun", - filterMessageSnippet: "Using PipelineRun definition from source pull request SHA", + filterMessageSnippet: "Using PipelineRun definition from source pull_request commit SHA", + }, + { + name: "Get Tekton Directory on push", + event: bbcloudtest.MakeEvent(&info.Event{TriggerTarget: triggertype.Push}), + testDirPath: "../../pipelineascode/testdata/pull_request/.tekton", + contentContains: "kind: PipelineRun", + filterMessageSnippet: "Using PipelineRun definition from source push commit SHA", }, { name: "Get Tekton Directory Mainbranch", diff --git a/pkg/provider/bitbucketdatacenter/bitbucketdatacenter.go b/pkg/provider/bitbucketdatacenter/bitbucketdatacenter.go index e84764df4..34117e38b 100644 --- a/pkg/provider/bitbucketdatacenter/bitbucketdatacenter.go +++ b/pkg/provider/bitbucketdatacenter/bitbucketdatacenter.go @@ -216,7 +216,7 @@ func (v *Provider) GetTektonDir(ctx context.Context, event *info.Event, path, pr at := "" if v.provenance == "source" { at = event.SHA - v.Logger.Infof("Using PipelineRun definition from source pull request SHA: %s", event.SHA) + v.Logger.Infof("Using PipelineRun definition from source %s commit SHA: %s", event.TriggerTarget.String(), event.SHA) } else { v.Logger.Infof("Using PipelineRun definition from default_branch: %s", event.DefaultBranch) } diff --git a/pkg/provider/gitea/gitea.go b/pkg/provider/gitea/gitea.go index db0e547ae..816659dde 100644 --- a/pkg/provider/gitea/gitea.go +++ b/pkg/provider/gitea/gitea.go @@ -254,7 +254,7 @@ func (v *Provider) GetTektonDir(_ context.Context, event *info.Event, path, prov revision = event.DefaultBranch v.Logger.Infof("Using PipelineRun definition from default_branch: %s", event.DefaultBranch) } else { - v.Logger.Infof("Using PipelineRun definition from source pull request SHA: %s", event.SHA) + v.Logger.Infof("Using PipelineRun definition from source %s commit SHA: %s", event.TriggerTarget.String(), event.SHA) } tektonDirSha := "" diff --git a/pkg/provider/github/github.go b/pkg/provider/github/github.go index d61fbf1ae..67a946d5d 100644 --- a/pkg/provider/github/github.go +++ b/pkg/provider/github/github.go @@ -324,7 +324,11 @@ func (v *Provider) GetTektonDir(ctx context.Context, runevent *info.Event, path, revision = runevent.DefaultBranch v.Logger.Infof("Using PipelineRun definition from default_branch: %s", runevent.DefaultBranch) } else { - v.Logger.Infof("Using PipelineRun definition from source pull request %s/%s#%d SHA on %s", runevent.Organization, runevent.Repository, runevent.PullRequestNumber, runevent.SHA) + prInfo := "" + if runevent.TriggerTarget == triggertype.PullRequest { + prInfo = fmt.Sprintf("%s/%s#%d", runevent.Organization, runevent.Repository, runevent.PullRequestNumber) + } + v.Logger.Infof("Using PipelineRun definition from source %s %s on commit SHA %s", runevent.TriggerTarget.String(), prInfo, runevent.SHA) } rootobjects, _, err := v.Client().Git.GetTree(ctx, runevent.Organization, runevent.Repository, revision, false) diff --git a/pkg/provider/github/github_test.go b/pkg/provider/github/github_test.go index 6ae39ae47..0a6c2f781 100644 --- a/pkg/provider/github/github_test.go +++ b/pkg/provider/github/github_test.go @@ -240,15 +240,32 @@ func TestGetTektonDir(t *testing.T) { expectedGHApiCalls int64 }{ { - name: "test no subtree", + name: "test no subtree on pull request", event: &info.Event{ - 
Organization: "tekton", - Repository: "cat", - SHA: "123", + Organization: "tekton", + Repository: "cat", + SHA: "123", + TriggerTarget: triggertype.PullRequest, + }, + expectedString: "PipelineRun", + treepath: "testdata/tree/simple", + filterMessageSnippet: "Using PipelineRun definition from source pull_request tekton/cat#0", + // 1. Get Repo root objects + // 2. Get Tekton Dir objects + // 3/4. Get object content for each object (pipelinerun.yaml, pipeline.yaml) + expectedGHApiCalls: 4, + }, + { + name: "test no subtree on push", + event: &info.Event{ + Organization: "tekton", + Repository: "cat", + SHA: "123", + TriggerTarget: triggertype.Push, }, expectedString: "PipelineRun", treepath: "testdata/tree/simple", - filterMessageSnippet: "Using PipelineRun definition from source pull request tekton/cat#0", + filterMessageSnippet: "Using PipelineRun definition from source push", // 1. Get Repo root objects // 2. Get Tekton Dir objects // 3/4. Get object content for each object (pipelinerun.yaml, pipeline.yaml) @@ -302,13 +319,14 @@ func TestGetTektonDir(t *testing.T) { { name: "test no tekton directory", event: &info.Event{ - Organization: "tekton", - Repository: "cat", - SHA: "123", + Organization: "tekton", + Repository: "cat", + SHA: "123", + TriggerTarget: triggertype.PullRequest, }, expectedString: "", treepath: "testdata/tree/notektondir", - filterMessageSnippet: "Using PipelineRun definition from source pull request tekton/cat#0", + filterMessageSnippet: "Using PipelineRun definition from source pull_request tekton/cat#0", // 1. Get Repo root objects // _. No tekton dir to fetch expectedGHApiCalls: 1, diff --git a/pkg/provider/gitlab/gitlab.go b/pkg/provider/gitlab/gitlab.go index e4159a7fa..7201436f6 100644 --- a/pkg/provider/gitlab/gitlab.go +++ b/pkg/provider/gitlab/gitlab.go @@ -329,7 +329,11 @@ func (v *Provider) GetTektonDir(_ context.Context, event *info.Event, path, prov revision = event.DefaultBranch v.Logger.Infof("Using PipelineRun definition from default_branch: %s", event.DefaultBranch) } else { - v.Logger.Infof("Using PipelineRun definition from source merge request SHA: %s", event.SHA) + trigger := event.TriggerTarget.String() + if event.TriggerTarget == triggertype.PullRequest { + trigger = "merge request" + } + v.Logger.Infof("Using PipelineRun definition from source %s on commit SHA: %s", trigger, event.SHA) } opt := &gitlab.ListTreeOptions{ diff --git a/pkg/provider/gitlab/gitlab_test.go b/pkg/provider/gitlab/gitlab_test.go index 8a3509ad8..8c88467b9 100644 --- a/pkg/provider/gitlab/gitlab_test.go +++ b/pkg/provider/gitlab/gitlab_test.go @@ -15,6 +15,7 @@ import ( "github.com/openshift-pipelines/pipelines-as-code/pkg/params/clients" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/info" "github.com/openshift-pipelines/pipelines-as-code/pkg/params/settings" + "github.com/openshift-pipelines/pipelines-as-code/pkg/params/triggertype" "github.com/openshift-pipelines/pipelines-as-code/pkg/provider" thelp "github.com/openshift-pipelines/pipelines-as-code/pkg/provider/gitlab/test" testclient "github.com/openshift-pipelines/pipelines-as-code/pkg/test/clients" @@ -460,12 +461,30 @@ func TestGetTektonDir(t *testing.T) { wantErr: "error unmarshalling yaml file pr.yaml: yaml: line 4: could not find expected ':'", }, { - name: "list tekton dir", + name: "list tekton dir on pull request", prcontent: string(samplePR), args: args{ path: ".tekton", event: &info.Event{ - HeadBranch: "main", + HeadBranch: "main", + TriggerTarget: triggertype.PullRequest, + }, + }, + 
fields: fields{ + sourceProjectID: 100, + }, + wantClient: true, + wantStr: "kind: PipelineRun", + filterMessageSnippet: `Using PipelineRun definition from source merge request on commit SHA`, + }, + { + name: "list tekton dir on push", + prcontent: string(samplePR), + args: args{ + path: ".tekton", + event: &info.Event{ + HeadBranch: "main", + TriggerTarget: triggertype.Push, }, }, fields: fields{ @@ -473,7 +492,7 @@ func TestGetTektonDir(t *testing.T) { }, wantClient: true, wantStr: "kind: PipelineRun", - filterMessageSnippet: `Using PipelineRun definition from source merge request SHA`, + filterMessageSnippet: `Using PipelineRun definition from source push on commit SHA`, }, { name: "list tekton dir on default_branch", From 522240f568d7ae3984f451c58a11cf11b09b318f Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Mon, 30 Jun 2025 18:27:56 +0530 Subject: [PATCH 14/20] fix(ci): golangci-lint revive var-naming error in linters fixed golangci-lint revive var-naming error in linters Signed-off-by: Zaki Shaikh --- pkg/provider/bitbucketcloud/types/types.go | 1 + pkg/provider/bitbucketdatacenter/types/types.go | 1 + pkg/secrets/types/types.go | 1 + 3 files changed, 3 insertions(+) diff --git a/pkg/provider/bitbucketcloud/types/types.go b/pkg/provider/bitbucketcloud/types/types.go index dd34db076..fe74310c8 100644 --- a/pkg/provider/bitbucketcloud/types/types.go +++ b/pkg/provider/bitbucketcloud/types/types.go @@ -1,3 +1,4 @@ +//revive:disable-next-line:var-naming package types type Workspace struct { diff --git a/pkg/provider/bitbucketdatacenter/types/types.go b/pkg/provider/bitbucketdatacenter/types/types.go index 395b3e972..94c524e00 100644 --- a/pkg/provider/bitbucketdatacenter/types/types.go +++ b/pkg/provider/bitbucketdatacenter/types/types.go @@ -1,3 +1,4 @@ +//revive:disable-next-line:var-naming package types type UserWithMetadata struct { diff --git a/pkg/secrets/types/types.go b/pkg/secrets/types/types.go index 1975de140..e377bae17 100644 --- a/pkg/secrets/types/types.go +++ b/pkg/secrets/types/types.go @@ -1,3 +1,4 @@ +//revive:disable-next-line:var-naming package types type SecretValue struct { From 04374631b0b353200334e9d66eeca25bec451d65 Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Wed, 2 Jul 2025 17:00:29 +0530 Subject: [PATCH 15/20] chore: Make go-testing PipelineRun targeted by all branches this commits changes value of on-target-branch annotation in go-testing PipelineRun to make it be triggered when any branch is targeted in the PaC repo. Signed-off-by: Zaki Shaikh --- .tekton/go.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.tekton/go.yaml b/.tekton/go.yaml index 57f0a22a0..fd73cab87 100644 --- a/.tekton/go.yaml +++ b/.tekton/go.yaml @@ -7,7 +7,7 @@ metadata: pipelinesascode.tekton.dev/max-keep-runs: "2" pipelinesascode.tekton.dev/cancel-in-progress: "true" pipelinesascode.tekton.dev/on-event: "pull_request" - pipelinesascode.tekton.dev/on-target-branch: "main" + pipelinesascode.tekton.dev/on-target-branch: "[*]" pipelinesascode.tekton.dev/on-path-change: "[***/*.go]" spec: params: From 165d92a94166d0f3cae78553fae776cff06edc02 Mon Sep 17 00:00:00 2001 From: Zaki Shaikh Date: Fri, 4 Jul 2025 10:50:03 +0530 Subject: [PATCH 16/20] fix(github): guard checkWebhookSecretValidity against nil response Fix nil-pointer panic when /rate_limit returns success without SCIM data or when the HTTP response itself is nil. Add early err/resp checks and ensure rl and rl.SCIM are non-nil before accessing them. 
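The ordering of the guards matters; as a rough standalone sketch (local stand-in types
instead of go-github's RateLimits/Response, so this is not the exact code in github.go):

    package main

    import (
        "errors"
        "fmt"
        "net/http"
    )

    type rate struct{ Remaining int }
    type rateLimits struct{ SCIM *rate }
    type response struct{ StatusCode int }

    func checkRateLimit(rl *rateLimits, resp *response, err error) error {
        // A 404 means the rate_limit API is not enabled for this token: skip the check.
        if resp != nil && resp.StatusCode == http.StatusNotFound {
            return nil
        }
        // Surface transport errors (where resp may be nil) before using resp any further.
        if err != nil {
            return fmt.Errorf("error making request to the GitHub API checking rate limit: %w", err)
        }
        // Guard against a success response that carries no SCIM data.
        if rl == nil || rl.SCIM == nil {
            return nil
        }
        if rl.SCIM.Remaining == 0 {
            return errors.New("api rate limit exceeded")
        }
        return nil
    }

    func main() {
        // Previously a nil response or a missing SCIM block would panic here.
        fmt.Println(checkRateLimit(nil, nil, errors.New("network down")))
        fmt.Println(checkRateLimit(&rateLimits{}, &response{StatusCode: 200}, nil))
    }
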
Jira: https://issues.redhat.com/browse/SRVKP-8075 Signed-off-by: Zaki Shaikh --- pkg/provider/github/github.go | 15 ++++++--- pkg/provider/github/github_test.go | 54 ++++++++++++++++++++++++++---- 2 files changed, 58 insertions(+), 11 deletions(-) diff --git a/pkg/provider/github/github.go b/pkg/provider/github/github.go index 67a946d5d..da3d91bfa 100644 --- a/pkg/provider/github/github.go +++ b/pkg/provider/github/github.go @@ -258,7 +258,7 @@ func parseTS(headerTS string) (time.Time, error) { // the issue was. func (v *Provider) checkWebhookSecretValidity(ctx context.Context, cw clockwork.Clock) error { rl, resp, err := v.Client().RateLimit.Get(ctx) - if resp.StatusCode == http.StatusNotFound { + if resp != nil && resp.StatusCode == http.StatusNotFound { v.Logger.Info("skipping checking if token has expired, rate_limit api is not enabled on token") return nil } @@ -266,18 +266,25 @@ func (v *Provider) checkWebhookSecretValidity(ctx context.Context, cw clockwork. if err != nil { return fmt.Errorf("error making request to the GitHub API checking rate limit: %w", err) } - if resp.Header.Get("GitHub-Authentication-Token-Expiration") != "" { + + if resp != nil && resp.Header.Get("GitHub-Authentication-Token-Expiration") != "" { ts, err := parseTS(resp.Header.Get("GitHub-Authentication-Token-Expiration")) if err != nil { return fmt.Errorf("error parsing token expiration date: %w", err) } if cw.Now().After(ts) { - errm := fmt.Sprintf("token has expired at %s", resp.TokenExpiration.Format(time.RFC1123)) - return fmt.Errorf("%s", errm) + errMsg := fmt.Sprintf("token has expired at %s", resp.TokenExpiration.Format(time.RFC1123)) + return fmt.Errorf("%s", errMsg) } } + // Guard against nil rl or rl.SCIM which could lead to a panic. + if rl == nil || rl.SCIM == nil { + v.Logger.Info("skipping token expiration check, SCIM rate limit API is not available for this token") + return nil + } + if rl.SCIM.Remaining == 0 { return fmt.Errorf("api rate limit exceeded. 
Access will be restored at %s", rl.SCIM.Reset.Format(time.RFC1123)) } diff --git a/pkg/provider/github/github_test.go b/pkg/provider/github/github_test.go index 0a6c2f781..a069a32f0 100644 --- a/pkg/provider/github/github_test.go +++ b/pkg/provider/github/github_test.go @@ -914,6 +914,12 @@ func TestGetFiles(t *testing.T) { } } +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return f(r) +} + func TestProvider_checkWebhookSecretValidity(t *testing.T) { t1 := time.Date(1999, time.February, 3, 4, 5, 6, 7, time.UTC) cw := clockwork.NewFakeClockAt(t1) @@ -925,7 +931,9 @@ func TestProvider_checkWebhookSecretValidity(t *testing.T) { expHeaderSet bool apiNotEnabled bool wantLogSnippet string - report500 bool + statusCode int + wantNilSCIM bool + wantNilResp bool }{ { name: "remaining scim calls", @@ -950,6 +958,22 @@ func TestProvider_checkWebhookSecretValidity(t *testing.T) { name: "no header mean unlimited", remaining: 5, }, + { + name: "skipping api rate limit is not enabled", + remaining: 0, + statusCode: http.StatusNotFound, + }, + { + name: "skipping because scim is not available", + remaining: 0, + wantNilSCIM: true, + }, + { + name: "resp is nil", + remaining: 0, + wantNilResp: true, + wantSubErr: "error making request to the GitHub API checking rate limit", + }, { name: "no header but no remaining scim calls", remaining: 0, @@ -958,7 +982,12 @@ func TestProvider_checkWebhookSecretValidity(t *testing.T) { { name: "api error", wantSubErr: "error making request to the GitHub API checking rate limit", - report500: true, + statusCode: http.StatusInternalServerError, + }, + { + name: "not enabled", + apiNotEnabled: true, + wantLogSnippet: "skipping checking", }, { name: "not enabled", @@ -974,14 +1003,15 @@ func TestProvider_checkWebhookSecretValidity(t *testing.T) { if !tt.apiNotEnabled { mux.HandleFunc("/rate_limit", func(rw http.ResponseWriter, _ *http.Request) { - if tt.report500 { - rw.WriteHeader(http.StatusInternalServerError) + if tt.statusCode != 0 { + rw.WriteHeader(tt.statusCode) return } - s := &github.RateLimits{ - SCIM: &github.Rate{ + s := &github.RateLimits{} + if !tt.wantNilSCIM { + s.SCIM = &github.Rate{ Remaining: tt.remaining, - }, + } } st := new(struct { Resources *github.RateLimits `json:"resources"` @@ -996,6 +1026,16 @@ func TestProvider_checkWebhookSecretValidity(t *testing.T) { }) } defer teardown() + + // create bad round tripper to make response nil and test that it handles that case. 
+ if tt.wantNilResp { + errRT := roundTripperFunc(func(*http.Request) (*http.Response, error) { + return nil, fmt.Errorf("network down") + }) + httpClient := &http.Client{Transport: errRT} + fakeclient = github.NewClient(httpClient) + } + v := &Provider{ ghClient: fakeclient, Logger: logger, From 25d6e84b19d79a45a9e325bf4bd1089fd884be56 Mon Sep 17 00:00:00 2001 From: Pipelines as Code CI Robot Date: Fri, 4 Jul 2025 16:36:02 +0000 Subject: [PATCH 17/20] Release yaml generated from https://github.com/openshift-pipelines/pipelines-as-code/commit/165d92a94166d0f3cae78553fae776cff06edc02 for release v0.35.2 --- docs/content/ALLVERSIONS | 2 +- pkg/params/version/version.txt | 2 +- release.k8s.yaml | 70 +++++++++++++++---------------- release.yaml | 76 +++++++++++++++++----------------- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/docs/content/ALLVERSIONS b/docs/content/ALLVERSIONS index 8844cfc09..ce4f3d5db 100644 --- a/docs/content/ALLVERSIONS +++ b/docs/content/ALLVERSIONS @@ -1 +1 @@ -nightly,stable,v0.35.1,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 +nightly,stable,v0.36.0,v0.35.2,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 diff --git a/pkg/params/version/version.txt b/pkg/params/version/version.txt index 6911254bc..84ee7160a 100644 --- a/pkg/params/version/version.txt +++ b/pkg/params/version/version.txt @@ -1 +1 @@ -v0.35.1 +v0.35.2 diff --git a/release.k8s.yaml b/release.k8s.yaml index fa2de2cc7..6af7ea9ec 100644 --- a/release.k8s.yaml +++ b/release.k8s.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - 
app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -185,7 +185,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -217,7 +217,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -227,7 +227,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -244,7 +244,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -261,7 +261,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -290,7 +290,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -322,7 +322,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -332,7 +332,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -360,7 +360,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -377,7 +377,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -402,7 +402,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -434,7 +434,7 @@ kind: CustomResourceDefinition metadata: name: 
repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1032,7 +1032,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1058,7 +1058,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.1" + version: "v0.35.2" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1073,7 +1073,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1096,7 +1096,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code # The data is populated at install time --- @@ -1105,7 +1105,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1138,7 +1138,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1294,7 +1294,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1312,7 +1312,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" spec: securityContext: runAsNonRoot: true @@ -1409,7 +1409,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1446,7 +1446,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1463,7 +1463,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app: pipelines-as-code-watcher spec: securityContext: @@ -1537,7 +1537,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1571,7 +1571,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1588,7 +1588,7 @@ spec: app.kubernetes.io/component: webhook 
app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" spec: securityContext: runAsNonRoot: true @@ -1644,7 +1644,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: ports: diff --git a/release.yaml b/release.yaml index fb2ec149a..958f9a7e6 100644 --- a/release.yaml +++ b/release.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -188,7 +188,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -220,7 +220,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -230,7 +230,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code 
rules: @@ -247,7 +247,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -264,7 +264,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -296,7 +296,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -328,7 +328,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -338,7 +338,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -366,7 +366,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -383,7 +383,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -408,7 +408,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -440,7 +440,7 @@ kind: CustomResourceDefinition metadata: name: repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1038,7 +1038,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1064,7 +1064,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.1" + version: "v0.35.2" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1079,7 +1079,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1102,7 +1102,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code # The data is populated at install time --- @@ -1111,7 +1111,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - 
app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1144,7 +1144,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1300,7 +1300,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1318,7 +1318,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" spec: securityContext: runAsNonRoot: true @@ -1415,7 +1415,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1452,7 +1452,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1469,7 +1469,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app: pipelines-as-code-watcher spec: securityContext: @@ -1543,7 +1543,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1577,7 +1577,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1594,7 +1594,7 @@ spec: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" spec: securityContext: runAsNonRoot: true @@ -1650,7 +1650,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1685,7 +1685,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" pipelines-as-code/route: controller name: pipelines-as-code-controller namespace: pipelines-as-code @@ -1753,7 +1753,7 @@ metadata: name: pipelines-as-code-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code annotations: networkoperator.openshift.io/ignore-errors: "" @@ -1775,7 +1775,7 @@ metadata: name: pipelines-as-code-controller-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.1" + app.kubernetes.io/version: "v0.35.2" app.kubernetes.io/part-of: pipelines-as-code 
annotations: networkoperator.openshift.io/ignore-errors: "" From 238d2cfbfb99e7daa218eaaae24df2460a0de54b Mon Sep 17 00:00:00 2001 From: Chmouel Boudjnah Date: Wed, 16 Jul 2025 11:24:15 +0200 Subject: [PATCH 18/20] fix: Ignore non-Tekton resources errors reporting The parser previously tried to unmarshal non-Tekton custom resources, such as `ImageDigestMirrorSet` or `StepActions`, as PipelineRuns. This caused the PaC bot to post misleading "PipelineRun failure" comments on pull requests, even when all CI jobs were successful. This change updates the parsing logic to specifically check for and ignore non-Tekton resources found within the `.tekton` directory. By skipping these files, we prevent spurious parsing errors and stop the bot from posting false-positive comments. This resolves the issue where developers were confused by failure notifications on otherwise successful PRs, improving the overall developer experience by reducing unnecessary noise. The fix ensures that comments are only posted for legitimate PipelineRun failures. Jira: https://issues.redhat.com/browse/SRVKP-8112 Signed-off-by: Chmouel Boudjnah --- pkg/pipelineascode/errors.go | 69 ++++++++++ pkg/pipelineascode/errors_test.go | 217 ++++++++++++++++++++++++++++++ pkg/pipelineascode/match.go | 53 -------- 3 files changed, 286 insertions(+), 53 deletions(-) create mode 100644 pkg/pipelineascode/errors.go create mode 100644 pkg/pipelineascode/errors_test.go diff --git a/pkg/pipelineascode/errors.go b/pkg/pipelineascode/errors.go new file mode 100644 index 000000000..d1aae06c0 --- /dev/null +++ b/pkg/pipelineascode/errors.go @@ -0,0 +1,69 @@ +package pipelineascode + +import ( + "context" + "fmt" + "regexp" + "strings" + + "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/v1alpha1" + pacerrors "github.com/openshift-pipelines/pipelines-as-code/pkg/errors" + "github.com/openshift-pipelines/pipelines-as-code/pkg/provider" + tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "go.uber.org/zap" +) + +const validationErrorTemplate = `> [!CAUTION] +> There are some errors in your PipelineRun template. + +| PipelineRun | Error | +|------|-------|` + +var regexpIgnoreErrors = regexp.MustCompile(`.*no kind.*is registered for version.*in scheme.*`) + +func (p *PacRun) checkAccessOrErrror(ctx context.Context, repo *v1alpha1.Repository, status provider.StatusOpts, viamsg string) (bool, error) { + allowed, err := p.vcx.IsAllowed(ctx, p.event) + if err != nil { + return false, fmt.Errorf("unable to verify event authorization: %w", err) + } + if allowed { + return true, nil + } + msg := fmt.Sprintf("User %s is not allowed to trigger CI %s in this repo.", p.event.Sender, viamsg) + if p.event.AccountID != "" { + msg = fmt.Sprintf("User: %s AccountID: %s is not allowed to trigger CI %s in this repo.", p.event.Sender, p.event.AccountID, viamsg) + } + p.eventEmitter.EmitMessage(repo, zap.InfoLevel, "RepositoryPermissionDenied", msg) + status.Text = msg + + if err := p.vcx.CreateStatus(ctx, p.event, status); err != nil { + return false, fmt.Errorf("failed to run create status, user is not allowed to run the CI:: %w", err) + } + return false, nil +} + +// reportValidationErrors reports validation errors found in PipelineRuns by: +// 1. Creating error messages for each validation error +// 2. Emitting error messages to the event system +// 3. Creating a markdown formatted comment on the repository with all errors.
+func (p *PacRun) reportValidationErrors(ctx context.Context, repo *v1alpha1.Repository, validationErrors []*pacerrors.PacYamlValidations) { + errorRows := make([]string, 0, len(validationErrors)) + for _, err := range validationErrors { + // if the error is a TektonConversionError, we don't want to report it since it may be a file that is not a tekton resource + // and we don't want to report it as a validation error. + if !regexpIgnoreErrors.MatchString(err.Err.Error()) && (strings.HasPrefix(err.Schema, tektonv1.SchemeGroupVersion.Group) || err.Schema == pacerrors.GenericBadYAMLValidation) { + errorRows = append(errorRows, fmt.Sprintf("| %s | `%s` |", err.Name, err.Err.Error())) + } + p.eventEmitter.EmitMessage(repo, zap.ErrorLevel, "PipelineRunValidationErrors", + fmt.Sprintf("cannot read the PipelineRun: %s, error: %s", err.Name, err.Err.Error())) + } + if len(errorRows) == 0 { + return + } + markdownErrMessage := fmt.Sprintf(`%s +%s`, validationErrorTemplate, strings.Join(errorRows, "\n")) + if err := p.vcx.CreateComment(ctx, p.event, markdownErrMessage, validationErrorTemplate); err != nil { + p.eventEmitter.EmitMessage(repo, zap.ErrorLevel, "PipelineRunCommentCreationError", + fmt.Sprintf("failed to create comment: %s", err.Error())) + } +} diff --git a/pkg/pipelineascode/errors_test.go b/pkg/pipelineascode/errors_test.go new file mode 100644 index 000000000..5a89603e6 --- /dev/null +++ b/pkg/pipelineascode/errors_test.go @@ -0,0 +1,217 @@ +package pipelineascode + +import ( + "context" + "errors" + "testing" + + "github.com/openshift-pipelines/pipelines-as-code/pkg/apis/pipelinesascode/v1alpha1" + pacerrors "github.com/openshift-pipelines/pipelines-as-code/pkg/errors" + "github.com/openshift-pipelines/pipelines-as-code/pkg/events" + "github.com/openshift-pipelines/pipelines-as-code/pkg/params/info" + "github.com/openshift-pipelines/pipelines-as-code/pkg/provider" + testclient "github.com/openshift-pipelines/pipelines-as-code/pkg/test/clients" + testprovider "github.com/openshift-pipelines/pipelines-as-code/pkg/test/provider" + "go.uber.org/zap" + zapobserver "go.uber.org/zap/zaptest/observer" + "gotest.tools/v3/assert" + rtesting "knative.dev/pkg/reconciler/testing" +) + +func TestCheckAccessOrErrror(t *testing.T) { + tests := []struct { + name string + allowIt bool + sender string + accountID string + createStatusError bool + expectedErr bool + expectedAllowed bool + expectedErrMsg string + }{ + { + name: "user is allowed", + allowIt: true, + expectedAllowed: true, + }, + { + name: "user is not allowed - no account ID", + allowIt: false, + sender: "johndoe", + expectedAllowed: false, + }, + { + name: "user is not allowed - with account ID", + allowIt: false, + sender: "johndoe", + accountID: "user123", + expectedAllowed: false, + }, + { + name: "create status error", + allowIt: false, + sender: "johndoe", + createStatusError: true, + expectedErr: true, + expectedErrMsg: "failed to run create status", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup observer to capture logs + observerCore, _ := zapobserver.New(zap.InfoLevel) + logger := zap.New(observerCore).Sugar() + + // Create test event + testEvent := &info.Event{ + Sender: tt.sender, + AccountID: tt.accountID, + } + + // Create mock provider + prov := &testprovider.TestProviderImp{ + AllowIT: tt.allowIt, + } + + // Set createStatus error if needed + if tt.createStatusError { + prov.CreateStatusErorring = true + } + + ctx, _ := rtesting.SetupFakeContext(t) + stdata, _ := 
testclient.SeedTestData(t, ctx, testclient.Data{}) + // Create mock event emitter + eventEmitter := events.NewEventEmitter(stdata.Kube, logger) + + // Create PacRun + p := &PacRun{ + event: testEvent, + vcx: prov, + logger: logger, + eventEmitter: eventEmitter, + } + + // Call the function + repo := &v1alpha1.Repository{} + status := provider.StatusOpts{} + allowed, err := p.checkAccessOrErrror(context.Background(), repo, status, "via test") + + // Verify results + if tt.expectedErr { + assert.Assert(t, err != nil, "Expected error but got nil") + if tt.expectedErrMsg != "" { + assert.Assert(t, err.Error() != "", "Expected error message but got empty string") + assert.ErrorContains(t, err, tt.expectedErrMsg) + } + } else { + assert.NilError(t, err) + } + + assert.Equal(t, tt.expectedAllowed, allowed) + }) + } +} + +func TestReportValidationErrors(t *testing.T) { + tests := []struct { + name string + validationErrors []*pacerrors.PacYamlValidations + expectCommentCreation bool + }{ + { + name: "no validation errors", + validationErrors: []*pacerrors.PacYamlValidations{}, + expectCommentCreation: false, + }, + { + name: "tekton validation errors", + validationErrors: []*pacerrors.PacYamlValidations{ + { + Name: "test-pipeline-1", + Err: errors.New("invalid pipeline spec"), + Schema: "tekton.dev", + }, + }, + expectCommentCreation: true, + }, + { + name: "non-tekton schema errors", + validationErrors: []*pacerrors.PacYamlValidations{ + { + Name: "test-other", + Err: errors.New("some error"), + Schema: "other.schema", + }, + }, + expectCommentCreation: false, + }, + { + name: "ignored errors by regex", + validationErrors: []*pacerrors.PacYamlValidations{ + { + Name: "test-ignored", + Err: errors.New("no kind test is registered for version v1 in scheme"), + Schema: "tekton.dev", + }, + }, + expectCommentCreation: false, + }, + { + name: "create comment error", + validationErrors: []*pacerrors.PacYamlValidations{ + { + Name: "test-pipeline", + Err: errors.New("validation error"), + Schema: "tekton.dev", + }, + }, + expectCommentCreation: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup observer to capture logs + observerCore, logs := zapobserver.New(zap.InfoLevel) + logger := zap.New(observerCore).Sugar() + ctx, _ := rtesting.SetupFakeContext(t) + + // Create test event + testEvent := &info.Event{} + + // Create mock provider + prov := &testprovider.TestProviderImp{} + + stdata, _ := testclient.SeedTestData(t, ctx, testclient.Data{}) + + // Create mock event emitter + eventEmitter := events.NewEventEmitter(stdata.Kube, logger) + // Create PacRun + p := &PacRun{ + event: testEvent, + vcx: prov, + logger: logger, + eventEmitter: eventEmitter, + } + + // Call the function + repo := &v1alpha1.Repository{} + p.reportValidationErrors(context.Background(), repo, tt.validationErrors) + + // Verify results + // Verify log messages for validation errors + logEntries := logs.All() + errorLogCount := 0 + for _, entry := range logEntries { + if entry.Level == zap.ErrorLevel { + errorLogCount++ + } + } + + // We should have at least one error log per validation error + assert.Assert(t, errorLogCount >= len(tt.validationErrors), + "Expected at least %d error logs, got %d", len(tt.validationErrors), errorLogCount) + }) + } +} diff --git a/pkg/pipelineascode/match.go b/pkg/pipelineascode/match.go index 6859dfe46..f9f849b9d 100644 --- a/pkg/pipelineascode/match.go +++ b/pkg/pipelineascode/match.go @@ -22,12 +22,6 @@ import ( "go.uber.org/zap" ) -const 
validationErrorTemplate = `> [!CAUTION] -> There are some errors in your PipelineRun template. - -| PipelineRun | Error | -|------|-------|` - func (p *PacRun) matchRepoPR(ctx context.Context) ([]matcher.Match, *v1alpha1.Repository, error) { repo, err := p.verifyRepoAndUser(ctx) if err != nil { @@ -432,27 +426,6 @@ func (p *PacRun) checkNeedUpdate(_ string) (string, bool) { return "", false } -func (p *PacRun) checkAccessOrErrror(ctx context.Context, repo *v1alpha1.Repository, status provider.StatusOpts, viamsg string) (bool, error) { - allowed, err := p.vcx.IsAllowed(ctx, p.event) - if err != nil { - return false, fmt.Errorf("unable to verify event authorization: %w", err) - } - if allowed { - return true, nil - } - msg := fmt.Sprintf("User %s is not allowed to trigger CI %s in this repo.", p.event.Sender, viamsg) - if p.event.AccountID != "" { - msg = fmt.Sprintf("User: %s AccountID: %s is not allowed to trigger CI %s in this repo.", p.event.Sender, p.event.AccountID, viamsg) - } - p.eventEmitter.EmitMessage(repo, zap.InfoLevel, "RepositoryPermissionDenied", msg) - status.Text = msg - - if err := p.vcx.CreateStatus(ctx, p.event, status); err != nil { - return false, fmt.Errorf("failed to run create status, user is not allowed to run the CI:: %w", err) - } - return false, nil -} - func (p *PacRun) createNeutralStatus(ctx context.Context) error { status := provider.StatusOpts{ Status: CompletedStatus, @@ -467,29 +440,3 @@ func (p *PacRun) createNeutralStatus(ctx context.Context) error { return nil } - -// reportValidationErrors reports validation errors found in PipelineRuns by: -// 1. Creating error messages for each validation error -// 2. Emitting error messages to the event system -// 3. Creating a markdown formatted comment on the repository with all errors. -func (p *PacRun) reportValidationErrors(ctx context.Context, repo *v1alpha1.Repository, validationErrors []*pacerrors.PacYamlValidations) { - errorRows := make([]string, 0, len(validationErrors)) - for _, err := range validationErrors { - // if the error is a TektonConversionError, we don't want to report it since it may be a file that is not a tekton resource - // and we don't want to report it as a validation error. 
- if strings.HasPrefix(err.Schema, tektonv1.SchemeGroupVersion.Group) || err.Schema == pacerrors.GenericBadYAMLValidation { - errorRows = append(errorRows, fmt.Sprintf("| %s | `%s` |", err.Name, err.Err.Error())) - } - p.eventEmitter.EmitMessage(repo, zap.ErrorLevel, "PipelineRunValidationErrors", - fmt.Sprintf("cannot read the PipelineRun: %s, error: %s", err.Name, err.Err.Error())) - } - if len(errorRows) == 0 { - return - } - markdownErrMessage := fmt.Sprintf(`%s -%s`, validationErrorTemplate, strings.Join(errorRows, "\n")) - if err := p.vcx.CreateComment(ctx, p.event, markdownErrMessage, validationErrorTemplate); err != nil { - p.eventEmitter.EmitMessage(repo, zap.ErrorLevel, "PipelineRunCommentCreationError", - fmt.Sprintf("failed to create comment: %s", err.Error())) - } -} From 793677d69df066d2328347b76dcd2aa95b32c138 Mon Sep 17 00:00:00 2001 From: Ben Dronen Date: Tue, 8 Jul 2025 09:37:33 -0400 Subject: [PATCH 19/20] fix(homebrew): goreleaser issue and cask install doc Signed-off-by: Ben Dronen --- .goreleaser.yml | 1 - docs/content/docs/guide/cli.md | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 07c709b2d..954e29133 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -79,7 +79,6 @@ homebrew_casks: repository: owner: openshift-pipelines name: homebrew-pipelines-as-code - directory: Formula dependencies: - formula: tektoncd-cli - formula: git diff --git a/docs/content/docs/guide/cli.md b/docs/content/docs/guide/cli.md index 39bd83c00..15d8e90c2 100644 --- a/docs/content/docs/guide/cli.md +++ b/docs/content/docs/guide/cli.md @@ -44,13 +44,13 @@ On Windows, tkn-pac will look for the Kubernetes config in `%USERPROFILE%\.kube\ tkn pac plug-in is available from HomeBrew as a "Tap". 
You simply need to run this command to install it: ```shell -brew install openshift-pipelines/pipelines-as-code/tektoncd-pac +brew install --cask openshift-pipelines/pipelines-as-code/tektoncd-pac ``` and if you need to upgrade it: ```shell -brew upgrade openshift-pipelines/pipelines-as-code/tektoncd-pac +brew upgrade --cask openshift-pipelines/pipelines-as-code/tektoncd-pac ``` `tkn pac` plug-in is compatible with [Homebrew on Linux](https://docs.brew.sh/Homebrew-on-Linux) From 842f13c659870f5bb59c94fd2e92f44b18dbe02a Mon Sep 17 00:00:00 2001 From: Pipelines as Code CI Robot Date: Wed, 16 Jul 2025 18:12:40 +0000 Subject: [PATCH 20/20] Release yaml generated from https://github.com/openshift-pipelines/pipelines-as-code/commit/793677d69df066d2328347b76dcd2aa95b32c138 for release v0.35.3 --- docs/content/ALLVERSIONS | 2 +- pkg/params/version/version.txt | 2 +- release.k8s.yaml | 70 +++++++++++++++---------------- release.yaml | 76 +++++++++++++++++----------------- 4 files changed, 75 insertions(+), 75 deletions(-) diff --git a/docs/content/ALLVERSIONS b/docs/content/ALLVERSIONS index ce4f3d5db..3f3e26de7 100644 --- a/docs/content/ALLVERSIONS +++ b/docs/content/ALLVERSIONS @@ -1 +1 @@ -nightly,stable,v0.36.0,v0.35.2,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 +nightly,stable,v0.36.0,v0.35.3,v0.34.0,v0.33.2,v0.32.0,v0.31.0,v0.30.0,v0.29.1,v0.28.2,v0.27.2,v0.26.0,v0.25.0,v0.24.7,v0.23.0,v0.22.6,v0.21.5,v0.20.0,v0.19.6,v0.18.0,v0.17.7,v0.16.0,v0.15.6,v0.14.3,v0.13.1,v0.12.0,v0.11.1 diff --git a/pkg/params/version/version.txt b/pkg/params/version/version.txt index 84ee7160a..05edec9d6 100644 --- a/pkg/params/version/version.txt +++ b/pkg/params/version/version.txt @@ -1 +1 @@ -v0.35.2 +v0.35.3 diff --git a/release.k8s.yaml b/release.k8s.yaml index 6af7ea9ec..b02b84508 100644 --- a/release.k8s.yaml +++ b/release.k8s.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" 
app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -185,7 +185,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -217,7 +217,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -227,7 +227,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -244,7 +244,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -261,7 +261,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -290,7 +290,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -322,7 +322,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -332,7 +332,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -360,7 +360,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -377,7 +377,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -402,7 +402,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + 
app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -434,7 +434,7 @@ kind: CustomResourceDefinition metadata: name: repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1032,7 +1032,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1058,7 +1058,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.2" + version: "v0.35.3" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1073,7 +1073,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1096,7 +1096,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code # The data is populated at install time --- @@ -1105,7 +1105,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1138,7 +1138,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1294,7 +1294,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1312,7 +1312,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" spec: securityContext: runAsNonRoot: true @@ -1409,7 +1409,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1446,7 +1446,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1463,7 +1463,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app: pipelines-as-code-watcher spec: securityContext: @@ -1537,7 +1537,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1571,7 +1571,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: 
"v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1588,7 +1588,7 @@ spec: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" spec: securityContext: runAsNonRoot: true @@ -1644,7 +1644,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: ports: diff --git a/release.yaml b/release.yaml index 958f9a7e6..c4d7d05e0 100644 --- a/release.yaml +++ b/release.yaml @@ -17,7 +17,7 @@ kind: Namespace metadata: name: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code openshift.io/cluster-monitoring: "true" @@ -42,7 +42,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -61,7 +61,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -78,7 +78,7 @@ kind: ClusterRole metadata: name: pipelines-as-code-aggregate labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rbac.authorization.k8s.io/aggregate-to-edit: "true" @@ -118,7 +118,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -128,7 +128,7 @@ metadata: name: pipelines-as-code-controller-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -142,7 +142,7 @@ metadata: name: pipelines-as-code-controller-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -159,7 +159,7 @@ metadata: name: pipeline-as-code-controller-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -188,7 +188,7 @@ metadata: name: pipelines-as-code-controller-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -220,7 +220,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -230,7 +230,7 @@ metadata: name: pipelines-as-code-watcher-role namespace: 
pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -247,7 +247,7 @@ metadata: name: pipelines-as-code-watcher-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -264,7 +264,7 @@ metadata: name: pipeline-as-code-watcher-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -296,7 +296,7 @@ metadata: name: pipelines-as-code-watcher-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -328,7 +328,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code --- @@ -338,7 +338,7 @@ metadata: name: pipelines-as-code-webhook-role namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -366,7 +366,7 @@ metadata: name: pipelines-as-code-webhook-binding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -383,7 +383,7 @@ metadata: name: pipeline-as-code-webhook-clusterrole namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code rules: @@ -408,7 +408,7 @@ metadata: name: pipelines-as-code-webhook-clusterbinding namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code subjects: @@ -440,7 +440,7 @@ kind: CustomResourceDefinition metadata: name: repositories.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code spec: @@ -1038,7 +1038,7 @@ metadata: name: pipelines-as-code namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1064,7 +1064,7 @@ metadata: apiVersion: v1 data: # pipelines as code controller version - version: "v0.35.2" + version: "v0.35.3" # controller url to be used for configuring webhook using cli controller-url: "" @@ -1079,7 +1079,7 @@ metadata: name: pipelines-as-code-info namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code --- @@ -1102,7 +1102,7 @@ metadata: name: pipelines-as-code-webhook-certs namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: 
pipelines-as-code # The data is populated at install time --- @@ -1111,7 +1111,7 @@ kind: ValidatingWebhookConfiguration metadata: name: validation.pipelinesascode.tekton.dev labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code webhooks: - admissionReviewVersions: ["v1"] @@ -1144,7 +1144,7 @@ metadata: name: pipelines-as-code-config-observability namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code data: _example: | @@ -1300,7 +1300,7 @@ metadata: name: pipelines-as-code-controller namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1318,7 +1318,7 @@ spec: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" spec: securityContext: runAsNonRoot: true @@ -1415,7 +1415,7 @@ metadata: namespace: pipelines-as-code labels: app: pipelines-as-code-controller - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1452,7 +1452,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1469,7 +1469,7 @@ spec: app.kubernetes.io/component: watcher app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app: pipelines-as-code-watcher spec: securityContext: @@ -1543,7 +1543,7 @@ metadata: name: pipelines-as-code-watcher namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code app: pipelines-as-code-watcher spec: @@ -1577,7 +1577,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: replicas: 1 @@ -1594,7 +1594,7 @@ spec: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" spec: securityContext: runAsNonRoot: true @@ -1650,7 +1650,7 @@ metadata: name: pipelines-as-code-webhook namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code spec: ports: @@ -1685,7 +1685,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: pipelines-as-code - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" pipelines-as-code/route: controller name: pipelines-as-code-controller namespace: pipelines-as-code @@ -1753,7 +1753,7 @@ metadata: name: pipelines-as-code-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code annotations: networkoperator.openshift.io/ignore-errors: "" @@ -1775,7 +1775,7 @@ metadata: name: 
pipelines-as-code-controller-monitor namespace: pipelines-as-code labels: - app.kubernetes.io/version: "v0.35.2" + app.kubernetes.io/version: "v0.35.3" app.kubernetes.io/part-of: pipelines-as-code annotations: networkoperator.openshift.io/ignore-errors: ""