From 563483f60876dd0d4f08237852e49b3ac2400ced Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Tue, 7 Oct 2025 10:21:58 +0200 Subject: [PATCH 01/20] WIP: Trying hive. Take #3 ;-) --- .../pipelines/e2e-main-pipeline.yaml | 4 +- integration-tests/pipelines/tssc-cli-e2e.yaml | 258 +++++++++++++----- integration-tests/tasks/start-pipelines.yaml | 2 +- 3 files changed, 194 insertions(+), 70 deletions(-) diff --git a/integration-tests/pipelines/e2e-main-pipeline.yaml b/integration-tests/pipelines/e2e-main-pipeline.yaml index f5a43dc41..d9fc8f693 100644 --- a/integration-tests/pipelines/e2e-main-pipeline.yaml +++ b/integration-tests/pipelines/e2e-main-pipeline.yaml @@ -64,9 +64,9 @@ spec: resolver: git params: - name: url - value: https://github.com/redhat-appstudio/tssc-cli.git + value: https://github.com/rhopp/tssc-cli.git - name: revision - value: main + value: hive-try3 - name: pathInRepo value: integration-tests/tasks/start-pipelines.yaml params: diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 72e9ae6e4..81630d9ff 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -47,46 +47,175 @@ spec: description: 'Optional testplan.json content encoded in base64 format. If not provided, testplan will be downloaded from the repository.' 
default: "" tasks: - - name: rosa-hcp-metadata - taskRef: - resolver: git - params: - - name: url - value: https://github.com/konflux-ci/tekton-integration-catalog.git - - name: revision - value: main - - name: pathInRepo - value: tasks/rosa/hosted-cp/rosa-hcp-metadata/0.1/rosa-hcp-metadata.yaml - - name: provision-rosa - runAfter: - - rosa-hcp-metadata - taskRef: - resolver: git - params: - - name: url - value: https://github.com/konflux-ci/tekton-integration-catalog.git - - name: revision - value: main - - name: pathInRepo - value: tasks/rosa/hosted-cp/rosa-hcp-provision/0.2/rosa-hcp-provision.yaml - params: - - name: cluster-name - value: "$(tasks.rosa-hcp-metadata.results.cluster-name)" - - name: ocp-version - value: "$(params.ocp-version)" - - name: replicas - value: "$(params.replicas)" - - name: machine-type - value: "$(params.machine-type)" - - name: konflux-test-infra-secret - value: "$(params.konflux-test-infra-secret)" - - name: cloud-credential-key - value: "$(params.cloud-credential-key)" - - name: oci-container - value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" + - name: provision-cluster + taskSpec: + results: + - name: ocp-login-command + value: "$(steps.claim-cluster.results.ocp-login-command)" + volumes: + - name: hive-creds-volume + secret: + secretName: rhopp-test + - name: credentials + emptyDir: {} + steps: + - name: claim-cluster + image: registry.redhat.io/openshift4/ose-cli@sha256:15da03b04318bcc842060b71e9dd6d6c2595edb4e8fdd11b0c6781eeb03ca182 + volumeMounts: + - name: hive-creds-volume + mountPath: /usr/local/hive-creds + results: + - name: ocp-login-command + description: "Ocp login command" + script: | + #!/usr/bin/bash + oc login $(cat /usr/local/hive-creds/kube_api_url) -u cluster-admin -p $(cat /usr/local/hive-creds/password) + oc whoami + oc get clusterpool -n hive + oc create -f - < $(step.results.ocp-login-command.path) + + kubeconfig_secret=$(oc get clusterdeployment -n $cp_namespace 
$cp_namespace -o jsonpath={.spec.clusterMetadata.adminKubeconfigSecretRef.name}) + + oc get secret -n $cp_namespace $kubeconfig_secret -o jsonpath={.data.kubeconfig} |base64 -d > /tmp/ephemereal.config + export KUBECONFIG=/tmp/ephemereal.config + csr_max_retries=5 + csr_sleep_duration=10 + approved_csrs=false + + console_max_retries=30 + console_sleep_duration=10 + console_connect_timeout=10 + console_accessible=false + + echo "--- Starting CSR Approval Process ---" + for ((i=1; i<=csr_max_retries; i++)); do + echo "CSR Attempt $i of $csr_max_retries: Checking for pending CSRs..." + if ! oc get csr 2>/dev/null | grep -i Pending; then + echo "No pending CSRs found. Continuing" + approved_csrs=true + break + else + echo "There are pending CSRs. That probably means cluster was hibernated for more than 24 hours. Need to approve them (until OCPBUGS-55339 is resolved)" + if oc get csr -oname | xargs oc adm certificate approve; then + echo "Successfully submitted approval for CSRs on attempt $i." + sleep 2 # Small delay for changes to propagate + if ! oc get csr 2>/dev/null | grep -i Pending; then + echo "Confirmed no pending CSRs after approval." + approved_csrs=true + break + else + echo "Pending CSRs still exist after approval attempt $i." + fi + else + echo "Failed to run approval command for CSRs on attempt $i." + fi + fi + + if [[ "$i" -lt "$csr_max_retries" ]]; then + echo "Sleeping for $csr_sleep_duration seconds before next CSR retry..." + sleep "$csr_sleep_duration" + fi + done + + if [[ "$approved_csrs" == "true" ]]; then + echo "CSR check and approval process completed successfully." + else + echo "Failed to ensure all pending CSRs were approved after $csr_max_retries attempts." 
+ exit 1 + fi + echo "--- CSR Approval Process Finished ---" + + # --- Console URL Accessibility Check --- + echo "--- Starting Console Accessibility Check ---" + + + oc whoami + console_url=$(oc whoami --show-console) + echo "Console URL: $console_url" + # # Check if routes are available (OpenShift-specific resource) + # echo "Checking if routes are available..." + # if ! oc api-resources | grep -q "routes"; then + # echo "Warning: Routes are not available. This might not be an OpenShift cluster or it's not fully ready." + # echo "Waiting for OpenShift components to be ready..." + # sleep 30 + # if ! oc api-resources | grep -q "routes"; then + # echo "Error: Routes still not available. This doesn't appear to be an OpenShift cluster." + # exit 1 + # fi + # fi + + # # Check if openshift-console namespace exists + # echo "Checking if openshift-console namespace exists..." + # if ! oc get namespace openshift-console &>/dev/null; then + # echo "Error: openshift-console namespace not found." + # exit 1 + # fi + + # # Wait for console route to be available + # echo "Waiting for console route to be available..." + # for ((k=1; k<=10; k++)); do + # if oc get route console -n openshift-console &>/dev/null; then + # echo "Console route found." + # break + # fi + # echo "Console route not found, attempt $k/10. Waiting 30 seconds..." + # sleep 30 + # done + + # console_url="https://$(oc get route console -n openshift-console -o jsonpath='{.spec.host}' 2>/dev/null)" + + if [[ -z "$console_url" ]]; then + echo "Error: Could not retrieve OpenShift console URL." + exit 1 + else + echo "Console URL found: $console_url" + for ((j=1; j<=console_max_retries; j++)); do + echo "Console Check Attempt $j of $console_max_retries: Checking console URL accessibility..." + if curl -k --silent --output /dev/null --head --fail --connect-timeout "$console_connect_timeout" "$console_url"; then + echo "Console URL $console_url is accessible (HTTP 2xx)." 
+ console_accessible=true + break + else + curl_exit_code=$? + echo "Console URL $console_url not accessible on attempt $j (curl exit code: $curl_exit_code)." + fi + + if [[ "$j" -lt "$console_max_retries" ]]; then + echo "Sleeping for $console_sleep_duration seconds before next console check retry..." + sleep "$console_sleep_duration" + fi + done + + if [[ "$console_accessible" == "true" ]]; then + echo "Console is ready. Continuing." + else + echo "Failed to access console URL $console_url after $console_max_retries attempts." + exit 1 + fi + fi + echo "--- Console Accessibility Check Finished ---" - name: tssc-install runAfter: - - provision-rosa + - provision-cluster taskRef: resolver: git params: @@ -98,7 +227,7 @@ spec: value: integration-tests/tasks/tssc-install.yaml params: - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" + value: "$(tasks.provision-cluster.results.ocp-login-command)" - name: job-spec value: "$(params.job-spec)" - name: rhads-config @@ -119,7 +248,7 @@ spec: value: tasks/sprayproxy/sprayproxy-provision/0.1/sprayproxy-provision.yaml params: - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" + value: "$(tasks.provision-cluster.results.ocp-login-command)" - name: tssc-e2e-tests runAfter: - sprayproxy-provision @@ -136,7 +265,7 @@ spec: - name: job-spec value: $(params.job-spec) - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" + value: "$(tasks.provision-cluster.results.ocp-login-command)" - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image @@ -160,7 +289,7 @@ spec: - name: job-spec value: $(params.job-spec) - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" + value: "$(tasks.provision-cluster.results.ocp-login-command)" - name: oci-container value: 
"quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image @@ -168,31 +297,26 @@ spec: - name: testplan value: $(params.testplan) finally: - - name: deprovision-rosa-collect-artifacts - taskRef: - resolver: git - params: - - name: url - value: https://github.com/konflux-ci/tekton-integration-catalog.git - - name: revision - value: main - - name: pathInRepo - value: tasks/rosa/hosted-cp/rosa-hcp-deprovision/0.2/rosa-hcp-deprovision.yaml - params: - - name: test-name - value: $(context.pipelineRun.name) - - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" - - name: oci-container - value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - - name: cluster-name - value: "$(tasks.rosa-hcp-metadata.results.cluster-name)" - - name: konflux-test-infra-secret - value: "$(params.konflux-test-infra-secret)" - - name: cloud-credential-key - value: "$(params.cloud-credential-key)" - - name: pipeline-aggregate-status - value: "$(tasks.status)" + - name: deprovision-cluster + taskSpec: + volumes: + - name: hive-creds-volume + secret: + secretName: rhopp-test + - name: credentials + emptyDir: {} + steps: + - name: deprovision-cluster + image: registry.redhat.io/openshift4/ose-cli@sha256:15da03b04318bcc842060b71e9dd6d6c2595edb4e8fdd11b0c6781eeb03ca182 + volumeMounts: + - name: hive-creds-volume + mountPath: /usr/local/hive-creds + script: | + #!/usr/bin/bash + set -x + oc login $(cat /usr/local/hive-creds/kube_api_url) -u cluster-admin -p $(cat /usr/local/hive-creds/password) + oc whoami + oc delete clusterclaims.hive.openshift.io $(context.pipelineRun.name) -n hive - name: sprayproxy-deprovision when: - input: "$(tasks.sprayproxy-provision.status)" diff --git a/integration-tests/tasks/start-pipelines.yaml b/integration-tests/tasks/start-pipelines.yaml index b156d7ea0..15005d7b3 100644 --- a/integration-tests/tasks/start-pipelines.yaml +++ 
b/integration-tests/tasks/start-pipelines.yaml @@ -166,7 +166,7 @@ spec: "--serviceaccount" "konflux-integration-runner" ) echo "Starting pipeline with testplan (${#testplan_b64} chars): ${testplan_b64:0:50}..." - pipeline_run=$(tkn pipeline start -f https://raw.githubusercontent.com/$REPO_ORG/tssc-cli/refs/heads/$BRANCH/integration-tests/pipelines/tssc-cli-e2e.yaml "${tkn_params[@]}") + pipeline_run=$(tkn pipeline start -f https://raw.githubusercontent.com/rhopp/tssc-cli/refs/heads/hive-try3/integration-tests/pipelines/tssc-cli-e2e.yaml "${tkn_params[@]}") # Construct console URL for the new PipelineRun CONSOLE_URL="${KONFLUX_URL}/ns/${KONFLUX_NAMESPACE}/applications/${KONFLUX_APPLICATION_NAME}/pipelineruns/${pipeline_run}" echo "Started new pipelinerun: ${CONSOLE_URL}" From 1f775f254e9c36cb8c88cdc8999c16863402fe98 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Tue, 7 Oct 2025 10:34:28 +0200 Subject: [PATCH 02/20] fix result reference --- integration-tests/pipelines/tssc-cli-e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 81630d9ff..fe947faf7 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -334,7 +334,7 @@ spec: value: tasks/sprayproxy/sprayproxy-deprovision/0.1/sprayproxy-deprovision.yaml params: - name: ocp-login-command - value: "$(tasks.provision-rosa.results.ocp-login-command)" + value: "$(tasks.provision-cluster.results.ocp-login-command)" - name: pull-request-status-message taskRef: resolver: git From 0ab69989a57570e4e29ec720ca1508b828447f87 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 8 Oct 2025 14:51:31 +0200 Subject: [PATCH 03/20] increase timeout --- integration-tests/pipelines/tssc-cli-e2e.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml 
b/integration-tests/pipelines/tssc-cli-e2e.yaml index fe947faf7..8f738aece 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -81,9 +81,9 @@ spec: spec: clusterPoolName: clusterpool EOF - ## wait for cluster for up to 30 minutes - if ! kubectl wait --for=condition=ClusterRunning clusterclaims.hive.openshift.io/$(context.pipelineRun.name) -n hive --timeout 30m; then - echo "Cluster failed to start in 30 minutes. Deleting clusterClaim" + ## wait for cluster for up to 60 minutes + if ! kubectl wait --for=condition=ClusterRunning clusterclaims.hive.openshift.io/$(context.pipelineRun.name) -n hive --timeout 60m; then + echo "Cluster failed to start in 60 minutes. Deleting clusterClaim" oc delete clusterclaims.hive.openshift.io/$(context.pipelineRun.name) -n hive exit 1 fi From d3ec2373a1768ac50e074d719f59388237eadcb0 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 8 Oct 2025 14:52:44 +0200 Subject: [PATCH 04/20] run just single pipeline --- integration-tests/config/rhads-config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/config/rhads-config b/integration-tests/config/rhads-config index bd22aeb92..8805b31e1 100644 --- a/integration-tests/config/rhads-config +++ b/integration-tests/config/rhads-config @@ -1,4 +1,4 @@ -OCP="4.17,4.18,4.19" +OCP="4.19" ACS="remote" REGISTRY="quay,artifactory,nexus" TPA="local" From bdea7849c09123050ab39499b32f4bc36cc62ebe Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 8 Oct 2025 14:53:54 +0200 Subject: [PATCH 05/20] switch clusterpoo --- integration-tests/pipelines/tssc-cli-e2e.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 8f738aece..750a415f1 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -71,7 +71,7 @@ spec: #!/usr/bin/bash oc login 
$(cat /usr/local/hive-creds/kube_api_url) -u cluster-admin -p $(cat /usr/local/hive-creds/password) oc whoami - oc get clusterpool -n hive + oc get rhopp-test-clusterpool -n hive oc create -f - < Date: Wed, 8 Oct 2025 17:58:49 +0200 Subject: [PATCH 06/20] Increase some timeout ;-) --- integration-tests/pipelines/tssc-cli-e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 750a415f1..438eca1a9 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -117,7 +117,7 @@ spec: echo "There are pending CSRs. That probably means cluster was hibernated for more than 24 hours. Need to approve them (until OCPBUGS-55339 is resolved)" if oc get csr -oname | xargs oc adm certificate approve; then echo "Successfully submitted approval for CSRs on attempt $i." - sleep 2 # Small delay for changes to propagate + sleep 60 # Small delay for changes to propagate if ! oc get csr 2>/dev/null | grep -i Pending; then echo "Confirmed no pending CSRs after approval." 
approved_csrs=true From 603106ab62afb27ace7bbfbdc0c395b410e2e5f5 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Thu, 9 Oct 2025 16:16:51 +0200 Subject: [PATCH 07/20] Disable tls in tests --- integration-tests/pipelines/tssc-cli-e2e.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 438eca1a9..799360632 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -256,9 +256,9 @@ spec: resolver: git params: - name: url - value: https://github.com/redhat-appstudio/tssc-test.git + value: https://github.com/rhopp/tssc-test.git - name: revision - value: main + value: disable_tls - name: pathInRepo value: integration-tests/tasks/tssc-e2e.yaml params: From 2710785552cc51f0e4c4785c4c8a118598810e2f Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Fri, 10 Oct 2025 13:44:12 +0200 Subject: [PATCH 08/20] Update tssc-test-image in e2e tests to use a specific self-signed image --- integration-tests/pipelines/tssc-cli-e2e.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 799360632..6328dc42d 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -269,7 +269,7 @@ spec: - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image - value: $(params.tssc-test-image) + value: "quay.io/rhopp/tssc-tests:self_signed" - name: testplan value: $(params.testplan) - name: rhtap-ui-tests @@ -293,7 +293,7 @@ spec: - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image - value: $(params.tssc-test-image) + value: "$(params.tssc-test-image)" - name: testplan value: $(params.testplan) finally: From 
5dcbdb6510c63d3ee9ad4cb35d5dbb9c1131b65a Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 22 Oct 2025 17:21:10 +0200 Subject: [PATCH 09/20] try the convalescence logic multiple times --- integration-tests/pipelines/tssc-cli-e2e.yaml | 202 +++++++++--------- 1 file changed, 103 insertions(+), 99 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 6328dc42d..182ec9ecd 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -97,122 +97,126 @@ spec: oc get secret -n $cp_namespace $kubeconfig_secret -o jsonpath={.data.kubeconfig} |base64 -d > /tmp/ephemereal.config export KUBECONFIG=/tmp/ephemereal.config - csr_max_retries=5 - csr_sleep_duration=10 - approved_csrs=false - console_max_retries=30 - console_sleep_duration=10 - console_connect_timeout=10 - console_accessible=false + # --- Cluster Provisioning Retry Loop --- + provisioning_max_retries=3 + provisioning_successful=false - echo "--- Starting CSR Approval Process ---" - for ((i=1; i<=csr_max_retries; i++)); do - echo "CSR Attempt $i of $csr_max_retries: Checking for pending CSRs..." - if ! oc get csr 2>/dev/null | grep -i Pending; then - echo "No pending CSRs found. Continuing" - approved_csrs=true - break - else - echo "There are pending CSRs. That probably means cluster was hibernated for more than 24 hours. Need to approve them (until OCPBUGS-55339 is resolved)" - if oc get csr -oname | xargs oc adm certificate approve; then - echo "Successfully submitted approval for CSRs on attempt $i." - sleep 60 # Small delay for changes to propagate - if ! oc get csr 2>/dev/null | grep -i Pending; then - echo "Confirmed no pending CSRs after approval." 
- approved_csrs=true - break + for ((provisioning_attempt=1; provisioning_attempt<=provisioning_max_retries; provisioning_attempt++)); do + echo "=== Cluster Provisioning Attempt $provisioning_attempt of $provisioning_max_retries ===" + + # Reset flags for this attempt + csr_max_retries=5 + csr_sleep_duration=10 + approved_csrs=false + + console_max_retries=30 + console_sleep_duration=10 + console_connect_timeout=10 + console_accessible=false + + echo "--- Starting CSR Approval Process ---" + for ((i=1; i<=csr_max_retries; i++)); do + echo "CSR Attempt $i of $csr_max_retries: Checking for pending CSRs..." + if ! oc get csr 2>/dev/null | grep -i Pending; then + echo "No pending CSRs found. Continuing" + approved_csrs=true + break + else + echo "There are pending CSRs. That probably means cluster was hibernated for more than 24 hours. Need to approve them (until OCPBUGS-55339 is resolved)" + if oc get csr -oname | xargs oc adm certificate approve; then + echo "Successfully submitted approval for CSRs on attempt $i." + sleep 60 # Small delay for changes to propagate + if ! oc get csr 2>/dev/null | grep -i Pending; then + echo "Confirmed no pending CSRs after approval." + approved_csrs=true + break + else + echo "Pending CSRs still exist after approval attempt $i." + fi else - echo "Pending CSRs still exist after approval attempt $i." + echo "Failed to run approval command for CSRs on attempt $i." fi - else - echo "Failed to run approval command for CSRs on attempt $i." fi - fi - if [[ "$i" -lt "$csr_max_retries" ]]; then - echo "Sleeping for $csr_sleep_duration seconds before next CSR retry..." - sleep "$csr_sleep_duration" - fi - done + if [[ "$i" -lt "$csr_max_retries" ]]; then + echo "Sleeping for $csr_sleep_duration seconds before next CSR retry..." 
+ sleep "$csr_sleep_duration" + fi + done - if [[ "$approved_csrs" == "true" ]]; then + if [[ "$approved_csrs" == "false" ]]; then + echo "Failed to ensure all pending CSRs were approved after $csr_max_retries attempts." + if [[ "$provisioning_attempt" -lt "$provisioning_max_retries" ]]; then + echo "Will retry entire provisioning process..." + continue + else + echo "All provisioning attempts exhausted. Exiting." + exit 1 + fi + fi echo "CSR check and approval process completed successfully." - else - echo "Failed to ensure all pending CSRs were approved after $csr_max_retries attempts." - exit 1 - fi - echo "--- CSR Approval Process Finished ---" - - # --- Console URL Accessibility Check --- - echo "--- Starting Console Accessibility Check ---" - + echo "--- CSR Approval Process Finished ---" - oc whoami - console_url=$(oc whoami --show-console) - echo "Console URL: $console_url" - # # Check if routes are available (OpenShift-specific resource) - # echo "Checking if routes are available..." - # if ! oc api-resources | grep -q "routes"; then - # echo "Warning: Routes are not available. This might not be an OpenShift cluster or it's not fully ready." - # echo "Waiting for OpenShift components to be ready..." - # sleep 30 - # if ! oc api-resources | grep -q "routes"; then - # echo "Error: Routes still not available. This doesn't appear to be an OpenShift cluster." - # exit 1 - # fi - # fi - - # # Check if openshift-console namespace exists - # echo "Checking if openshift-console namespace exists..." - # if ! oc get namespace openshift-console &>/dev/null; then - # echo "Error: openshift-console namespace not found." - # exit 1 - # fi - - # # Wait for console route to be available - # echo "Waiting for console route to be available..." - # for ((k=1; k<=10; k++)); do - # if oc get route console -n openshift-console &>/dev/null; then - # echo "Console route found." - # break - # fi - # echo "Console route not found, attempt $k/10. Waiting 30 seconds..." 
- # sleep 30 - # done + # --- Console URL Accessibility Check --- + echo "--- Starting Console Accessibility Check ---" - # console_url="https://$(oc get route console -n openshift-console -o jsonpath='{.spec.host}' 2>/dev/null)" + oc whoami + console_url=$(oc whoami --show-console) + echo "Console URL: $console_url" - if [[ -z "$console_url" ]]; then - echo "Error: Could not retrieve OpenShift console URL." - exit 1 - else - echo "Console URL found: $console_url" - for ((j=1; j<=console_max_retries; j++)); do - echo "Console Check Attempt $j of $console_max_retries: Checking console URL accessibility..." - if curl -k --silent --output /dev/null --head --fail --connect-timeout "$console_connect_timeout" "$console_url"; then - echo "Console URL $console_url is accessible (HTTP 2xx)." - console_accessible=true - break + if [[ -z "$console_url" ]]; then + echo "Error: Could not retrieve OpenShift console URL." + if [[ "$provisioning_attempt" -lt "$provisioning_max_retries" ]]; then + echo "Will retry entire provisioning process..." + continue else - curl_exit_code=$? - echo "Console URL $console_url not accessible on attempt $j (curl exit code: $curl_exit_code)." + echo "All provisioning attempts exhausted. Exiting." + exit 1 fi + else + echo "Console URL found: $console_url" + for ((j=1; j<=console_max_retries; j++)); do + echo "Console Check Attempt $j of $console_max_retries: Checking console URL accessibility..." + if curl -k --silent --output /dev/null --head --fail --connect-timeout "$console_connect_timeout" "$console_url"; then + echo "Console URL $console_url is accessible (HTTP 2xx)." + console_accessible=true + break + else + curl_exit_code=$? + echo "Console URL $console_url not accessible on attempt $j (curl exit code: $curl_exit_code)." + fi - if [[ "$j" -lt "$console_max_retries" ]]; then - echo "Sleeping for $console_sleep_duration seconds before next console check retry..." 
- sleep "$console_sleep_duration" - fi - done + if [[ "$j" -lt "$console_max_retries" ]]; then + echo "Sleeping for $console_sleep_duration seconds before next console check retry..." + sleep "$console_sleep_duration" + fi + done - if [[ "$console_accessible" == "true" ]]; then - echo "Console is ready. Continuing." - else - echo "Failed to access console URL $console_url after $console_max_retries attempts." - exit 1 + if [[ "$console_accessible" == "false" ]]; then + echo "Failed to access console URL $console_url after $console_max_retries attempts." + if [[ "$provisioning_attempt" -lt "$provisioning_max_retries" ]]; then + echo "Will retry entire provisioning process..." + continue + else + echo "All provisioning attempts exhausted. Exiting." + exit 1 + fi + fi fi + echo "Console is ready. Continuing." + echo "--- Console Accessibility Check Finished ---" + + # If we reach here, both CSR approval and console accessibility succeeded + provisioning_successful=true + echo "=== Cluster Provisioning Completed Successfully on Attempt $provisioning_attempt ===" + break + done + + if [[ "$provisioning_successful" == "false" ]]; then + echo "Cluster provisioning failed after $provisioning_max_retries attempts." + exit 1 fi - echo "--- Console Accessibility Check Finished ---" - name: tssc-install runAfter: - provision-cluster From eb45b46501ab35aedf8c07d9698bdc8409b13670 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 22 Oct 2025 17:49:14 +0200 Subject: [PATCH 10/20] Add retry loop for oc login with kubeadmin credentials MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a 5-minute retry loop (30 attempts with 10-second intervals) to ensure successful login to the provisioned cluster using kubeadmin credentials. This handles cases where the cluster API is accessible but authentication may not be immediately ready. 
The retry loop includes proper validation via 'oc whoami' and integrates with the existing provisioning retry logic. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- integration-tests/pipelines/tssc-cli-e2e.yaml | 45 ++++++++++++++++++- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 182ec9ecd..a8dd29084 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -207,12 +207,53 @@ spec: echo "Console is ready. Continuing." echo "--- Console Accessibility Check Finished ---" - # If we reach here, both CSR approval and console accessibility succeeded + # --- OC Login Retry Loop --- + echo "--- Starting OC Login Process ---" + login_max_retries=30 # 5 minutes with 10-second intervals + login_sleep_duration=10 + login_successful=false + + for ((k=1; k<=login_max_retries; k++)); do + echo "OC Login Attempt $k of $login_max_retries: Attempting to login with kubeadmin..." + if oc login --insecure-skip-tls-verify=true "$api_url" -u kubeadmin -p "$kubeadminpass" 2>&1; then + echo "Successfully logged in as kubeadmin on attempt $k." + if oc whoami 2>&1; then + echo "Confirmed login - current user: $(oc whoami)" + login_successful=true + break + else + echo "Login succeeded but 'oc whoami' failed on attempt $k." + fi + else + oc_login_exit_code=$? + echo "OC login failed on attempt $k (exit code: $oc_login_exit_code)." + fi + + if [[ "$k" -lt "$login_max_retries" ]]; then + echo "Sleeping for $login_sleep_duration seconds before next login retry..." + sleep "$login_sleep_duration" + fi + done + + if [[ "$login_successful" == "false" ]]; then + echo "Failed to login with kubeadmin after $login_max_retries attempts." + if [[ "$provisioning_attempt" -lt "$provisioning_max_retries" ]]; then + echo "Will retry entire provisioning process..." 
+ continue + else + echo "All provisioning attempts exhausted. Exiting." + exit 1 + fi + fi + echo "OC login completed successfully." + echo "--- OC Login Process Finished ---" + + # If we reach here, CSR approval, console accessibility, and OC login all succeeded provisioning_successful=true echo "=== Cluster Provisioning Completed Successfully on Attempt $provisioning_attempt ===" break done - + oc get co if [[ "$provisioning_successful" == "false" ]]; then echo "Cluster provisioning failed after $provisioning_max_retries attempts." exit 1 From 40fc0298791539633e47f771503c5d0efb08104e Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Tue, 4 Nov 2025 09:35:58 +0100 Subject: [PATCH 11/20] Fix authentication race condition in OC login retry loop MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous implementation had a race condition where 'oc whoami' would succeed immediately after login but fail moments later when called again. This caused intermittent authentication failures even though login was reported as successful. Changes: - Add 2-second wait after successful login to allow auth to propagate - Capture 'oc whoami' output once instead of calling it multiple times - Add additional verification step with 'oc version' to ensure cluster commands work - Improve error logging to show exit codes and output for debugging This should resolve the "Unauthorized" errors that occurred right after successful login (as seen in lines 399-405 of the previous run logs). 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- integration-tests/pipelines/tssc-cli-e2e.yaml | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index a8dd29084..db8b77631 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -217,12 +217,23 @@ spec: echo "OC Login Attempt $k of $login_max_retries: Attempting to login with kubeadmin..." if oc login --insecure-skip-tls-verify=true "$api_url" -u kubeadmin -p "$kubeadminpass" 2>&1; then echo "Successfully logged in as kubeadmin on attempt $k." - if oc whoami 2>&1; then - echo "Confirmed login - current user: $(oc whoami)" - login_successful=true - break + # Wait a moment for the authentication to fully propagate + sleep 2 + # Verify login with whoami and capture the output + whoami_output=$(oc whoami 2>&1) + whoami_exit_code=$? + if [[ "$whoami_exit_code" -eq 0 ]] && [[ -n "$whoami_output" ]]; then + echo "Confirmed login - current user: $whoami_output" + # Additional verification: try a simple cluster command + if oc version --short 2>/dev/null; then + echo "Login verification successful - can execute cluster commands." + login_successful=true + break + else + echo "Login succeeded but cannot execute cluster commands on attempt $k." + fi else - echo "Login succeeded but 'oc whoami' failed on attempt $k." + echo "Login succeeded but 'oc whoami' failed on attempt $k (exit code: $whoami_exit_code, output: '$whoami_output')." fi else oc_login_exit_code=$? 
From feb40b5db6e569b3cb28b63ffd4011fc9ca86719 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Tue, 4 Nov 2025 13:03:33 +0100 Subject: [PATCH 12/20] Replace invalid oc version --short with oc get namespaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The --short flag is not supported by the oc version command (unlike kubectl). Using 'oc get namespaces' instead provides better verification because: - It actually requires authentication and cluster access to succeed - oc version can show client version even without being logged in - This ensures we're truly authenticated and can access cluster resources 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- integration-tests/pipelines/tssc-cli-e2e.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index db8b77631..d184bec9a 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -224,8 +224,8 @@ spec: whoami_exit_code=$? if [[ "$whoami_exit_code" -eq 0 ]] && [[ -n "$whoami_output" ]]; then echo "Confirmed login - current user: $whoami_output" - # Additional verification: try a simple cluster command - if oc version --short 2>/dev/null; then + # Additional verification: try a simple cluster command that requires authentication + if oc get namespaces >/dev/null 2>&1; then echo "Login verification successful - can execute cluster commands." 
login_successful=true break From a7ebb102c0db29f38bd42707352b0d12fdaed496 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 5 Nov 2025 16:02:13 +0100 Subject: [PATCH 13/20] Add CUSTOM_ROOT_CA var to gitops repo --- integration-tests/pipelines/tssc-cli-e2e.yaml | 4 ++-- integration-tests/tasks/tssc-install.yaml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index d184bec9a..5e2945db0 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -276,9 +276,9 @@ spec: resolver: git params: - name: url - value: https://github.com/redhat-appstudio/tssc-cli.git + value: https://github.com/rhopp/tssc-cli.git - name: revision - value: main + value: hive-try3 - name: pathInRepo value: integration-tests/tasks/tssc-install.yaml params: diff --git a/integration-tests/tasks/tssc-install.yaml b/integration-tests/tasks/tssc-install.yaml index 780e99fcc..11b7c5fbf 100644 --- a/integration-tests/tasks/tssc-install.yaml +++ b/integration-tests/tasks/tssc-install.yaml @@ -103,13 +103,13 @@ spec: echo "export auth_config=\"$AUTH_CONFIG\"" >> ${env_file} # Check if GIT_REPO is tssc-sample-templates and update Developer Hub catalog url based on PR changes - if [[ "${GIT_REPO}" = "tssc-sample-templates" ]]; then + # if [[ "${GIT_REPO}" = "tssc-sample-templates" ]]; then GIT_REVISION="${GIT_REVISION:-$(echo "$JOB_SPEC" | jq -r '.git.commit_sha')}" GIT_URL="${GIT_URL:-$(echo "$JOB_SPEC" | jq -r '.git.source_repo_url')}" TEST_DH_CATALOG_URL="$GIT_URL/blob/$GIT_REVISION/all.yaml" # Update DEVELOPER_HUB__CATALOG__URL value in .ci-env - sed -i "s|^export DEVELOPER_HUB__CATALOG__URL.*|export DEVELOPER_HUB__CATALOG__URL=${TEST_DH_CATALOG_URL}|" "${env_file}" - fi + sed -i "s|^export DEVELOPER_HUB__CATALOG__URL.*|export 
DEVELOPER_HUB__CATALOG__URL=https://github.com/rhopp/tssc-sample-templates/blob/addCUSTOM_ROOT_CAGitops/all.yaml|" "${env_file}" + # fi echo "INFO: .env file for tssc installation" cat ${env_file} From 1318be56ed0c53b525ebb26b5650910b8c1c710c Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Thu, 6 Nov 2025 10:15:06 +0100 Subject: [PATCH 14/20] Update test image Signed-off-by: Radim Hopp --- integration-tests/pipelines/tssc-cli-e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 5e2945db0..3b85e5471 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -325,7 +325,7 @@ spec: - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image - value: "quay.io/rhopp/tssc-tests:self_signed" + value: "quay.io/rhopp/tssc-tests:self_signed1" - name: testplan value: $(params.testplan) - name: rhtap-ui-tests From f72ffddb2546a1864b4c623628667984b29ae058 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Thu, 6 Nov 2025 17:25:33 +0100 Subject: [PATCH 15/20] New version of testplan Signed-off-by: Radim Hopp --- integration-tests/pipelines/tssc-cli-e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 3b85e5471..ebc732689 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -325,7 +325,7 @@ spec: - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image - value: "quay.io/rhopp/tssc-tests:self_signed1" + value: "quay.io/rhopp/tssc-tests:self_signed2" - name: testplan value: $(params.testplan) - name: rhtap-ui-tests From aee0ab0a6e4da91586d299cc8d685cc3c24d42f0 Mon Sep 17 00:00:00 2001
From: Radim Hopp Date: Thu, 13 Nov 2025 12:24:07 +0100 Subject: [PATCH 16/20] Add 10-minute cluster stability observation after provisioning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add comprehensive stability monitoring to diagnose intermittent authorization failures that occur after successful cluster provisioning. This will help identify if the cluster becomes unstable over time or if there are specific patterns to the failures. The observation loop runs for 10 minutes (120 iterations at 5-second intervals) and tests three critical components: 1. Cluster Operators (oc get co) - validates cluster operator availability 2. Console URL accessibility - ensures the web console remains reachable 3. API Server (oc get namespaces) - verifies authentication and API access For each test, the script tracks: - Success/failure counts - Pattern string showing timeline (e.g., "SSSSSFFFSSSS" where S=success, F=failure) - Timestamped logs for any failures - Progress updates every ~100 seconds This diagnostic data will help determine: - If failures are sporadic or follow a pattern - Which component(s) are unstable - How long it takes for the cluster to stabilize - Whether the issue is authentication-specific or broader 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- integration-tests/pipelines/tssc-cli-e2e.yaml | 90 ++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index ebc732689..dc8b67df3 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -264,11 +264,99 @@ spec: echo "=== Cluster Provisioning Completed Successfully on Attempt $provisioning_attempt ===" break done - oc get co + if [[ "$provisioning_successful" == "false" ]]; then echo "Cluster provisioning failed after $provisioning_max_retries 
attempts." exit 1 fi + + # --- Cluster Stability Observation --- + echo "--- Starting Cluster Stability Observation ---" + observation_duration=600 # 10 minutes + observation_interval=5 # Check every 5 seconds + observation_iterations=$((observation_duration / observation_interval)) + + co_success=0 + co_failure=0 + console_success=0 + console_failure=0 + api_success=0 + api_failure=0 + + co_pattern="" + console_pattern="" + api_pattern="" + + echo "Will perform $observation_iterations observations over $observation_duration seconds" + echo "Observation interval: ${observation_interval}s" + echo "" + + for ((obs=1; obs<=observation_iterations; obs++)); do + timestamp=$(date '+%Y-%m-%d %H:%M:%S') + + # Test 1: Cluster Operators + if oc get co >/dev/null 2>&1; then + ((co_success++)) + co_pattern="${co_pattern}S" + else + ((co_failure++)) + co_pattern="${co_pattern}F" + echo "[$timestamp] Observation $obs: FAILURE - oc get co failed" + fi + + # Test 2: Console URL accessibility + if curl -k --silent --output /dev/null --head --fail --connect-timeout 5 "$console_url"; then + ((console_success++)) + console_pattern="${console_pattern}S" + else + ((console_failure++)) + console_pattern="${console_pattern}F" + echo "[$timestamp] Observation $obs: FAILURE - console URL not accessible" + fi + + # Test 3: API server (namespaces) + if oc get namespaces >/dev/null 2>&1; then + ((api_success++)) + api_pattern="${api_pattern}S" + else + ((api_failure++)) + api_pattern="${api_pattern}F" + echo "[$timestamp] Observation $obs: FAILURE - oc get namespaces failed" + fi + + # Print progress every 20 observations (approximately every 100 seconds) + if (( obs % 20 == 0 )); then + echo "[$timestamp] Progress: $obs/$observation_iterations observations completed" + fi + + # Sleep between observations (except on the last iteration) + if [[ "$obs" -lt "$observation_iterations" ]]; then + sleep "$observation_interval" + fi + done + + echo "" + echo "=== Cluster Stability Observation Results 
===" + echo "Total observations: $observation_iterations" + echo "" + echo "Cluster Operators (oc get co):" + echo " Success: $co_success, Failures: $co_failure" + echo " Pattern: $co_pattern" + echo "" + echo "Console URL accessibility:" + echo " Success: $console_success, Failures: $console_failure" + echo " Pattern: $console_pattern" + echo "" + echo "API Server (oc get namespaces):" + echo " Success: $api_success, Failures: $api_failure" + echo " Pattern: $api_pattern" + echo "" + echo "--- Cluster Stability Observation Finished ---" + + # Show current cluster operator status for reference + echo "" + echo "Final cluster operator status:" + oc get co - name: tssc-install runAfter: - provision-cluster From bbe2e18df20ae4c48f839dc42109be71ecbc469e Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Thu, 13 Nov 2025 14:57:19 +0100 Subject: [PATCH 17/20] Add cluster artifact collection task to finally section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new task to collect cluster artifacts when the pipeline fails. This task: - Runs in the finally section to execute even when other tasks fail - Only executes when pipeline status is not "Succeeded" - Logs into the provisioned cluster using the ocp-login-command - Runs gather-extra.sh script to collect diagnostic information - Pushes collected artifacts to OCI storage for later analysis The collected artifacts will help diagnose issues that occur during test execution, particularly the intermittent authorization failures. 
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --- integration-tests/pipelines/tssc-cli-e2e.yaml | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index dc8b67df3..785223995 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -517,3 +517,44 @@ spec: value: $(tasks.status) - name: pipelinerun-name value: $(context.pipelineRun.name) + - name: collect-cluster-artifacts + taskSpec: + volumes: + - name: konflux-test-infra-volume + secret: + secretName: "$(params.konflux-test-infra-secret)" + steps: + - name: collect-artifacts + workingDir: /workspace/cluster-artifacts + onError: continue + image: quay.io/konflux-qe-incubator/konflux-qe-tools:latest + script: | + #!/bin/sh + $(tasks.provision-cluster.results.ocp-login-command) + + curl -sSL https://raw.githubusercontent.com/konflux-ci/konflux-qe-definitions/main/scripts/gather-extra.sh | bash + when: + - input: $(tasks.status) + operator: notin + values: ["Succeeded"] + - name: secure-push-oci + ref: + resolver: git + params: + - name: url + value: https://github.com/konflux-ci/tekton-integration-catalog.git + - name: revision + value: main + - name: pathInRepo + value: stepactions/secure-push-oci/0.1/secure-push-oci.yaml + params: + - name: workdir-path + value: /workspace + - name: oci-ref + value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" + - name: credentials-volume-name + value: konflux-test-infra-volume + when: + - input: $(tasks.status) + operator: notin + values: ["Succeeded"] From e02708b4b8cc7d42a70e195b2f258874aff5345e Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Fri, 14 Nov 2025 13:07:08 +0100 Subject: [PATCH 18/20] Add cert to proxy/cluster --- integration-tests/scripts/install.sh | 36 ++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff 
--git a/integration-tests/scripts/install.sh b/integration-tests/scripts/install.sh index 0ff625214..7c70f0840 100755 --- a/integration-tests/scripts/install.sh +++ b/integration-tests/scripts/install.sh @@ -306,6 +306,42 @@ create_cluster_config() { echo "[INFO] Cluster configuration created successfully" } +wait_for() { + local command="${1}" + local description="${2}" + local timeout="${3}" + local interval="${4}" + printf "Waiting for %s for %s...\n" "${description}" "${timeout}" + timeout --foreground "${timeout}" bash -c " + set -x + until ${command} + do + printf \"Waiting for %s... Trying again in ${interval}s\n\" \"${description}\" + sleep ${interval} + done + set +x + " || return 1 + printf "%s finished!\n" "${description}" +} + +updateCert() { + set -x + kubectl create configmap root-ca -n openshift-config --from-literal=ca-bundle.crt="$(kubectl get configmap "kube-root-ca.crt" -o=json |jq -r '.data["ca.crt"]')" + BASE_DOMAIN=$(oc get ingress.config.openshift.io cluster -o jsonpath='{.spec.domain}') + REGISTRY_URL="rhtap-quay-quay-rhtap-quay.$BASE_DOMAIN" + # REGISTRY=$(oc get routes/rhtap-quay-quay -n rhtap-quay -o jsonpath="{.spec.host}") + kubectl create configmap root-ca-image -n openshift-config --from-literal="$REGISTRY_URL"="$(kubectl get configmap "kube-root-ca.crt" -o=json |jq -r '.data["ca.crt"]')" + kubectl get cm root-ca -n openshift-config + oc patch proxy/cluster --type=merge --patch='{"spec":{"trustedCA":{"name":"root-ca"}}}' + oc patch image.config/cluster --type=merge --patch='{"spec":{"additionalTrustedCA":{"name":"root-ca-image"}}}' + + sleep 5 + oc get co + wait_for "kubectl get clusteroperators -A" "cluster operators to be accessible" "10m" "30" + echo "[INFO] Cluster operators were updated." 
+ set +x +} + install_tssc() { echo "[INFO] Start installing TSSC" From 25d994fdb9e6c372c093ffcdf5f81fa9f25ead0f Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Fri, 14 Nov 2025 14:40:30 +0100 Subject: [PATCH 19/20] forgot to call the function :-( --- integration-tests/scripts/install.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/integration-tests/scripts/install.sh b/integration-tests/scripts/install.sh index 7c70f0840..9a957892b 100755 --- a/integration-tests/scripts/install.sh +++ b/integration-tests/scripts/install.sh @@ -351,6 +351,8 @@ install_tssc() { cat "$tpl_file" set +x + updateCert + jenkins_integration azure_integration tpa_integration From 1da4d8a0a88a0ee35044e14514fea362121e4685 Mon Sep 17 00:00:00 2001 From: Radim Hopp Date: Wed, 19 Nov 2025 13:33:44 +0100 Subject: [PATCH 20/20] New version of tests --- integration-tests/pipelines/tssc-cli-e2e.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/pipelines/tssc-cli-e2e.yaml b/integration-tests/pipelines/tssc-cli-e2e.yaml index 785223995..95bf68226 100644 --- a/integration-tests/pipelines/tssc-cli-e2e.yaml +++ b/integration-tests/pipelines/tssc-cli-e2e.yaml @@ -413,7 +413,7 @@ spec: - name: oci-container value: "quay.io/konflux-test-storage/rhtap-team/rhtap-cli:$(context.pipelineRun.name)" - name: tssc-test-image - value: "quay.io/rhopp/tssc-tests:self_signed2" + value: "quay.io/rhopp/tssc-tests:self_signed3" - name: testplan value: $(params.testplan) - name: rhtap-ui-tests