Changed 'when: always' to 'when: on_success' in the integration test rules. This ensures the tests run only after a successful deployment, instead of also running after a failed one and hitting a stale environment.
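A minimal before/after sketch of the rule change (job name and branch condition taken from the pipeline below; shown for illustration, not as a diff):

  # before
  integration_test_stage:
    rules:
      - if: '$CI_COMMIT_BRANCH == "main"'
        when: always

  # after
  integration_test_stage:
    rules:
      - if: '$CI_COMMIT_BRANCH == "main"'
        when: on_success
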
include:
  - project: 'esv/bsf/pypi/prosper'
    ref: v0.64.1
    file: '/prosper/templates/projects/docker.yml'

variables:
  # renovate: datasource=gitlab-tags depName=esv/bsf/pypi/prosper versioning=semver registryUrl=https://gitlab.global.bsf.tools
  PROSPER_VERSION: v0.64.1

# Prevent duplicate pipelines for MRs
workflow:
  rules:
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: never
    - when: always

# Define stages - extends Prosper's stages with our custom ones
stages:
  - .pre
  - lint
  - build
  - test
  - analyze
  - deploy

kics:
  allow_failure: true
  variables:
    KICS_CONFIG: kics.config

hadolint:
  allow_failure: true

# secrets job is a blocking check

# Post-deployment integration tests template
.integration_test_template: &integration_test_template
  stage: deploy  # Runs in deploy stage, but after deployment due to 'needs'
  image: deps.global.bsf.tools/docker/python:3.12-slim
  timeout: 10m
  before_script:
    - pip install httpx
  script:
    - |
      python - <<'PYTEST_SCRIPT'
      import httpx
      import os
      import sys

      BASE_URL = os.environ.get("BASE_URL")
      if not BASE_URL:
          print("ERROR: BASE_URL not set")
          sys.exit(1)

      print(f"Running integration tests against {BASE_URL}")
      client = httpx.Client(base_url=BASE_URL, timeout=30.0)

      errors = []

      # Test 1: Health endpoint
      print("\n=== Test 1: Health endpoint ===")
      r = client.get("/health")
      if r.status_code == 200:
          print("PASS: Health check passed")
      else:
          errors.append(f"Health check failed: {r.status_code}")

      # Test 2: API responds (list projects)
      print("\n=== Test 2: API responds ===")
      r = client.get("/api/v1/projects")
      if r.status_code == 200:
          projects = r.json()
          print(f"PASS: API responding, found {len(projects)} project(s)")
      else:
          errors.append(f"API check failed: {r.status_code}")

      # Test 3: Frontend served
      print("\n=== Test 3: Frontend served ===")
      r = client.get("/")
      if r.status_code == 200 and "</html>" in r.text:
          print("PASS: Frontend is being served")
      else:
          errors.append(f"Frontend check failed: {r.status_code}")

      # Report results
      print("\n" + "=" * 50)
      if errors:
          print(f"FAILED: {len(errors)} error(s)")
          for e in errors:
              print(f"  FAIL: {e}")
          sys.exit(1)
      else:
          print("SUCCESS: All integration tests passed!")
          sys.exit(0)
      PYTEST_SCRIPT

# Integration tests for stage deployment
integration_test_stage:
  <<: *integration_test_template
  needs: [deploy_stage]
  variables:
    BASE_URL: https://orchard-stage.common.global.bsf.tools
  rules:
    - if: '$CI_COMMIT_BRANCH == "main"'
      when: on_success

# Integration tests for feature deployment
integration_test_feature:
  <<: *integration_test_template
  needs: [deploy_feature]
  variables:
    BASE_URL: https://orchard-$CI_COMMIT_REF_SLUG.common.global.bsf.tools
  rules:
    - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "main"'
      when: on_success

# Run Python backend tests
python_tests:
  stage: test
  needs: []  # Run in parallel with build
  image: deps.global.bsf.tools/docker/python:3.12-slim
  timeout: 15m
  variables:
    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.pip-cache"
  cache:
    key: pip-$CI_COMMIT_REF_SLUG
    paths:
      - .pip-cache/
    policy: pull-push
  before_script:
    - pip install -r backend/requirements.txt
    - pip install pytest pytest-asyncio pytest-cov httpx
  script:
    - cd backend
    # Only run unit tests - integration tests require Docker Compose services
    - python -m pytest tests/unit/ -v --cov=app --cov-report=term --cov-report=xml:coverage.xml --cov-report=html:coverage_html --junitxml=pytest-report.xml
  artifacts:
    when: always
    expire_in: 1 week
    paths:
      - backend/coverage.xml
      - backend/coverage_html/
      - backend/pytest-report.xml
    reports:
      junit: backend/pytest-report.xml
      coverage_report:
        coverage_format: cobertura
        path: backend/coverage.xml
  coverage: '/TOTAL.*\s+(\d+%)/'

# Run frontend tests
frontend_tests:
  stage: test
  needs: []  # Run in parallel with build
  image: deps.global.bsf.tools/docker/node:20-alpine
  timeout: 15m
  cache:
    key: npm-$CI_COMMIT_REF_SLUG
    paths:
      - frontend/node_modules/
    policy: pull-push
  before_script:
    - cd frontend
    - npm config set registry https://deps.global.bsf.tools/artifactory/api/npm/registry.npmjs.org
    - npm ci --verbose
  script:
    - npm run test -- --run --reporter=verbose --coverage
  artifacts:
    when: always
    expire_in: 1 week
    paths:
      - frontend/coverage/
    reports:
      coverage_report:
        coverage_format: cobertura
        path: frontend/coverage/cobertura-coverage.xml
  coverage: '/All files[^|]*\|[^|]*\s+([\d\.]+)/'

# Shared deploy configuration
.deploy_template: &deploy_template
  stage: deploy
  needs: [build_image]
  image: deps.global.bsf.tools/registry-1.docker.io/alpine/k8s:1.29.12

.helm_setup: &helm_setup
  - helm version
  - helm repo add stable https://charts.helm.sh/stable
  - helm repo add bitnami https://charts.bitnami.com/bitnami
  - cd helm/orchard
  - helm dependency update
  - helm repo update

.verify_deployment: &verify_deployment |
  echo "=== Waiting for health endpoint (certs may take a few minutes) ==="
  for i in $(seq 1 30); do
    if curl -sf --max-time 10 "$BASE_URL/health" > /dev/null 2>&1; then
      echo "Health check passed!"
      break
    fi
    echo "Attempt $i/30 - waiting 10s..."
    sleep 10
  done

  # Verify health endpoint
  echo ""
  echo "=== Health Check ==="
  curl -sf "$BASE_URL/health" || { echo "Health check failed"; exit 1; }
  echo ""

  # Verify API is responding
  echo ""
  echo "=== API Check (GET /api/v1/projects) ==="
  HTTP_CODE=$(curl -sf -o /dev/null -w "%{http_code}" "$BASE_URL/api/v1/projects")
  if [ "$HTTP_CODE" = "200" ]; then
    echo "API responding: HTTP $HTTP_CODE"
  else
    echo "API check failed: HTTP $HTTP_CODE"
    exit 1
  fi

  # Verify frontend is served
  echo ""
  echo "=== Frontend Check ==="
  if curl -sf "$BASE_URL/" | grep -q "</html>"; then
    echo "Frontend is being served"
  else
    echo "Frontend check failed"
    exit 1
  fi

  echo ""
  echo "=== All checks passed! ==="
  echo "Deployment URL: $BASE_URL"

# Deploy to stage (main branch)
deploy_stage:
  <<: *deploy_template
  variables:
    NAMESPACE: orch-stage-namespace
    VALUES_FILE: helm/orchard/values-stage.yaml
    BASE_URL: https://orchard-stage.common.global.bsf.tools
  before_script:
    - kubectl config use-context esv/bsf/bsf-integration/orchard/orchard-mvp:orchard-stage
    - *helm_setup
  script:
    - echo "Deploying to stage environment"
    - cd $CI_PROJECT_DIR
    - |
      helm upgrade --install orchard-stage ./helm/orchard \
        --namespace $NAMESPACE \
        -f $VALUES_FILE \
        --set image.tag=git.linux-amd64-$CI_COMMIT_SHA \
        --wait \
        --timeout 5m
    - kubectl rollout status deployment/orchard-stage -n $NAMESPACE --timeout=5m
    - *verify_deployment
  environment:
    name: stage
    url: https://orchard-stage.common.global.bsf.tools
    kubernetes:
      agent: esv/bsf/bsf-integration/orchard/orchard-mvp:orchard-stage
  rules:
    - if: '$CI_COMMIT_BRANCH == "main"'
      when: always

# Deploy feature branch to dev namespace
deploy_feature:
  <<: *deploy_template
  variables:
    NAMESPACE: orch-dev-namespace
    VALUES_FILE: helm/orchard/values-dev.yaml
  before_script:
    - kubectl config use-context esv/bsf/bsf-integration/orchard/orchard-mvp:orchard
    - *helm_setup
  script:
    - echo "Deploying feature branch $CI_COMMIT_REF_SLUG"
    - cd $CI_PROJECT_DIR
    - |
      helm upgrade --install orchard-$CI_COMMIT_REF_SLUG ./helm/orchard \
        --namespace $NAMESPACE \
        -f $VALUES_FILE \
        --set image.tag=git.linux-amd64-$CI_COMMIT_SHA \
        --set ingress.hosts[0].host=orchard-$CI_COMMIT_REF_SLUG.common.global.bsf.tools \
        --set ingress.tls[0].hosts[0]=orchard-$CI_COMMIT_REF_SLUG.common.global.bsf.tools \
        --set ingress.tls[0].secretName=orchard-$CI_COMMIT_REF_SLUG-tls \
        --set minioIngress.host=minio-$CI_COMMIT_REF_SLUG.common.global.bsf.tools \
        --set minioIngress.tls.secretName=minio-$CI_COMMIT_REF_SLUG-tls \
        --wait \
        --timeout 5m
    - kubectl rollout status deployment/orchard-$CI_COMMIT_REF_SLUG -n $NAMESPACE --timeout=5m
    - export BASE_URL="https://orchard-$CI_COMMIT_REF_SLUG.common.global.bsf.tools"
    - *verify_deployment
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    url: https://orchard-$CI_COMMIT_REF_SLUG.common.global.bsf.tools
    on_stop: cleanup_feature
    kubernetes:
      agent: esv/bsf/bsf-integration/orchard/orchard-mvp:orchard
  rules:
    - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "main"'
      when: always

# Cleanup feature branch deployment
cleanup_feature:
  <<: *deploy_template
  needs: []
  variables:
    NAMESPACE: orch-dev-namespace
  before_script:
    - kubectl config use-context esv/bsf/bsf-integration/orchard/orchard-mvp:orchard
  script:
    - echo "Cleaning up feature deployment orchard-$CI_COMMIT_REF_SLUG"
    - helm uninstall orchard-$CI_COMMIT_REF_SLUG --namespace $NAMESPACE || true
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    action: stop
    kubernetes:
      agent: esv/bsf/bsf-integration/orchard/orchard-mvp:orchard
  rules:
    - if: '$CI_COMMIT_BRANCH && $CI_COMMIT_BRANCH != "main"'
      when: manual
      allow_failure: true