Merge pull request 'pipeline' (#5) from pipeline into main
Some checks failed
build / docker-build (push) Failing after 9s

Reviewed-on: mondo/SIM-Data-Platform#5
This commit was merged in pull request #5.
2025-10-17 13:13:14 -05:00
41 changed files with 2909 additions and 332 deletions


@@ -0,0 +1,26 @@
name: build
on:
  push:
    branches:
      - main
jobs:
  docker-build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Login to Docker Registry
        uses: docker/login-action@v3
        with:
          registry: git.bitstorm.ca # e.g., docker.io for Docker Hub
          username: ${{ secrets.REGISTRY_LOGIN }}
          password: ${{ secrets.REGISTRY_TOKEN }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build and push Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: git.bitstorm.ca/mondo/warehouse13:latest
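Before relying on the runner, the build-and-push step can be rehearsed locally; a rough equivalent, assuming Docker with Buildx and an existing `docker login` to git.bitstorm.ca:

```bash
# Rough local equivalent of the workflow's build-and-push step.
docker buildx build --push -t git.bitstorm.ca/mondo/warehouse13:latest .
```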


@@ -1,28 +1,37 @@
stages:
  - test
  - build
  - deploy

# Test stage
test:
  stage: test
  allow_failure: true
  image: containers.global.bsf.tools/node:20.11-alpine3.19
  script:
    - cd frontend
    - npm config set registry https://deps.global.bsf.tools/artifactory/api/npm/registry.npmjs.org/
    - npm config set strict-ssl false
    - npm config fix
    - npm install

build_container:
  stage: build
  image: deps.global.bsf.tools/quay.io/buildah/stable:latest
  variables:
    IMAGE_NAME: "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG-$CI_COMMIT_SHORT_SHA"
  before_script:
    - mkdir -p /tmp/buildah-storage
    - export BUILDAH_ROOT="/tmp/buildah-storage"
    - echo "$CI_REGISTRY_PASSWORD" | buildah login -u "$CI_REGISTRY_USER" --password-stdin "$CI_REGISTRY"
  script:
    - buildah bud --build-arg NPM_REGISTRY=https://deps.global.bsf.tools/artifactory/api/npm/registry.npmjs.org/ --storage-driver vfs --isolation chroot -t $IMAGE_NAME .
    - buildah push --storage-driver vfs $IMAGE_NAME

# build_container:
#   stage: build
#   image: deps.global.bsf.tools/quay.io/buildah/stable:latest
#   variables:
#     IMAGE_NAME: "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_NAME"
#   before_script:
#     - mkdir -p /tmp/buildah-storage
#     - export BUILDAH_ROOT="/tmp/buildah-storage"
#     - echo "$CI_REGISTRY_PASSWORD" | buildah login -u "$CI_REGISTRY_USER" --password-stdin "$CI_REGISTRY"
#   script:
#     - buildah bud --build-arg NPM_REGISTRY=https://deps.global.bsf.tools/artifactory/api/npm/registry.npmjs.org/ --storage-driver vfs --isolation chroot -t $IMAGE_NAME .
#     - buildah push --storage-driver vfs $IMAGE_NAME

deploy_helm_charts:
  stage: deploy
  image:
    name: deps.global.bsf.tools/registry-1.docker.io/alpine/k8s:1.29.12
  parallel:
    matrix:
      # - ENV: "prod"
      #   VALUES_FILE: "helm/values-prod.yaml"
      #   CONTEXT: "esv/bsf/bsf-services/gitlab-kaas-agent-config:services-prod-agent"
      #   NAMESPACE: "bsf-services-namespace"
      #   ONLY: "main"
      - ENV: "dev"
        VALUES_FILE: "helm/warehouse13/values.yaml"
        CONTEXT: "esv/bsf/bsf-services/gitlab-kaas-agent-config:services-prod-agent"
        NAMESPACE: "bsf-services-dev-namespace"
        # ONLY: ["branches", "!main"]
  script:
    - kubectl config use-context $CONTEXT
    - |
      helm upgrade --install warehouse13-$CI_COMMIT_REF_NAME ./helm/warehouse13 --namespace $NAMESPACE -f $VALUES_FILE
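The deploy job can be rehearsed from a workstation without changing cluster state; a sketch, assuming the kubectl context and values file from the matrix above exist locally:

```bash
kubectl config use-context esv/bsf/bsf-services/gitlab-kaas-agent-config:services-prod-agent
helm upgrade --install warehouse13-dev ./helm/warehouse13 \
  --namespace bsf-services-dev-namespace \
  -f helm/warehouse13/values.yaml \
  --dry-run --debug
```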


@@ -1,5 +1,5 @@
# Multi-stage build: First stage builds Angular frontend
-FROM node:24-alpine AS frontend-build
+FROM node:20.11-alpine3.19 AS frontend-build
# Accept npm registry as build argument
ARG NPM_REGISTRY=https://registry.npmjs.org/


@@ -214,35 +214,54 @@ MINIO_BUCKET_NAME=test-artifacts
### Kubernetes with Helm
-1. Build and push Docker image:
+**Quick Start:**
```bash
-docker build -t your-registry/datalake:latest .
-docker push your-registry/datalake:latest
+helm install warehouse13 ./helm/warehouse13 --namespace warehouse13 --create-namespace
```
-2. Install with Helm:
+**Production Deployment:**
```bash
-helm install datalake ./helm \
-  --set image.repository=your-registry/datalake \
-  --set image.tag=latest \
-  --namespace datalake \
-  --create-namespace
+helm install warehouse13 ./helm/warehouse13 \
+  --namespace warehouse13 \
+  --create-namespace \
+  --values ./helm/warehouse13/values-production.yaml
```
-3. Access the API:
+**Air-Gapped Deployment:**
```bash
-kubectl port-forward -n datalake svc/datalake 8000:8000
+helm install warehouse13 ./helm/warehouse13 \
+  --namespace warehouse13 \
+  --create-namespace \
+  --values ./helm/warehouse13/values-airgapped.yaml
```
+**Access the Application:**
+```bash
+kubectl port-forward -n warehouse13 svc/warehouse13-frontend 4200:80
+kubectl port-forward -n warehouse13 svc/warehouse13-api 8000:8000
+```
+### Helm Documentation
+- **Full Helm Guide:** [HELM-DEPLOYMENT.md](./docs/HELM-DEPLOYMENT.md)
+- **Chart README:** [helm/warehouse13/README.md](./helm/warehouse13/README.md)
+- **Quick Start:** [helm/warehouse13/QUICKSTART.md](./helm/warehouse13/QUICKSTART.md)
+- **Example Configurations:**
+  - Development: [values-dev.yaml](./helm/warehouse13/values-dev.yaml)
+  - Production: [values-production.yaml](./helm/warehouse13/values-production.yaml)
+  - Air-Gapped: [values-airgapped.yaml](./helm/warehouse13/values-airgapped.yaml)
### Helm Configuration
-Edit `helm/values.yaml` to customize:
-- Replica count
-- Resource limits
-- Storage backend (S3 vs MinIO)
-- Ingress settings
-- PostgreSQL settings
-- Autoscaling
+All component images are fully configurable in `helm/warehouse13/values.yaml`:
+- PostgreSQL image and version
+- MinIO image and version
+- API image and version
+- Frontend image and version
+- Resource limits and requests
+- Storage backend configuration
+- Ingress and TLS settings
+- Persistence and storage classes
### GitLab CI/CD


@@ -54,7 +54,7 @@ This script will:
```bash
# Option A: Use the helper script
-./build-for-airgap.sh
+./scripts/build-for-airgap.sh
# Option B: Build manually
cd frontend

docs/HELM-DEPLOYMENT.md (new file, 517 lines)

@@ -0,0 +1,517 @@
# Warehouse13 - Kubernetes Deployment with Helm
This guide covers deploying Warehouse13 to Kubernetes using the official Helm chart.
## Table of Contents
1. [Prerequisites](#prerequisites)
2. [Quick Start](#quick-start)
3. [Deployment Scenarios](#deployment-scenarios)
4. [Configuration](#configuration)
5. [Post-Deployment](#post-deployment)
6. [Upgrading](#upgrading)
7. [Troubleshooting](#troubleshooting)
## Prerequisites
- Kubernetes 1.19+ cluster
- Helm 3.0+
- kubectl configured to access your cluster
- Persistent volume provisioner (for production deployments)
### Installing Helm
```bash
# macOS
brew install helm
# Linux
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Windows
choco install kubernetes-helm
```
## Quick Start
### 1. Standard Deployment (Internet Access)
```bash
# Create namespace
kubectl create namespace warehouse13
# Install with default values
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13
# Wait for pods to be ready
kubectl wait --for=condition=ready pod \
--all --namespace warehouse13 --timeout=300s
```
### 2. Access the Application
```bash
# Frontend
kubectl port-forward -n warehouse13 svc/warehouse13-frontend 4200:80
# API
kubectl port-forward -n warehouse13 svc/warehouse13-api 8000:8000
# MinIO Console
kubectl port-forward -n warehouse13 svc/warehouse13-minio 9001:9001
```
Then visit:
- Frontend: http://localhost:4200
- API Docs: http://localhost:8000/docs
- MinIO Console: http://localhost:9001
## Deployment Scenarios
### Development Environment
For local testing or CI/CD:
```bash
helm install warehouse13-dev ./helm/warehouse13 \
--namespace warehouse13-dev \
--create-namespace \
--values ./helm/warehouse13/values-dev.yaml
```
**Features:**
- Single replica for all services
- emptyDir storage (no persistence)
- Minimal resource requests
- Always pull latest dev images
### Production Environment
For production with ingress and high availability:
```bash
# First, update the values file with your domain and secrets
cp ./helm/warehouse13/values-production.yaml ./my-production-values.yaml
# Edit the file:
# - Set postgres.auth.password
# - Set minio.auth.rootUser and rootPassword
# - Set ingress.hosts[0].host to your domain
# - Update storageClass for your environment
# Install
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--create-namespace \
--values ./my-production-values.yaml
```
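If you prefer to script those edits rather than make them by hand, yq v4 (mikefarah's yq, assumed installed) can patch the copied values file in place; `PG_PASS` is a placeholder:

```bash
# Patch the copied values file instead of editing it manually.
PG_PASS='change-me' yq -i '.postgres.auth.password = strenv(PG_PASS)' ./my-production-values.yaml
yq -i '.ingress.hosts[0].host = "warehouse13.example.com"' ./my-production-values.yaml
yq -i '.postgres.persistence.storageClass = "fast-ssd"' ./my-production-values.yaml
```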
**Features:**
- 3 replicas for API and frontend
- Persistent storage with PVCs
- Ingress with TLS support
- Resource limits and requests
- Health checks enabled
- Pod anti-affinity for distribution
### Air-Gapped Environment
For restricted/disconnected environments:
```bash
# 1. First, push images to your internal registry
# Example using harbor.internal.example.com
# Pull images (on internet-connected machine)
docker pull postgres:15-alpine
docker pull minio/minio:latest
docker pull warehouse13/api:v1.0.0
docker pull warehouse13/frontend:v1.0.0
# Tag for internal registry
docker tag postgres:15-alpine harbor.internal.example.com/library/postgres:15-alpine
docker tag minio/minio:latest harbor.internal.example.com/library/minio:latest
docker tag warehouse13/api:v1.0.0 harbor.internal.example.com/warehouse13/api:v1.0.0
docker tag warehouse13/frontend:v1.0.0 harbor.internal.example.com/warehouse13/frontend:v1.0.0
# Push to internal registry
docker push harbor.internal.example.com/library/postgres:15-alpine
docker push harbor.internal.example.com/library/minio:latest
docker push harbor.internal.example.com/warehouse13/api:v1.0.0
docker push harbor.internal.example.com/warehouse13/frontend:v1.0.0
# 2. Update the values file with your registry
cp ./helm/warehouse13/values-airgapped.yaml ./my-airgapped-values.yaml
# Edit to match your environment:
# - Update all image.repository values
# - Set secure passwords
# - Configure storage classes
# - Add node selectors/tolerations if needed
# 3. Install on air-gapped cluster
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--create-namespace \
--values ./my-airgapped-values.yaml
```
**Features:**
- All images from custom registry
- Local storage class support
- Node selectors for specific nodes
- Tolerations for tainted nodes
## Configuration
### Configurable Images
All component images can be customized:
```yaml
# PostgreSQL
postgres:
  image:
    repository: postgres # or your-registry/postgres
    tag: 15-alpine
    pullPolicy: IfNotPresent
# MinIO
minio:
  image:
    repository: minio/minio # or your-registry/minio
    tag: latest
    pullPolicy: IfNotPresent
# API Backend
api:
  image:
    repository: warehouse13/api # or your-registry/warehouse13-api
    tag: v1.0.0
    pullPolicy: IfNotPresent
# Frontend
frontend:
  image:
    repository: warehouse13/frontend # or your-registry/warehouse13-frontend
    tag: v1.0.0
    pullPolicy: IfNotPresent
```
### Quick Image Override
```bash
# Override images from command line
helm install warehouse13 ./helm/warehouse13 \
--set postgres.image.repository=myregistry.com/postgres \
--set postgres.image.tag=15-alpine \
--set minio.image.repository=myregistry.com/minio \
--set minio.image.tag=latest \
--set api.image.repository=myregistry.com/warehouse13-api \
--set api.image.tag=v1.0.0 \
--set frontend.image.repository=myregistry.com/warehouse13-frontend \
--set frontend.image.tag=v1.0.0
```
### Storage Configuration
```yaml
# PostgreSQL storage
postgres:
  persistence:
    enabled: true
    size: 50Gi
    storageClass: "fast-ssd" # or "" for default
# MinIO storage
minio:
  persistence:
    enabled: true
    size: 500Gi
    storageClass: "bulk-storage" # or "" for default
```
### Resource Configuration
```yaml
# API resources
api:
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "1Gi"
      cpu: "1000m"
# Frontend resources
frontend:
  resources:
    requests:
      memory: "256Mi"
      cpu: "250m"
    limits:
      memory: "512Mi"
      cpu: "500m"
```
### Ingress Configuration
```yaml
ingress:
  enabled: true
  className: "nginx"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  hosts:
    - host: warehouse13.example.com
      paths:
        - path: /
          pathType: Prefix
          backend: frontend
        - path: /api
          pathType: Prefix
          backend: api
  tls:
    - secretName: warehouse13-tls
      hosts:
        - warehouse13.example.com
```
## Post-Deployment
### Verify Installation
```bash
# Check all pods are running
kubectl get pods -n warehouse13
# Check services
kubectl get svc -n warehouse13
# Check PVCs
kubectl get pvc -n warehouse13
# Check ingress (if enabled)
kubectl get ingress -n warehouse13
```
### View Logs
```bash
# API logs
kubectl logs -n warehouse13 -l app.kubernetes.io/component=api --tail=100 -f
# Frontend logs
kubectl logs -n warehouse13 -l app.kubernetes.io/component=frontend --tail=100 -f
# PostgreSQL logs
kubectl logs -n warehouse13 warehouse13-postgres-0 --tail=100 -f
# MinIO logs
kubectl logs -n warehouse13 warehouse13-minio-0 --tail=100 -f
```
### Initialize MinIO Bucket
```bash
# Port-forward to MinIO console
kubectl port-forward -n warehouse13 svc/warehouse13-minio 9001:9001
# Open http://localhost:9001
# Login with credentials from values.yaml
# Create bucket: "artifacts"
```
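The console steps above can also be scripted with the MinIO client; a minimal sketch, assuming the default credentials from values.yaml:

```bash
# Port-forward the S3 API (9000; the console on 9001 is not an S3 endpoint).
kubectl port-forward -n warehouse13 svc/warehouse13-minio 9000:9000 &
mc alias set w13 http://localhost:9000 minioadmin minioadmin
mc mb --ignore-existing w13/artifacts
```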
## Upgrading
### Upgrade to New Version
```bash
# Update image tags in values file
# Then run upgrade
helm upgrade warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--values ./my-production-values.yaml \
--wait \
--timeout 10m
# Check rollout status
kubectl rollout status deployment/warehouse13-api -n warehouse13
kubectl rollout status deployment/warehouse13-frontend -n warehouse13
```
### Rollback
```bash
# View revision history
helm history warehouse13 -n warehouse13
# Rollback to previous version
helm rollback warehouse13 -n warehouse13
# Rollback to specific revision
helm rollback warehouse13 2 -n warehouse13
```
### Update Values Only
```bash
# Update configuration without changing images
helm upgrade warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--values ./my-updated-values.yaml \
--reuse-values
```
## Backup and Restore
### PostgreSQL Backup
```bash
# Create backup
kubectl exec -n warehouse13 warehouse13-postgres-0 -- \
pg_dump -U warehouse13user warehouse13 > backup-$(date +%Y%m%d).sql
# Restore
cat backup-20241016.sql | kubectl exec -i -n warehouse13 warehouse13-postgres-0 -- \
psql -U warehouse13user warehouse13
```
### MinIO Backup
```bash
# Install MinIO Client
wget https://dl.min.io/client/mc/release/linux-amd64/mc
chmod +x mc
# Configure
kubectl port-forward -n warehouse13 svc/warehouse13-minio 9000:9000
mc alias set w13 http://localhost:9000 <access-key> <secret-key>
# Backup bucket
mc mirror w13/artifacts ./backup/artifacts-$(date +%Y%m%d)
# Restore
mc mirror ./backup/artifacts-20241016 w13/artifacts
```
### Full Backup
```bash
# Backup all PVCs
for pvc in $(kubectl get pvc -n warehouse13 -o name); do
  pvc_name=$(basename $pvc)
  kubectl get -n warehouse13 $pvc -o yaml > backup-${pvc_name}.yaml
done
# Backup Helm values
helm get values warehouse13 -n warehouse13 > backup-values.yaml
```
## Troubleshooting
### Pods Not Starting
```bash
# Check pod status
kubectl get pods -n warehouse13
# Describe pod for events
kubectl describe pod <pod-name> -n warehouse13
# Check logs
kubectl logs <pod-name> -n warehouse13
# Common issues:
# - ImagePullBackOff: Check image repository and credentials
# - Pending: Check PVC status and node resources
# - CrashLoopBackOff: Check application logs
```
### PVC Issues
```bash
# Check PVC status
kubectl get pvc -n warehouse13
# Describe PVC
kubectl describe pvc <pvc-name> -n warehouse13
# Common statuses:
# - Pending: no storage class or insufficient storage (needs attention)
# - Bound: the PVC is healthy
```
### Database Connection Issues
```bash
# Test PostgreSQL connection
kubectl exec -it -n warehouse13 warehouse13-postgres-0 -- \
psql -U warehouse13user -d warehouse13
# Check database logs
kubectl logs -n warehouse13 warehouse13-postgres-0 --tail=100
# Verify secret
kubectl get secret -n warehouse13 warehouse13-secrets -o yaml
```
### Ingress Not Working
```bash
# Check ingress status
kubectl get ingress -n warehouse13
kubectl describe ingress -n warehouse13 warehouse13-ingress
# Check ingress controller logs
kubectl logs -n ingress-nginx -l app.kubernetes.io/component=controller
# Verify TLS certificate
kubectl get certificate -n warehouse13
kubectl describe certificate -n warehouse13 warehouse13-tls
```
### Performance Issues
```bash
# Check resource usage
kubectl top pods -n warehouse13
kubectl top nodes
# Check if pods are being throttled
kubectl describe pod <pod-name> -n warehouse13 | grep -A 5 "State:"
# Increase resources
helm upgrade warehouse13 ./helm/warehouse13 \
--set api.resources.limits.memory=2Gi \
--set api.resources.limits.cpu=2000m
```
## Uninstalling
```bash
# Uninstall the release
helm uninstall warehouse13 -n warehouse13
# Delete PVCs (data will be lost!)
kubectl delete pvc -n warehouse13 -l app.kubernetes.io/instance=warehouse13
# Delete namespace
kubectl delete namespace warehouse13
```
## Additional Resources
- [Helm Chart README](./helm/warehouse13/README.md)
- [Values Documentation](./helm/warehouse13/values.yaml)
- [Docker Deployment Guide](./DEPLOYMENT.md)
- [Main README](./README.md)
## Support
For issues and questions:
- GitHub Issues: https://github.com/yourusername/warehouse13/issues
- Helm Chart Issues: Tag with `helm` label


@@ -0,0 +1,220 @@
# NPM Package Age Policy
## Requirement
All npm packages must be **at least 2 weeks old** before they can be used in this project. This ensures:
- Package stability
- Security vulnerability disclosure time
- Compliance with organizational security policies
## How It Works
### 1. Package Version Pinning
The project uses exact version pinning in `package.json` to prevent automatic updates:
```json
{
  "dependencies": {
    "@angular/core": "19.2.7",  // Exact version, not "^19.2.7" or "~19.2.7"
    // ...
  }
}
```
The `frontend/.npmrc` file enforces this:
```
save-exact=true
package-lock=true
```
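As a quick audit (GNU grep assumed; this is not one of the repo's scripts), range specifiers that slipped past `save-exact` can be flagged:

```bash
# Any hit means a caret/tilde range survived in package.json.
grep -nE '"[~^]' frontend/package.json || echo "All dependency versions are exact."
```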
### 2. Automated Age Checking
Use the provided script to verify all packages meet the 2-week requirement:
```bash
# Check if all packages are at least 2 weeks old
node scripts/check-package-age.js
```
This script (a minimal shell sketch appears after this list):
- Queries npm registry for publish dates
- Calculates age of each package
- Fails if any package is newer than 14 days
- Shows detailed age information for all packages
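A stripped-down, single-package version of that check in shell; the real script iterates the lock file, and GNU date plus jq are assumed here:

```bash
PKG="lodash"; VERSION="4.17.21"   # hypothetical example package
PUBLISHED=$(npm view "$PKG" time --json | jq -r --arg v "$VERSION" '.[$v]')
AGE_DAYS=$(( ( $(date +%s) - $(date -d "$PUBLISHED" +%s) ) / 86400 ))
if [ "$AGE_DAYS" -lt 14 ]; then
  echo "❌ $PKG@$VERSION is only $AGE_DAYS days old (published $PUBLISHED)"; exit 1
fi
echo "✓ $PKG@$VERSION - $AGE_DAYS days old"
```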
### 3. Installation Process
**Always use `npm ci` instead of `npm install`:**
```bash
cd frontend
npm ci # Installs exact versions from package-lock.json
```
**Why `npm ci`?**
- Uses exact versions from `package-lock.json`
- Doesn't update `package-lock.json`
- Ensures reproducible builds
- Faster than `npm install`
## Updating Packages
When you need to add or update packages:
### Step 1: Add Package to package.json
```bash
# Find a version that's at least 2 weeks old
npm view <package-name> time
# Add exact version to package.json
"dependencies": {
"new-package": "1.2.3"
}
```
### Step 2: Verify Age
```bash
node scripts/check-package-age.js
```
### Step 3: Update Lock File
```bash
cd frontend
npm install --package-lock-only
```
### Step 4: Install and Test
```bash
npm ci
npm run build:prod
```
## CI/CD Integration
Add the age check to your CI/CD pipeline:
### GitLab CI Example
```yaml
verify_package_age:
  stage: validate
  image: node:18-alpine
  script:
    - node scripts/check-package-age.js
  only:
    - merge_requests
    - main
```
### GitHub Actions Example
```yaml
- name: Check Package Age
  run: node scripts/check-package-age.js
```
## Troubleshooting
### "Package is too new" Error
If a package fails the age check:
1. **Find an older version:**
```bash
npm view <package-name> versions --json
npm view <package-name>@<older-version> time
```
2. **Update package.json with older version**
3. **Re-run age check:**
```bash
node scripts/check-package-age.js
```
### Can't Find Old Enough Version
If no version meets the 2-week requirement:
- Wait until the package is at least 2 weeks old
- Look for alternative packages
- Request an exception through your security team
## Example Workflow
```bash
# 1. Check current package ages
node scripts/check-package-age.js
# 2. If all pass, install dependencies
cd frontend
npm ci
# 3. Build application
npm run build:prod
# 4. For air-gapped deployment
../scripts/build-for-airgap.sh
```
## Scripts Reference
| Script | Purpose |
|--------|---------|
| `scripts/check-package-age.js` | Verify all packages are ≥ 2 weeks old |
| `scripts/pin-old-versions.sh` | Helper script to validate and pin versions |
| `scripts/build-for-airgap.sh` | Build frontend for air-gapped deployment |
## Best Practices
1. **Always commit `package-lock.json`**
- Ensures everyone uses the same versions
- Required for reproducible builds
2. **Use `npm ci` in CI/CD**
- Faster than `npm install`
- Enforces lock file versions
- Prevents surprises
3. **Regular audits**
```bash
# Check for security vulnerabilities
npm audit
# Check package ages
node scripts/check-package-age.js
```
4. **Version ranges to avoid**
- ❌ `^1.2.3` (allows minor/patch updates)
- ❌ `~1.2.3` (allows patch updates)
- ❌ `*` or `latest` (allows any version)
- ✅ `1.2.3` (exact version only)
## Package Age Check Output
```
Checking package ages (must be at least 2 weeks old)...
✓ @angular/common@19.2.7 - 45 days old
✓ @angular/compiler@19.2.7 - 45 days old
✓ rxjs@7.8.0 - 180 days old
❌ new-package@1.0.0 - 5 days old (published 2025-01-12)
================================================================================
❌ FAILED: 1 package(s) are newer than 2 weeks:
- new-package@1.0.0 (5 days old, published 2025-01-12)
```
## Support
For questions or exceptions:
- Review with security team
- Document in project README
- Update this policy as needed


@@ -164,7 +164,7 @@ curl -X POST "http://localhost:8000/api/v1/artifacts/query" \
make deploy
# Or directly with Helm
-helm install datalake ./helm --namespace datalake --create-namespace
+helm install warehouse13 ./helm/warehouse13 --namespace warehouse13 --create-namespace
```
## Feature Flags Usage
@@ -190,9 +190,8 @@ AWS_REGION=us-east-1
S3_BUCKET_NAME=your-bucket
# Deploy
-helm install datalake ./helm \
-  --set config.deploymentMode=cloud \
-  --set aws.enabled=true
+helm install warehouse13 ./helm/warehouse13 \
+  --set global.deploymentMode=cloud
```
## What's Next


@@ -1,13 +0,0 @@
apiVersion: v2
name: datalake
description: Test Artifact Data Lake - Store and query test artifacts
type: application
version: 1.0.0
appVersion: "1.0.0"
keywords:
- testing
- artifacts
- storage
- data-lake
maintainers:
- name: Your Team

helm/README.md (new file, 42 lines)

@@ -0,0 +1,42 @@
# Helm Charts
This directory contains Helm charts for deploying Warehouse13.
## Current Chart (Recommended)
**Location:** `./warehouse13/`
The latest, fully-featured Helm chart with:
- Warehouse13 branding
- Configurable images for all components
- Multiple deployment scenarios (dev, production, air-gapped)
- Comprehensive documentation
- Example values files
**Usage:**
```bash
helm install warehouse13 ./warehouse13
```
**Documentation:** See [warehouse13/README.md](./warehouse13/README.md)
## Migration from Legacy Chart
If you were using an older version of the chart, migration is straightforward:
```bash
# Uninstall old chart (if named "datalake" or other name)
helm uninstall <old-release-name>
# Install new chart
helm install warehouse13 ./warehouse13 --namespace warehouse13 --create-namespace
# Or upgrade in place (if compatible)
helm upgrade <old-release-name> ./warehouse13
```
Note: Check your values.yaml configuration and update image repositories, resource limits, and other settings as needed.
## Quick Start
See [../docs/HELM-DEPLOYMENT.md](../docs/HELM-DEPLOYMENT.md) for comprehensive deployment guide.


@@ -1,111 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "datalake.fullname" . }}
  labels:
    {{- include "datalake.labels" . | nindent 4 }}
spec:
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "datalake.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "datalake.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "datalake.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.service.targetPort }}
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 5
            periodSeconds: 5
          env:
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: {{ include "datalake.fullname" . }}-secrets
                  key: database-url
            - name: STORAGE_BACKEND
              value: {{ .Values.config.storageBackend | quote }}
            - name: MAX_UPLOAD_SIZE
              value: {{ .Values.config.maxUploadSize | quote }}
            {{- if eq .Values.config.storageBackend "s3" }}
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: {{ include "datalake.fullname" . }}-secrets
                  key: aws-access-key-id
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "datalake.fullname" . }}-secrets
                  key: aws-secret-access-key
            - name: AWS_REGION
              value: {{ .Values.aws.region | quote }}
            - name: S3_BUCKET_NAME
              value: {{ .Values.aws.bucketName | quote }}
            {{- else }}
            - name: MINIO_ENDPOINT
              value: "{{ include "datalake.fullname" . }}-minio:9000"
            - name: MINIO_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "datalake.fullname" . }}-secrets
                  key: minio-access-key
            - name: MINIO_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "datalake.fullname" . }}-secrets
                  key: minio-secret-key
            - name: MINIO_BUCKET_NAME
              value: "test-artifacts"
            - name: MINIO_SECURE
              value: "false"
            {{- end }}
          {{- with .Values.env }}
          {{- toYaml . | nindent 8 }}
          {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "datalake.fullname" . }}-secrets
  labels:
    {{- include "datalake.labels" . | nindent 4 }}
type: Opaque
stringData:
  database-url: "postgresql://{{ .Values.postgresql.auth.username }}:{{ .Values.postgresql.auth.password }}@{{ include "datalake.fullname" . }}-postgresql:5432/{{ .Values.postgresql.auth.database }}"
  {{- if .Values.aws.enabled }}
  aws-access-key-id: {{ .Values.aws.accessKeyId | quote }}
  aws-secret-access-key: {{ .Values.aws.secretAccessKey | quote }}
  {{- else }}
  minio-access-key: {{ .Values.minio.rootUser | quote }}
  minio-secret-key: {{ .Values.minio.rootPassword | quote }}
  {{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "datalake.fullname" . }}
  labels:
    {{- include "datalake.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    {{- include "datalake.selectorLabels" . | nindent 4 }}


@@ -1,111 +0,0 @@
replicaCount: 1
image:
  repository: datalake
  pullPolicy: IfNotPresent
  tag: "latest"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  create: true
  annotations: {}
  name: ""
podAnnotations: {}
podSecurityContext:
  fsGroup: 1000
securityContext:
  capabilities:
    drop:
      - ALL
  readOnlyRootFilesystem: false
  runAsNonRoot: true
  runAsUser: 1000
service:
  type: ClusterIP
  port: 8000
  targetPort: 8000
ingress:
  enabled: false
  className: ""
  annotations: {}
  hosts:
    - host: datalake.local
      paths:
        - path: /
          pathType: Prefix
  tls: []
resources:
  limits:
    cpu: 1000m
    memory: 1Gi
  requests:
    cpu: 500m
    memory: 512Mi
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
# Application configuration
config:
  storageBackend: minio # or "s3"
  maxUploadSize: 524288000 # 500MB
# PostgreSQL configuration
postgresql:
  enabled: true
  auth:
    username: user
    password: password
    database: datalake
  primary:
    persistence:
      enabled: true
      size: 10Gi
# MinIO configuration (for self-hosted storage)
minio:
  enabled: true
  mode: standalone
  rootUser: minioadmin
  rootPassword: minioadmin
  persistence:
    enabled: true
    size: 50Gi
  service:
    type: ClusterIP
    port: 9000
  consoleService:
    port: 9001
# AWS S3 configuration (when using AWS)
aws:
  enabled: false
  accessKeyId: ""
  secretAccessKey: ""
  region: us-east-1
  bucketName: test-artifacts
# Environment variables
env:
  - name: API_HOST
    value: "0.0.0.0"
  - name: API_PORT
    value: "8000"


@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,281 @@
# Warehouse13 Architecture
## Overview
Warehouse13 uses a **unified application container** that includes both the frontend and backend in a single Docker image using a multi-stage build.
## Docker Build Strategy
### Multi-Stage Dockerfile
```dockerfile
# Stage 1: Build Angular Frontend
FROM node:20.11-alpine3.19 AS frontend-build
WORKDIR /frontend
COPY frontend/package*.json ./
RUN npm ci
COPY frontend/ ./
RUN npm run build:prod
# Stage 2: Python Backend with Static Frontend
FROM python:3.11-alpine
WORKDIR /app
# Install Python dependencies
COPY requirements.txt .
RUN pip install -r requirements.txt
# Copy backend code
COPY app/ ./app/
# Copy built frontend from stage 1
COPY --from=frontend-build /frontend/dist/frontend/browser ./static/
# Run FastAPI server
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
```
### Benefits
1. **Simplified Deployment** - Single container to manage
2. **Reduced Resource Usage** - No separate nginx container needed
3. **Easier Scaling** - Scale one deployment instead of two
4. **Consistent Versioning** - Frontend and backend versions always match
5. **Faster Deployments** - Fewer containers to orchestrate
## Service Architecture
```
┌─────────────────────────────────────────┐
│ warehouse13-app │
│ ┌────────────────────────────────────┐ │
│ │ FastAPI Backend (Port 8000) │ │
│ │ ├── /api/* → REST API │ │
│ │ ├── /health → Health check │ │
│ │ ├── /docs → API documentation │ │
│ │ └── /* → Angular SPA │ │
│ │ │ │
│ │ Static Files: /static/ │ │
│ │ └── Angular build output │ │
│ └────────────────────────────────────┘ │
└─────────────────────────────────────────┘
           ├────────────────┐
           ↓                ↓
     ┌──────────┐    ┌────────────┐
     │PostgreSQL│    │   MinIO    │
     │(Metadata)│    │  (Blobs)   │
     └──────────┘    └────────────┘
```
## Helm Chart Structure
### Single Application Deployment
The Helm chart creates:
1. **1 Deployment**: `warehouse13-app`
- Runs the unified container
- Configurable replicas (default: 2)
- Health checks on `/health` endpoint
2. **1 Service**: `warehouse13-app`
- Exposes port 8000
- Routes all traffic to the application
3. **Optional Ingress**
- All paths route to `warehouse13-app:8000`
- FastAPI handles routing internally
### Kubernetes Resources
```yaml
# warehouse13-app Deployment
- Replicas: 2 (configurable)
- Port: 8000
- Health checks: /health
- Environment: DATABASE_URL, MINIO_* vars
# warehouse13-app Service
- Type: ClusterIP
- Port: 8000 → 8000
# Ingress (optional)
- Path: / → warehouse13-app:8000
```
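The rendered resources can be enumerated without installing anything:

```bash
# Count each kind the chart renders with current values.
helm template wh13 ./helm/warehouse13 | grep -E '^kind:' | sort | uniq -c
```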
## Configuration
### Image Configuration
In `values.yaml`:
```yaml
app:
  enabled: true
  image:
    repository: warehouse13/app  # Single unified image
    tag: latest
    pullPolicy: IfNotPresent
  replicas: 2
  resources:
    requests:
      memory: "384Mi"  # Combined frontend + backend
      cpu: "350m"
    limits:
      memory: "768Mi"
      cpu: "750m"
```
### Accessing the Application
**Via Port Forward:**
```bash
kubectl port-forward svc/warehouse13-app 8000:8000
```
Then access:
- Frontend: http://localhost:8000
- API: http://localhost:8000/api
- API Docs: http://localhost:8000/docs
- Health: http://localhost:8000/health
**Via Ingress:**
```yaml
ingress:
  enabled: true
  hosts:
    - host: warehouse13.example.com
      paths:
        - path: /
          pathType: Prefix
          backend: app  # All traffic to one service
```
## Migration from Separate Services
If you previously had separate `api` and `frontend` deployments:
### Before (Old Architecture)
```yaml
# values.yaml (old)
api:
  image: warehouse13/api
  replicas: 2
frontend:
  image: warehouse13/frontend
  replicas: 2
# Two deployments, two services
```
### After (Current Architecture)
```yaml
# values.yaml (current)
app:
  image: warehouse13/app  # Unified image
  replicas: 2
# One deployment, one service
```
### Migration Steps
1. **Update values.yaml** - Change from `api`/`frontend` to `app`
2. **Update image references** - Use `warehouse13/app` instead of separate images
3. **Update ingress** - Point all paths to `app` backend
4. **Deploy** - Helm will handle the transition
5. **Verify** - Check that both frontend and API work through the single service (see the sketch below)
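A hedged sketch of step 4's deploy, reusing the `app.*` keys shown earlier in this document (release name and tag are illustrative):

```bash
helm upgrade warehouse13 ./helm/warehouse13 \
  --namespace warehouse13 \
  --set app.enabled=true \
  --set app.image.repository=warehouse13/app \
  --set app.image.tag=v1.0.0
```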
## Development Workflow
### Building the Image
```bash
# Build unified image
docker build -t warehouse13/app:dev .
# Or for air-gapped environments with custom registry
docker build \
--build-arg NPM_REGISTRY=https://registry.npmjs.org/ \
-t warehouse13/app:v1.0.0 .
```
### Testing Locally
```bash
docker run -p 8000:8000 \
-e DATABASE_URL=postgresql://user:pass@host/db \
-e MINIO_ENDPOINT=minio:9000 \
warehouse13/app:dev
```
Access:
- Frontend: http://localhost:8000
- API: http://localhost:8000/docs
## Performance Considerations
### Resource Allocation
The unified container combines both frontend serving and API processing:
- **Memory**: Angular assets (~50MB) + Python runtime (~100MB) + working memory
- **CPU**: Primarily used for API requests; static file serving is lightweight
- **Recommended Minimum**: 384Mi memory, 350m CPU
- **Production**: 768Mi memory, 750m CPU per replica
### Scaling Strategy
Scale horizontally by increasing replicas:
```bash
# Scale to 5 replicas
kubectl scale deployment warehouse13-app --replicas=5
# Or via Helm
helm upgrade warehouse13 ./helm/warehouse13 --set app.replicas=5
```
### Caching
FastAPI serves static files efficiently with the following (a quick curl check appears after this list):
- ETag support
- Browser caching headers
- Gzip compression (if enabled in FastAPI config)
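A quick way to confirm the ETag behaviour against a port-forwarded instance; the asset path is an assumption about the built frontend:

```bash
# First request captures the ETag; the conditional re-request should return 304.
ETAG=$(curl -sI http://localhost:8000/index.html | tr -d '\r' | awk -F': ' 'tolower($1)=="etag"{print $2}')
curl -s -o /dev/null -w '%{http_code}\n' -H "If-None-Match: $ETAG" http://localhost:8000/index.html
# Expected output: 304
```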
## Troubleshooting
### Frontend Not Loading
```bash
# Check if static files exist in container
kubectl exec -it warehouse13-app-xxx -- ls -la /app/static/
# Should see: index.html, *.js, *.css files
```
### API Not Working
```bash
# Check API health
kubectl exec -it warehouse13-app-xxx -- curl http://localhost:8000/health
# Check logs
kubectl logs warehouse13-app-xxx -f
```
### Both Frontend and API Issues
```bash
# Check if app is running
kubectl get pods -l app.kubernetes.io/component=app
# Check service
kubectl get svc warehouse13-app
# Test connectivity
kubectl port-forward svc/warehouse13-app 8000:8000
curl http://localhost:8000/health
```
## Summary
The unified architecture simplifies deployment and operations while maintaining the same functionality. All routing, caching, and API requests are handled by a single FastAPI application that serves both the Angular SPA and the REST API endpoints.


@@ -0,0 +1,16 @@
apiVersion: v2
name: warehouse13
description: Warehouse13 - Enterprise Test Artifact Storage
type: application
version: 1.0.0
appVersion: "1.0.0"
keywords:
- testing
- artifacts
- storage
- datalake
maintainers:
- name: Warehouse13 Team
home: https://github.com/yourusername/warehouse13
sources:
- https://github.com/yourusername/warehouse13


@@ -0,0 +1,148 @@
# Warehouse13 Helm Chart - Quick Start
## 5-Minute Deployment
### Prerequisites Check
```bash
# Verify Kubernetes cluster access
kubectl cluster-info
# Verify Helm is installed
helm version
# Create namespace
kubectl create namespace warehouse13
```
### Deploy with Defaults
```bash
# Install chart
helm install warehouse13 ./helm/warehouse13 --namespace warehouse13
# Wait for ready
kubectl wait --for=condition=ready pod --all -n warehouse13 --timeout=5m
```
### Access Application
```bash
# In separate terminals, run:
# Terminal 1: Frontend
kubectl port-forward -n warehouse13 svc/warehouse13-frontend 4200:80
# Terminal 2: API
kubectl port-forward -n warehouse13 svc/warehouse13-api 8000:8000
# Terminal 3: MinIO Console
kubectl port-forward -n warehouse13 svc/warehouse13-minio 9001:9001
```
Then open in browser:
- **Frontend:** http://localhost:4200
- **API Docs:** http://localhost:8000/docs
- **MinIO Console:** http://localhost:9001
- Username: `minioadmin`
- Password: `minioadmin`
## Common Scenarios
### 1. Development (No Persistence)
```bash
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--values ./helm/warehouse13/values-dev.yaml
```
### 2. Production (With Ingress)
```bash
# Update values-production.yaml with your settings first
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--values ./helm/warehouse13/values-production.yaml
```
### 3. Air-Gapped (Custom Registry)
```bash
# Update values-airgapped.yaml with your registry first
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--values ./helm/warehouse13/values-airgapped.yaml
```
### 4. Custom Image Repository
```bash
helm install warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--set postgres.image.repository=myregistry.com/postgres \
--set minio.image.repository=myregistry.com/minio \
--set api.image.repository=myregistry.com/warehouse13-api \
--set frontend.image.repository=myregistry.com/warehouse13-frontend
```
## Verify Deployment
```bash
# Check pods
kubectl get pods -n warehouse13
# Check services
kubectl get svc -n warehouse13
# View logs
kubectl logs -n warehouse13 -l app.kubernetes.io/component=api --tail=50
# Check resource usage
kubectl top pods -n warehouse13
```
## Cleanup
```bash
# Uninstall release
helm uninstall warehouse13 -n warehouse13
# Delete PVCs (data will be lost!)
kubectl delete pvc -n warehouse13 --all
# Delete namespace
kubectl delete namespace warehouse13
```
## Next Steps
- **Full Documentation:** [README.md](./README.md)
- **Deployment Guide:** [../../docs/HELM-DEPLOYMENT.md](../../docs/HELM-DEPLOYMENT.md)
- **Configuration Options:** [values.yaml](./values.yaml)
- **Example Configs:** [values-dev.yaml](./values-dev.yaml), [values-production.yaml](./values-production.yaml), [values-airgapped.yaml](./values-airgapped.yaml)
## Troubleshooting
### Pods stuck in Pending
```bash
kubectl describe pod <pod-name> -n warehouse13
# Check: PVC status, node resources, storage classes
```
### Image pull errors
```bash
kubectl describe pod <pod-name> -n warehouse13
# Check: Image repository, credentials, network access
```
### Database connection errors
```bash
kubectl logs -n warehouse13 warehouse13-postgres-0
kubectl get secret -n warehouse13 warehouse13-secrets -o yaml
```
## Support
- GitHub Issues: https://github.com/yourusername/warehouse13/issues
- Documentation: https://warehouse13.example.com/docs

helm/warehouse13/README.md (new file, 441 lines)

@@ -0,0 +1,441 @@
# Warehouse13 Helm Chart
Enterprise Test Artifact Storage - Kubernetes deployment via Helm
## Overview
This Helm chart deploys the complete Warehouse13 stack on Kubernetes:
- **PostgreSQL 15** - Metadata database
- **MinIO** - S3-compatible object storage
- **FastAPI Backend** - REST API server
- **Angular Frontend** - Web UI (nginx-served)
## Prerequisites
- Kubernetes 1.19+
- Helm 3.0+
- PV provisioner support (for persistent storage)
## Installation
### Quick Start
```bash
# Add the Warehouse13 chart repository (if published)
helm repo add warehouse13 https://charts.warehouse13.example.com
helm repo update
# Install with default values
helm install my-warehouse13 warehouse13/warehouse13
# Or install from local chart
helm install my-warehouse13 ./helm/warehouse13
```
### Custom Installation
```bash
# Install with custom values
helm install my-warehouse13 ./helm/warehouse13 \
--set postgres.persistence.size=20Gi \
--set minio.persistence.size=100Gi \
--set api.replicas=3
# Install in a specific namespace
helm install my-warehouse13 ./helm/warehouse13 \
--namespace warehouse13 \
--create-namespace
```
## Configuration
### Configurable Images
All component images can be customized via values.yaml or command-line flags:
```yaml
postgres:
  image:
    repository: postgres
    tag: 15-alpine
    pullPolicy: IfNotPresent
minio:
  image:
    repository: minio/minio
    tag: latest
    pullPolicy: IfNotPresent
api:
  image:
    repository: warehouse13/api
    tag: latest
    pullPolicy: IfNotPresent
frontend:
  image:
    repository: warehouse13/frontend
    tag: latest
    pullPolicy: IfNotPresent
```
**Example: Using custom image registry**
```bash
helm install my-warehouse13 ./helm/warehouse13 \
--set postgres.image.repository=myregistry.example.com/postgres \
--set minio.image.repository=myregistry.example.com/minio \
--set api.image.repository=myregistry.example.com/warehouse13-api \
--set frontend.image.repository=myregistry.example.com/warehouse13-frontend
```
**Example: Air-gapped deployment with specific tags**
```bash
helm install my-warehouse13 ./helm/warehouse13 \
--set postgres.image.repository=harbor.internal/library/postgres \
--set postgres.image.tag=15-alpine \
--set minio.image.repository=harbor.internal/library/minio \
--set minio.image.tag=RELEASE.2024-01-01T00-00-00Z \
--set api.image.repository=harbor.internal/warehouse13/api \
--set api.image.tag=v1.0.0 \
--set frontend.image.repository=harbor.internal/warehouse13/frontend \
--set frontend.image.tag=v1.0.0
```
### Key Parameters
| Parameter | Description | Default |
|-----------|-------------|---------|
| `global.deploymentMode` | Deployment mode (standard/airgapped) | `standard` |
| `global.storageBackend` | Storage backend (minio/s3) | `minio` |
| `postgres.persistence.enabled` | Enable PostgreSQL persistence | `true` |
| `postgres.persistence.size` | PostgreSQL PVC size | `10Gi` |
| `postgres.auth.username` | PostgreSQL username | `user` |
| `postgres.auth.password` | PostgreSQL password | `password` |
| `minio.persistence.enabled` | Enable MinIO persistence | `true` |
| `minio.persistence.size` | MinIO PVC size | `50Gi` |
| `minio.auth.rootUser` | MinIO root username | `minioadmin` |
| `minio.auth.rootPassword` | MinIO root password | `minioadmin` |
| `api.replicas` | Number of API replicas | `2` |
| `frontend.replicas` | Number of frontend replicas | `2` |
| `ingress.enabled` | Enable ingress | `false` |
| `ingress.className` | Ingress class name | `nginx` |
| `ingress.hosts` | Ingress hosts configuration | See values.yaml |
### Example Configurations
#### Production with Ingress
```yaml
# values-production.yaml
global:
  deploymentMode: "standard"
  storageBackend: "minio"
postgres:
  persistence:
    size: 50Gi
    storageClass: "fast-ssd"
  resources:
    requests:
      memory: "1Gi"
      cpu: "500m"
    limits:
      memory: "2Gi"
      cpu: "1000m"
minio:
  persistence:
    size: 500Gi
    storageClass: "bulk-storage"
  resources:
    requests:
      memory: "2Gi"
      cpu: "1000m"
    limits:
      memory: "4Gi"
      cpu: "2000m"
api:
  replicas: 3
  resources:
    requests:
      memory: "512Mi"
      cpu: "500m"
    limits:
      memory: "1Gi"
      cpu: "1000m"
frontend:
  replicas: 3
ingress:
  enabled: true
  className: "nginx"
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  hosts:
    - host: warehouse13.example.com
      paths:
        - path: /
          pathType: Prefix
          backend: frontend
        - path: /api
          pathType: Prefix
          backend: api
  tls:
    - secretName: warehouse13-tls
      hosts:
        - warehouse13.example.com
```
```bash
helm install my-warehouse13 ./helm/warehouse13 -f values-production.yaml
```
#### Air-Gapped Environment
```yaml
# values-airgapped.yaml
global:
  deploymentMode: "airgapped"
  storageBackend: "minio"
postgres:
  image:
    repository: harbor.internal.example.com/library/postgres
    tag: 15-alpine
    pullPolicy: IfNotPresent
minio:
  image:
    repository: harbor.internal.example.com/library/minio
    tag: RELEASE.2024-01-01T00-00-00Z
    pullPolicy: IfNotPresent
api:
  image:
    repository: harbor.internal.example.com/warehouse13/api
    tag: v1.0.0
    pullPolicy: IfNotPresent
frontend:
  image:
    repository: harbor.internal.example.com/warehouse13/frontend
    tag: v1.0.0
    pullPolicy: IfNotPresent
```
```bash
helm install my-warehouse13 ./helm/warehouse13 -f values-airgapped.yaml
```
#### Development/Testing
```yaml
# values-dev.yaml
global:
  deploymentMode: "standard"
postgres:
  persistence:
    enabled: false # Use emptyDir for quick testing
  resources:
    requests:
      memory: "128Mi"
      cpu: "100m"
minio:
  persistence:
    enabled: false
  resources:
    requests:
      memory: "256Mi"
      cpu: "100m"
api:
  replicas: 1
  image:
    tag: dev
frontend:
  replicas: 1
  image:
    tag: dev
```
```bash
helm install my-warehouse13 ./helm/warehouse13 -f values-dev.yaml
```
## Accessing the Application
### Port Forwarding (Development)
```bash
# Access frontend
kubectl port-forward svc/warehouse13-frontend 4200:80
# Access API
kubectl port-forward svc/warehouse13-api 8000:8000
# Access MinIO console
kubectl port-forward svc/warehouse13-minio 9001:9001
# Then visit:
# - Frontend: http://localhost:4200
# - API: http://localhost:8000
# - MinIO Console: http://localhost:9001
```
### Via Ingress (Production)
If ingress is enabled:
```
https://warehouse13.example.com
```
## Upgrading
```bash
# Upgrade with new values
helm upgrade my-warehouse13 ./helm/warehouse13 \
--set api.image.tag=v2.0.0 \
--set frontend.image.tag=v2.0.0
# Upgrade with values file
helm upgrade my-warehouse13 ./helm/warehouse13 -f values-production.yaml
# Upgrade and wait for completion
helm upgrade my-warehouse13 ./helm/warehouse13 --wait --timeout 10m
```
## Uninstalling
```bash
# Uninstall the release
helm uninstall my-warehouse13
# Note: PVCs are not deleted automatically. To delete them:
kubectl delete pvc -l app.kubernetes.io/instance=my-warehouse13
```
## Backup and Restore
### PostgreSQL Backup
```bash
# Create backup
kubectl exec warehouse13-postgres-0 -- pg_dump -U user warehouse13 > backup.sql
# Restore
kubectl exec -i warehouse13-postgres-0 -- psql -U user warehouse13 < backup.sql
```
### MinIO Backup
```bash
# Install mc (MinIO Client), then port-forward the S3 API
kubectl port-forward svc/warehouse13-minio 9000:9000 &
# Configure mc alias against the S3 API (9000), not the console (9001)
mc alias set w13 http://localhost:9000 minioadmin minioadmin
# Mirror bucket
mc mirror w13/artifacts ./backup/artifacts
# Restore
mc mirror ./backup/artifacts w13/artifacts
```
## Troubleshooting
### Check Pod Status
```bash
kubectl get pods -l app.kubernetes.io/name=warehouse13
```
### View Logs
```bash
# API logs
kubectl logs -l app.kubernetes.io/component=api -f
# Frontend logs
kubectl logs -l app.kubernetes.io/component=frontend -f
# PostgreSQL logs
kubectl logs warehouse13-postgres-0 -f
# MinIO logs
kubectl logs warehouse13-minio-0 -f
```
### Check Services
```bash
kubectl get svc -l app.kubernetes.io/name=warehouse13
```
### Common Issues
**Pods stuck in Pending**
- Check PVC status: `kubectl get pvc`
- Verify storage class exists: `kubectl get storageclass`
- Check node resources: `kubectl describe nodes`
**Database connection errors**
- Verify postgres pod is running: `kubectl get pod warehouse13-postgres-0`
- Check database logs: `kubectl logs warehouse13-postgres-0`
- Verify secret exists: `kubectl get secret warehouse13-secrets`
**Frontend cannot reach API**
- Check ingress configuration: `kubectl describe ingress warehouse13-ingress`
- Verify API service: `kubectl get svc warehouse13-api`
- Check API pod health: `kubectl get pods -l app.kubernetes.io/component=api`
## Security Considerations
### Secrets Management
**Default credentials are for development only!** In production:
1. **Use external secrets management:**
```yaml
# Use sealed-secrets, external-secrets, or similar
postgres:
  auth:
    username: "{{ .Values.externalSecrets.postgresUser }}"
    password: "{{ .Values.externalSecrets.postgresPassword }}"
```
2. **Or create secrets manually:**
```bash
kubectl create secret generic warehouse13-secrets \
--from-literal=postgres-username=secure-user \
--from-literal=postgres-password=secure-password \
--from-literal=minio-root-user=secure-minio-user \
--from-literal=minio-root-password=secure-minio-password
# Then install without default secrets
helm install my-warehouse13 ./helm/warehouse13 --set createSecrets=false
```
3. **Enable TLS:**
```yaml
ingress:
  enabled: true
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
  tls:
    - secretName: warehouse13-tls
      hosts:
        - warehouse13.example.com
```
## Support
For issues and questions:
- GitHub Issues: https://github.com/yourusername/warehouse13/issues
- Documentation: https://warehouse13.example.com/docs


@@ -0,0 +1,131 @@
_ _ _ _ _____
| | | | | | / |___ /
| | | | __ _ _ __ ___| |__ ___ _ _ ___ / / |_ \
| |/\| |/ _` | '__/ _ \ '_ \ / _ \| | | / __|/ / ___) |
\ /\ / (_| | | | __/ | | | (_) | |_| \__ \_/ |____/
\/ \/ \__,_|_| \___|_| |_|\___/ \__,_|___(_)
Enterprise Test Artifact Storage has been deployed!
Chart Name: {{ .Chart.Name }}
Chart Version: {{ .Chart.Version }}
App Version: {{ .Chart.AppVersion }}
Release Name: {{ .Release.Name }}
Namespace: {{ .Release.Namespace }}
---
DEPLOYMENT INFORMATION:
{{- if .Values.app.enabled }}
Application (Unified API + Frontend):
Service: warehouse13-app
Replicas: {{ .Values.app.replicas }}
Image: {{ .Values.app.image.repository }}:{{ .Values.app.image.tag }}
Port: {{ .Values.app.service.port }}
Note: Multi-stage build includes both Angular frontend and FastAPI backend
{{- end }}
{{- if .Values.postgres.enabled }}
PostgreSQL:
Service: warehouse13-postgres
Image: {{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}
Persistence: {{ if .Values.postgres.persistence.enabled }}Enabled ({{ .Values.postgres.persistence.size }}){{ else }}Disabled (emptyDir){{ end }}
{{- end }}
{{- if .Values.minio.enabled }}
MinIO:
Service: warehouse13-minio
Image: {{ .Values.minio.image.repository }}:{{ .Values.minio.image.tag }}
Persistence: {{ if .Values.minio.persistence.enabled }}Enabled ({{ .Values.minio.persistence.size }}){{ else }}Disabled (emptyDir){{ end }}
{{- end }}
---
ACCESSING YOUR APPLICATION:
{{- if .Values.ingress.enabled }}
1. Via Ingress:
{{- range .Values.ingress.hosts }}
https://{{ .host }}
{{- end }}
{{- else }}
1. Using Port Forwarding:
# Application (Frontend + API)
kubectl port-forward -n {{ .Release.Namespace }} svc/warehouse13-app 8000:8000
Then visit:
- Frontend: http://localhost:8000
- API Docs: http://localhost:8000/docs
- Health: http://localhost:8000/health
# MinIO Console
kubectl port-forward -n {{ .Release.Namespace }} svc/warehouse13-minio 9001:9001
Then visit: http://localhost:9001
Username: {{ .Values.minio.auth.rootUser }}
Password: {{ .Values.minio.auth.rootPassword }}
2. Expose via LoadBalancer or Ingress for external access.
{{- end }}
---
CHECKING STATUS:
# View all pods
kubectl get pods -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
# Check services
kubectl get svc -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
# View logs
kubectl logs -n {{ .Release.Namespace }} -l app.kubernetes.io/component=app -f
---
UPGRADING:
helm upgrade {{ .Release.Name }} warehouse13/warehouse13 \
--namespace {{ .Release.Namespace }}
---
UNINSTALLING:
helm uninstall {{ .Release.Name }} --namespace {{ .Release.Namespace }}
# Note: PVCs are retained. To delete them:
kubectl delete pvc -n {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
---
{{- if not .Values.ingress.enabled }}
⚠️ IMPORTANT: Ingress is disabled. Enable it for production use:
--set ingress.enabled=true
{{- end }}
{{- if eq .Values.postgres.auth.password "password" }}
⚠️ WARNING: Using default PostgreSQL password!
For production, set a secure password:
--set postgres.auth.password=YOUR_SECURE_PASSWORD
{{- end }}
{{- if eq .Values.minio.auth.rootPassword "minioadmin" }}
⚠️ WARNING: Using default MinIO password!
For production, set a secure password:
--set minio.auth.rootPassword=YOUR_SECURE_PASSWORD
{{- end }}
---
For more information, visit:
Documentation: https://github.com/yourusername/warehouse13
Issues: https://github.com/yourusername/warehouse13/issues
Thank you for using Warehouse13!


@@ -1,14 +1,14 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "datalake.name" -}}
{{- define "warehouse13.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
*/}}
{{- define "datalake.fullname" -}}
{{- define "warehouse13.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
@@ -24,16 +24,16 @@ Create a default fully qualified app name.
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "datalake.chart" -}}
{{- define "warehouse13.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "datalake.labels" -}}
helm.sh/chart: {{ include "datalake.chart" . }}
{{ include "datalake.selectorLabels" . }}
{{- define "warehouse13.labels" -}}
helm.sh/chart: {{ include "warehouse13.chart" . }}
{{ include "warehouse13.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
@@ -43,18 +43,29 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
{{/*
Selector labels
*/}}
{{- define "datalake.selectorLabels" -}}
app.kubernetes.io/name: {{ include "datalake.name" . }}
{{- define "warehouse13.selectorLabels" -}}
app.kubernetes.io/name: {{ include "warehouse13.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "datalake.serviceAccountName" -}}
{{- define "warehouse13.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "datalake.fullname" .) .Values.serviceAccount.name }}
{{- default (include "warehouse13.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
+{{/*
+PostgreSQL connection string
+*/}}
+{{- define "warehouse13.postgresUrl" -}}
+{{- if .Values.app.env.databaseUrl }}
+{{- .Values.app.env.databaseUrl }}
+{{- else }}
+{{- printf "postgresql://%s:%s@warehouse13-postgres:%d/%s" .Values.postgres.auth.username .Values.postgres.auth.password (.Values.postgres.service.port | int) .Values.postgres.auth.database }}
+{{- end }}
+{{- end }}
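The URL this helper renders can be spot-checked without a cluster; the template path passed to `--show-only` is an assumption about this chart's layout:

```bash
helm template wh13 ./helm/warehouse13 --show-only templates/secrets.yaml
# With default values, database-url should read roughly:
# postgresql://<user>:<password>@warehouse13-postgres:5432/<database>
```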


@@ -0,0 +1,99 @@
{{- if .Values.app.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: warehouse13-app
  labels:
    {{- include "warehouse13.labels" . | nindent 4 }}
    app.kubernetes.io/component: app
spec:
  replicas: {{ .Values.app.replicas }}
  selector:
    matchLabels:
      {{- include "warehouse13.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: app
  template:
    metadata:
      labels:
        {{- include "warehouse13.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: app
    spec:
      imagePullSecrets:
        - name: gitlab-dev-ns-registry-secret
      serviceAccountName: {{ include "warehouse13.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: app
          securityContext:
            {{- toYaml .Values.securityContext | nindent 10 }}
          image: "{{ .Values.app.image.repository }}:{{ .Values.app.image.tag }}"
          imagePullPolicy: {{ .Values.app.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8000
              protocol: TCP
          env:
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: warehouse13-secrets
                  key: database-url
            - name: STORAGE_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: warehouse13-config
                  key: STORAGE_BACKEND
            - name: MINIO_ENDPOINT
              valueFrom:
                configMapKeyRef:
                  name: warehouse13-config
                  key: MINIO_ENDPOINT
            - name: MINIO_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: warehouse13-secrets
                  key: minio-root-user
            - name: MINIO_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: warehouse13-secrets
                  key: minio-root-password
            - name: MINIO_BUCKET_NAME
              value: "test-artifacts"
            - name: MINIO_SECURE
              value: "false"
            - name: DEPLOYMENT_MODE
              valueFrom:
                configMapKeyRef:
                  name: warehouse13-config
                  key: DEPLOYMENT_MODE
          resources:
            {{- toYaml .Values.app.resources | nindent 10 }}
          {{- if .Values.app.healthCheck.enabled }}
          livenessProbe:
            httpGet:
              path: {{ .Values.app.healthCheck.liveness.path }}
              port: http
            initialDelaySeconds: {{ .Values.app.healthCheck.liveness.initialDelaySeconds }}
            periodSeconds: {{ .Values.app.healthCheck.liveness.periodSeconds }}
          readinessProbe:
            httpGet:
              path: {{ .Values.app.healthCheck.readiness.path }}
              port: http
            initialDelaySeconds: {{ .Values.app.healthCheck.readiness.initialDelaySeconds }}
            periodSeconds: {{ .Values.app.healthCheck.readiness.periodSeconds }}
          {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}


@@ -0,0 +1,19 @@
{{- if .Values.app.enabled }}
apiVersion: v1
kind: Service
metadata:
name: warehouse13-app
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
app.kubernetes.io/component: app
spec:
type: {{ .Values.app.service.type }}
ports:
- port: {{ .Values.app.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "warehouse13.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: app
{{- end }}


@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: warehouse13-config
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
data:
DEPLOYMENT_MODE: {{ .Values.global.deploymentMode | quote }}
STORAGE_BACKEND: {{ .Values.global.storageBackend | quote }}
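  # With the default values this renders to "warehouse13-minio:9000"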
MINIO_ENDPOINT: {{ printf "warehouse13-minio:%d" (.Values.minio.service.apiPort | int) | quote }}


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.imagePullSecret.name }}
type: kubernetes.io/dockerconfigjson
data:
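  # The inner b64enc produces the "auth" field (base64 of "username:password");
  # the outer b64enc encodes the entire dockerconfigjson payload, as the
  # kubernetes.io/dockerconfigjson Secret type expects.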
.dockerconfigjson: {{
(printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"%s\",\"auth\":\"%s\"}}}"
.Values.imagePullSecret.server
.Values.imagePullSecret.username
.Values.imagePullSecret.password
.Values.imagePullSecret.email
(printf "%s" (b64enc (printf "%s:%s" .Values.imagePullSecret.username .Values.imagePullSecret.password))))
| b64enc | quote
}}


@@ -2,9 +2,9 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "datalake.fullname" . }}
name: warehouse13-ingress
labels:
{{- include "datalake.labels" . | nindent 4 }}
{{- include "warehouse13.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
@@ -33,9 +33,9 @@ spec:
pathType: {{ .pathType }}
backend:
service:
name: {{ include "datalake.fullname" $ }}
name: {{ printf "warehouse13-%s" .backend }}
port:
number: {{ $.Values.service.port }}
number: {{ $.Values.app.service.port }}
{{- end }}
{{- end }}
{{- end }}


@@ -0,0 +1,23 @@
{{- if .Values.minio.enabled }}
apiVersion: v1
kind: Service
metadata:
name: warehouse13-minio
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
app.kubernetes.io/component: storage
spec:
type: {{ .Values.minio.service.type }}
ports:
- port: {{ .Values.minio.service.apiPort }}
targetPort: api
protocol: TCP
name: api
- port: {{ .Values.minio.service.consolePort }}
targetPort: console
protocol: TCP
name: console
selector:
{{- include "warehouse13.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: storage
{{- end }}


@@ -0,0 +1,87 @@
{{- if .Values.minio.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: warehouse13-minio
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
app.kubernetes.io/component: storage
spec:
serviceName: warehouse13-minio
replicas: 1
selector:
matchLabels:
{{- include "warehouse13.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: storage
template:
metadata:
labels:
{{- include "warehouse13.selectorLabels" . | nindent 8 }}
app.kubernetes.io/component: storage
spec:
serviceAccountName: {{ include "warehouse13.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: minio
image: "{{ .Values.minio.image.repository }}:{{ .Values.minio.image.tag }}"
imagePullPolicy: {{ .Values.minio.image.pullPolicy }}
command:
- minio
- server
- /data
- --console-address
- ":9001"
ports:
- name: api
containerPort: 9000
protocol: TCP
- name: console
containerPort: 9001
protocol: TCP
env:
- name: MINIO_ROOT_USER
valueFrom:
secretKeyRef:
name: warehouse13-secrets
key: minio-root-user
- name: MINIO_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: warehouse13-secrets
key: minio-root-password
volumeMounts:
- name: data
mountPath: /data
resources:
{{- toYaml .Values.minio.resources | nindent 10 }}
livenessProbe:
httpGet:
path: /minio/health/live
port: api
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /minio/health/ready
port: api
initialDelaySeconds: 10
periodSeconds: 5
{{- if .Values.minio.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
{{- if .Values.minio.persistence.storageClass }}
storageClassName: {{ .Values.minio.persistence.storageClass }}
{{- end }}
resources:
requests:
storage: {{ .Values.minio.persistence.size }}
{{- else }}
volumes:
- name: data
emptyDir: {}
{{- end }}
{{- end }}


@@ -0,0 +1,19 @@
{{- if .Values.postgres.enabled }}
apiVersion: v1
kind: Service
metadata:
name: warehouse13-postgres
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
app.kubernetes.io/component: database
spec:
type: {{ .Values.postgres.service.type }}
ports:
- port: {{ .Values.postgres.service.port }}
targetPort: postgres
protocol: TCP
name: postgres
selector:
{{- include "warehouse13.selectorLabels" . | nindent 4 }}
app.kubernetes.io/component: database
{{- end }}


@@ -0,0 +1,89 @@
{{- if .Values.postgres.enabled }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: warehouse13-postgres
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
app.kubernetes.io/component: database
spec:
serviceName: warehouse13-postgres
replicas: 1
selector:
matchLabels:
{{- include "warehouse13.selectorLabels" . | nindent 6 }}
app.kubernetes.io/component: database
template:
metadata:
labels:
{{- include "warehouse13.selectorLabels" . | nindent 8 }}
app.kubernetes.io/component: database
spec:
serviceAccountName: {{ include "warehouse13.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: postgres
image: "{{ .Values.postgres.image.repository }}:{{ .Values.postgres.image.tag }}"
imagePullPolicy: {{ .Values.postgres.image.pullPolicy }}
ports:
- name: postgres
containerPort: 5432
protocol: TCP
env:
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: warehouse13-secrets
key: postgres-username
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: warehouse13-secrets
key: postgres-password
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: warehouse13-secrets
key: postgres-database
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
resources:
{{- toYaml .Values.postgres.resources | nindent 10 }}
livenessProbe:
exec:
command:
- pg_isready
- -U
- $(POSTGRES_USER)
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
exec:
command:
- pg_isready
- -U
- $(POSTGRES_USER)
initialDelaySeconds: 10
periodSeconds: 5
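        # Kubernetes expands $(POSTGRES_USER) in exec probe args from the
        # container env above, so both probes follow the secret's username.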
{{- if .Values.postgres.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
{{- if .Values.postgres.persistence.storageClass }}
storageClassName: {{ .Values.postgres.persistence.storageClass }}
{{- end }}
resources:
requests:
storage: {{ .Values.postgres.persistence.size }}
{{- else }}
volumes:
- name: data
emptyDir: {}
{{- end }}
{{- end }}


@@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: warehouse13-secrets
labels:
{{- include "warehouse13.labels" . | nindent 4 }}
type: Opaque
stringData:
postgres-username: {{ .Values.postgres.auth.username | quote }}
postgres-password: {{ .Values.postgres.auth.password | quote }}
postgres-database: {{ .Values.postgres.auth.database | quote }}
minio-root-user: {{ .Values.minio.auth.rootUser | quote }}
minio-root-password: {{ .Values.minio.auth.rootPassword | quote }}
database-url: {{ include "warehouse13.postgresUrl" . | quote }}


@@ -2,9 +2,9 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "datalake.serviceAccountName" . }}
name: {{ include "warehouse13.serviceAccountName" . }}
labels:
{{- include "datalake.labels" . | nindent 4 }}
{{- include "warehouse13.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}


@@ -0,0 +1,83 @@
# Warehouse13 - Air-Gapped Deployment Example
# Use this for restricted/disconnected environments
global:
deploymentMode: "airgapped"
storageBackend: "minio"
# PostgreSQL with custom registry
postgres:
enabled: true
image:
repository: harbor.internal.example.com/library/postgres
tag: 15-alpine
pullPolicy: IfNotPresent
auth:
username: warehouse13user
password: CHANGE_ME_SECURE_PASSWORD
database: warehouse13
persistence:
enabled: true
size: 20Gi
storageClass: "local-storage"
resources:
requests:
memory: "512Mi"
cpu: "500m"
limits:
memory: "1Gi"
cpu: "1000m"
# MinIO with custom registry
minio:
enabled: true
image:
repository: harbor.internal.example.com/library/minio
tag: RELEASE.2024-01-01T00-00-00Z
pullPolicy: IfNotPresent
auth:
rootUser: CHANGE_ME_MINIO_USER
rootPassword: CHANGE_ME_MINIO_PASSWORD
persistence:
enabled: true
size: 100Gi
storageClass: "local-storage"
resources:
requests:
memory: "1Gi"
cpu: "500m"
limits:
memory: "2Gi"
cpu: "1000m"
# Application with custom registry (unified API + Frontend)
app:
enabled: true
image:
repository: harbor.internal.example.com/warehouse13/app
tag: v1.0.0
pullPolicy: IfNotPresent
replicas: 2
resources:
requests:
memory: "768Mi"
cpu: "750m"
limits:
memory: "1536Mi"
cpu: "1500m"
# Ingress disabled for air-gapped - use NodePort or port-forward
ingress:
enabled: false
# Node selector for specific nodes
nodeSelector:
environment: production
storage: local
# Tolerations for tainted nodes
tolerations:
- key: "airgapped"
operator: "Equal"
value: "true"
effect: "NoSchedule"


@@ -0,0 +1,69 @@
# Warehouse13 - Development/Testing Deployment Example
# Use this for local testing or CI/CD environments
global:
deploymentMode: "standard"
storageBackend: "minio"
postgres:
enabled: true
image:
repository: postgres
tag: 15-alpine
pullPolicy: IfNotPresent
auth:
username: dev
password: dev
database: warehouse13dev
persistence:
enabled: false # Use emptyDir for faster cleanup
resources:
requests:
memory: "128Mi"
cpu: "100m"
limits:
memory: "256Mi"
cpu: "250m"
minio:
enabled: true
image:
repository: minio/minio
tag: latest
pullPolicy: IfNotPresent
auth:
rootUser: minioadmin
rootPassword: minioadmin
persistence:
enabled: false # Use emptyDir for faster cleanup
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "250m"
app:
enabled: true
image:
repository: warehouse13/app
tag: dev
pullPolicy: Always # Always pull latest dev image
replicas: 1
resources:
requests:
memory: "384Mi"
cpu: "350m"
limits:
memory: "768Mi"
cpu: "750m"
healthCheck:
enabled: true
ingress:
enabled: false # Use port-forward for dev
serviceAccount:
create: true
name: "warehouse13-dev"


@@ -0,0 +1,98 @@
# Warehouse13 - Production Deployment Example
# Use this for production environments with ingress and proper resources
global:
deploymentMode: "standard"
storageBackend: "minio"
postgres:
enabled: true
image:
repository: postgres
tag: 15-alpine
pullPolicy: IfNotPresent
auth:
username: warehouse13user
password: CHANGE_ME_SECURE_PASSWORD
database: warehouse13
persistence:
enabled: true
size: 50Gi
storageClass: "fast-ssd"
resources:
requests:
memory: "1Gi"
cpu: "1000m"
limits:
memory: "2Gi"
cpu: "2000m"
minio:
enabled: true
image:
repository: minio/minio
tag: latest
pullPolicy: IfNotPresent
auth:
rootUser: CHANGE_ME_MINIO_USER
rootPassword: CHANGE_ME_MINIO_PASSWORD
persistence:
enabled: true
size: 500Gi
storageClass: "bulk-storage"
resources:
requests:
memory: "2Gi"
cpu: "1000m"
limits:
memory: "4Gi"
cpu: "2000m"
app:
enabled: true
image:
repository: warehouse13/app
tag: v1.0.0
pullPolicy: IfNotPresent
replicas: 3
resources:
requests:
memory: "768Mi"
cpu: "750m"
limits:
memory: "1536Mi"
cpu: "1500m"
healthCheck:
enabled: true
ingress:
enabled: true
className: "nginx"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
hosts:
- host: warehouse13.example.com
paths:
- path: /
pathType: Prefix
backend: app
tls:
- secretName: warehouse13-tls
hosts:
- warehouse13.example.com
# Affinity for pod distribution
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app.kubernetes.io/name
operator: In
values:
- warehouse13
topologyKey: kubernetes.io/hostname


@@ -0,0 +1,148 @@
# Warehouse13 - Enterprise Test Artifact Storage
# Default values for Helm chart
# Global settings
global:
deploymentMode: "air-gapped" # standard or airgapped
storageBackend: "minio" # minio or s3
# PostgreSQL Database
postgres:
enabled: true
image:
repository: containers.global.bsf.tools/postgres
tag: 15-alpine
pullPolicy: Always
auth:
username: user
password: password
database: warehouse13
persistence:
enabled: false
size: 10Gi
storageClass: ""
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "512Mi"
cpu: "500m"
service:
type: ClusterIP
port: 5432
# MinIO Object Storage
minio:
enabled: true
image:
repository: containers.global.bsf.tools/minio/minio
tag: latest
pullPolicy: Always
auth:
rootUser: minioadmin
rootPassword: minioadmin
persistence:
enabled: true
size: 50Gi
storageClass: ""
resources:
requests:
memory: "1Gi"
cpu: "250m"
limits:
memory: "1Gi"
cpu: "1000m"
service:
type: ClusterIP
apiPort: 9000
consolePort: 9001
# Application (Unified API + Frontend)
# The application uses a multi-stage Docker build:
# - Stage 1: Builds Angular frontend
# - Stage 2: Python FastAPI backend that serves the frontend from /static
app:
enabled: true
image:
repository: registry.global.bsf.tools/esv/bsf/bsf-services/warehouse13
tag: main-7126c618
pullPolicy: Always
replicas: 1
env:
databaseUrl: "postgresql://user:password@warehouse13-postgres:5432/warehouse13"
storageBackend: "minio"
minioEndpoint: "warehouse13-minio:9000"
resources:
requests:
memory: "768Mi"
cpu: "350m"
limits:
memory: "768Mi"
cpu: "750m"
service:
type: ClusterIP
port: 8000
healthCheck:
enabled: true
liveness:
path: /health
initialDelaySeconds: 30
periodSeconds: 10
readiness:
path: /health
initialDelaySeconds: 10
periodSeconds: 5
# Ingress
ingress:
enabled: true
className: "nginx"
annotations:
cert-manager.io/cluster-issuer: "letsencrypt"
hosts:
- host: warehouse13.common.global.bsf.tools
paths:
- path: /
pathType: Prefix
backend: app # All traffic goes to unified app (serves both API and frontend)
tls:
- secretName: warehouse13-tls
hosts:
- warehouse13.common.global.bsf.tools
# Service Account
serviceAccount:
create: true
annotations: {}
name: "warehouse13"
# Pod Security
podSecurityContext:
fsGroup: 2000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: false
runAsNonRoot: true
runAsUser: 1000
# Node selector
nodeSelector: {}
# Tolerations
tolerations: []
# Affinity
affinity: {}
imagePullSecret:
name: gitlab-dev-ns-registry-secret
username: project_9145_bot_imagepuller
  # Read-only token, so it is acceptable to hard-code here
password: glpat-ZV7ASvBqFoiWC9QqD5WlTG86MQp1OjVxMgk.01.0z192vpfw
server: registry.global.bsf.tools
email: botemail@global.bsf.tool


@@ -30,7 +30,7 @@ fi
echo "Step 1: Building Angular frontend locally..."
echo "==========================================="
./build-for-airgap.sh
./scripts/build-for-airgap.sh
echo ""
echo "Step 2: Starting Docker containers..."

scripts/check-package-age.js (new executable file)

@@ -0,0 +1,146 @@
#!/usr/bin/env node
/**
* Check if npm packages are at least 2 weeks old before installation
* Usage: node scripts/check-package-age.js
*/
const https = require('https');
const fs = require('fs');
const path = require('path');
const TWO_WEEKS_MS = 14 * 24 * 60 * 60 * 1000;
function getPackageInfo(packageName, version) {
return new Promise((resolve, reject) => {
// Remove version prefixes like ^, ~, >=
let cleanVersion = version.replace(/^[\^~>=]+/, '');
// If version contains .x or wildcards, fetch all versions and find latest matching
if (cleanVersion.includes('x') || cleanVersion.includes('*')) {
const url = `https://registry.npmjs.org/${packageName}`;
https.get(url, (res) => {
let data = '';
res.on('data', (chunk) => {
data += chunk;
});
res.on('end', () => {
try {
const info = JSON.parse(data);
const versions = Object.keys(info.versions || {});
// Convert version pattern to regex (e.g., "19.2.x" -> /^19\.2\.\d+$/)
const pattern = cleanVersion.replace(/\./g, '\\.').replace(/x|\*/g, '\\d+');
const regex = new RegExp(`^${pattern}$`);
            // Find matching versions and pick the latest. A plain .sort() compares
            // lexicographically ("19.2.10" < "19.2.9"), so compare parts numerically.
            const matchingVersions = versions.filter(v => regex.test(v)).sort((a, b) => {
              const pa = a.split('.').map(Number);
              const pb = b.split('.').map(Number);
              for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
                if ((pa[i] || 0) !== (pb[i] || 0)) return (pa[i] || 0) - (pb[i] || 0);
              }
              return 0;
            });
            const latestMatching = matchingVersions[matchingVersions.length - 1];
if (latestMatching && info.time && info.time[latestMatching]) {
resolve({
name: packageName,
version: latestMatching,
published: info.time[latestMatching],
error: null
});
} else {
reject(new Error(`No matching version found for ${packageName}@${cleanVersion}`));
}
} catch (err) {
reject(new Error(`Failed to parse response for ${packageName}@${cleanVersion}`));
}
});
}).on('error', (err) => {
reject(err);
});
    } else {
      // Exact version lookup. The per-version registry endpoint does not
      // return publish timestamps, so fetch the full packument and read
      // the time entry for this exact version.
      const url = `https://registry.npmjs.org/${packageName}`;
      https.get(url, (res) => {
        let data = '';
        res.on('data', (chunk) => {
          data += chunk;
        });
        res.on('end', () => {
          try {
            const info = JSON.parse(data);
            if (info.time && info.time[cleanVersion]) {
              resolve({
                name: packageName,
                version: cleanVersion,
                published: info.time[cleanVersion],
                error: null
              });
            } else {
              reject(new Error(`No publish time found for ${packageName}@${cleanVersion}`));
            }
          } catch (err) {
            reject(new Error(`Failed to parse response for ${packageName}@${cleanVersion}`));
          }
        });
      }).on('error', (err) => {
        reject(err);
      });
    }
});
}
async function checkPackageAge() {
const packageJsonPath = path.join(__dirname, '../frontend/package.json');
const packageJson = JSON.parse(fs.readFileSync(packageJsonPath, 'utf8'));
const allDeps = {
...packageJson.dependencies,
...packageJson.devDependencies
};
console.log('Checking package ages (must be at least 2 weeks old)...\n');
const tooNew = [];
const errors = [];
for (const [name, version] of Object.entries(allDeps)) {
try {
const info = await getPackageInfo(name, version);
const publishDate = new Date(info.published);
const age = Date.now() - publishDate.getTime();
const ageInDays = Math.floor(age / (24 * 60 * 60 * 1000));
if (age < TWO_WEEKS_MS) {
tooNew.push({
name,
version: info.version,
age: ageInDays,
published: publishDate.toISOString().split('T')[0]
});
console.log(`❌ ${name}@${info.version} - ${ageInDays} days old (published ${publishDate.toISOString().split('T')[0]})`);
} else {
console.log(`✓ ${name}@${info.version} - ${ageInDays} days old`);
}
} catch (err) {
errors.push({ name, version, error: err.message });
console.log(`⚠️ ${name}@${version} - Could not check: ${err.message}`);
}
}
console.log('\n' + '='.repeat(80));
if (tooNew.length > 0) {
console.log(`\n❌ FAILED: ${tooNew.length} package(s) are newer than 2 weeks:\n`);
tooNew.forEach(pkg => {
console.log(` - ${pkg.name}@${pkg.version} (${pkg.age} days old, published ${pkg.published})`);
});
process.exit(1);
} else if (errors.length > 0) {
console.log(`\n⚠️ WARNING: Could not verify ${errors.length} package(s)`);
process.exit(0);
} else {
console.log('\n✓ SUCCESS: All packages are at least 2 weeks old');
process.exit(0);
}
}
checkPackageAge();


@@ -18,7 +18,7 @@ else
echo " Expected: frontend/dist/frontend/browser"
echo ""
echo " You need to build the Angular app first:"
echo " Run: ./build-for-airgap.sh"
echo " Run: ./scripts/build-for-airgap.sh"
echo " OR: cd frontend && npm install && npm run build:prod"
echo ""
errors=$((errors + 1))

scripts/pin-old-versions.sh (new executable file)

@@ -0,0 +1,41 @@
#!/bin/bash
# Pin npm packages to versions that are at least 2 weeks old
# This script helps ensure compliance with package age requirements
set -e
echo "========================================="
echo "Pin NPM Packages to Old Versions"
echo "========================================="
echo ""
cd frontend
echo "Step 1: Checking current package ages..."
node ../scripts/check-package-age.js || {
echo ""
echo "Some packages are too new. Recommendations:"
echo "1. Manually downgrade packages in package.json to older versions"
echo "2. Run: npm install --package-lock-only to update lock file"
echo "3. Re-run this script to verify"
exit 1
}
echo ""
echo "Step 2: Ensuring package-lock.json uses exact versions..."
if [ -f "package-lock.json" ]; then
echo "✓ package-lock.json exists"
else
echo "⚠ package-lock.json does not exist. Creating it..."
npm install --package-lock-only
fi
echo ""
echo "========================================="
echo "✓ All packages meet the 2-week age requirement"
echo "========================================="
echo ""
echo "To install these packages:"
echo " npm ci # Uses exact versions from package-lock.json"
echo ""