Kubernetes Deployment
Deploy Artifact Keeper on Kubernetes for high availability, scalability, and cloud-native infrastructure.
Prerequisites
- Kubernetes 1.24+ cluster
- kubectl configured
- 100 GB storage for artifacts
- 50 GB storage for backups
- Ingress controller (nginx, traefik, etc.)
- cert-manager (optional, for TLS certificates)
Architecture Overview
┌─────────────────────────────────────────────┐
│               Ingress (HTTPS)               │
└──────────────────────┬──────────────────────┘
                       │
            ┌──────────┴──────────┐
            │                     │
     ┌──────▼──────┐       ┌──────▼──────┐
     │  Frontend   │       │   Backend   │
     │ Deployment  │       │ Deployment  │
     │  (2 pods)   │       │  (2 pods)   │
     └─────────────┘       └──────┬──────┘
                                  │
                           ┌──────▼──────┐
                           │ PostgreSQL  │
                           │ StatefulSet │
                           │   (1 pod)   │
                           └─────────────┘
Quick Start
1. Clone Repository
git clone https://github.com/artifact-keeper/artifact-keeper.git
cd artifact-keeper/deploy/k8s
2. Create Namespace
kubectl create namespace artifact-keeper
3. Configure Secrets
Create the application secrets with randomly generated values:
kubectl create secret generic artifact-keeper-secrets \
  --from-literal=db-password=$(openssl rand -base64 32) \
  --from-literal=jwt-secret=$(openssl rand -base64 64) \
  --namespace artifact-keeper
4. Deploy Resources
# Using Kustomize
kubectl apply -k overlays/production
# Or manually
kubectl apply -f namespace.yaml
kubectl apply -f configmap.yaml
kubectl apply -f secrets.yaml
kubectl apply -f postgres-statefulset.yaml
kubectl apply -f backend-deployment.yaml
kubectl apply -f frontend-deployment.yaml
kubectl apply -f ingress.yaml
5. Verify Deployment
kubectl get pods -n artifact-keeper
kubectl get svc -n artifact-keeper
kubectl get ingress -n artifact-keeper
Kubernetes Manifests
Namespace
namespace.yaml:
apiVersion: v1
kind: Namespace
metadata:
  name: artifact-keeper
  labels:
    name: artifact-keeper
ConfigMap
configmap.yaml:
apiVersion: v1
kind: ConfigMap
metadata:
  name: artifact-keeper-config
  namespace: artifact-keeper
data:
  STORAGE_BACKEND: "filesystem"
  STORAGE_PATH: "/data/artifacts"
  RUST_LOG: "info"
  PORT: "8080"
  BACKUP_ENABLED: "true"
  BACKUP_SCHEDULE: "0 2 * * *"
  BACKUP_PATH: "/data/backups"
Secrets
secrets.yaml:
apiVersion: v1
kind: Secret
metadata:
  name: artifact-keeper-secrets
  namespace: artifact-keeper
type: Opaque
stringData:
  db-password: "CHANGE-ME"
  jwt-secret: "CHANGE-ME"
  # For S3 storage (optional)
  s3-access-key: ""
  s3-secret-key: ""
PostgreSQL StatefulSet
postgres-statefulset.yaml:
apiVersion: v1
kind: Service
metadata:
  name: postgres
  namespace: artifact-keeper
spec:
  selector:
    app: postgres
  ports:
    - port: 5432
      targetPort: 5432
  clusterIP: None
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
  namespace: artifact-keeper
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: postgres:15-alpine
          ports:
            - containerPort: 5432
          env:
            - name: POSTGRES_DB
              value: artifact_registry
            - name: POSTGRES_USER
              value: registry
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: artifact-keeper-secrets
                  key: db-password
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          livenessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - registry
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - registry
            initialDelaySeconds: 5
            periodSeconds: 5
  volumeClaimTemplates:
    - metadata:
        name: postgres-data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: standard
        resources:
          requests:
            storage: 50Gi
Backend Deployment
backend-deployment.yaml:
apiVersion: v1
kind: Service
metadata:
  name: backend
  namespace: artifact-keeper
spec:
  selector:
    app: backend
  ports:
    - port: 8080
      targetPort: 8080
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend
  namespace: artifact-keeper
spec:
  replicas: 2
  selector:
    matchLabels:
      app: backend
  template:
    metadata:
      labels:
        app: backend
    spec:
      containers:
        - name: backend
          image: artifact-keeper/backend:latest
          ports:
            - containerPort: 8080
          env:
            # DB_PASSWORD must be declared before DATABASE_URL so that
            # $(DB_PASSWORD) is expanded; Kubernetes only resolves references
            # to variables defined earlier in the list.
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: artifact-keeper-secrets
                  key: db-password
            - name: DATABASE_URL
              value: postgres://registry:$(DB_PASSWORD)@postgres:5432/artifact_registry
            - name: JWT_SECRET
              valueFrom:
                secretKeyRef:
                  name: artifact-keeper-secrets
                  key: jwt-secret
          envFrom:
            - configMapRef:
                name: artifact-keeper-config
          volumeMounts:
            - name: artifact-storage
              mountPath: /data/artifacts
            - name: backup-storage
              mountPath: /data/backups
          resources:
            requests:
              memory: "2Gi"
              cpu: "1000m"
            limits:
              memory: "4Gi"
              cpu: "2000m"
          livenessProbe:
            httpGet:
              path: /api/v1/health
              port: 8080
            initialDelaySeconds: 60
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /api/v1/health
              port: 8080
            initialDelaySeconds: 10
            periodSeconds: 5
      volumes:
        - name: artifact-storage
          persistentVolumeClaim:
            claimName: artifact-storage-pvc
        - name: backup-storage
          persistentVolumeClaim:
            claimName: backup-storage-pvc
Frontend Deployment
frontend-deployment.yaml:
apiVersion: v1
kind: Service
metadata:
  name: frontend
  namespace: artifact-keeper
spec:
  selector:
    app: frontend
  ports:
    - port: 80
      targetPort: 80
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  namespace: artifact-keeper
spec:
  replicas: 2
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
        - name: frontend
          image: artifact-keeper/frontend:latest
          ports:
            - containerPort: 80
          env:
            - name: REACT_APP_API_URL
              value: /api
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 10
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 80
            initialDelaySeconds: 5
            periodSeconds: 5
Persistent Volume Claims
pvc.yaml:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: artifact-storage-pvc
  namespace: artifact-keeper
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: standard
  resources:
    requests:
      storage: 100Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: backup-storage-pvc
  namespace: artifact-keeper
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: standard
  resources:
    requests:
      storage: 50Gi
Both claims request ReadWriteMany because the two backend replicas share the same volumes; the chosen storage class must therefore support RWX (for example NFS, AWS EFS, Azure Files, or GCP Filestore), which most default block-storage classes do not.
Ingress
ingress.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: artifact-keeper
  namespace: artifact-keeper
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    nginx.ingress.kubernetes.io/proxy-body-size: "10g"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - registry.example.com
      secretName: artifact-keeper-tls
  rules:
    - host: registry.example.com
      http:
        paths:
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: backend
                port:
                  number: 8080
          - path: /
            pathType: Prefix
            backend:
              service:
                name: frontend
                port:
                  number: 80
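The cert-manager.io/cluster-issuer annotation assumes a ClusterIssuer named letsencrypt-prod already exists in the cluster. If you rely on cert-manager for TLS, a minimal issuer could look like the sketch below (the contact email is a placeholder):

# ClusterIssuer sketch for the letsencrypt-prod issuer referenced above
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # Let's Encrypt production endpoint
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com            # placeholder contact address
    privateKeySecretRef:
      name: letsencrypt-prod-account-key
    solvers:
      - http01:
          ingress:
            class: nginx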
Kustomize Structure
Use Kustomize for environment-specific configurations:
deploy/k8s/
├── base/
│   ├── kustomization.yaml
│   ├── namespace.yaml
│   ├── configmap.yaml
│   ├── postgres-statefulset.yaml
│   ├── backend-deployment.yaml
│   ├── frontend-deployment.yaml
│   └── pvc.yaml
└── overlays/
    ├── development/
    │   ├── kustomization.yaml
    │   └── patches/
    ├── staging/
    │   ├── kustomization.yaml
    │   └── patches/
    └── production/
        ├── kustomization.yaml
        ├── ingress.yaml
        └── patches/
            ├── replica-count.yaml
            └── resource-limits.yaml
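The base kustomization.yaml is not listed in this guide; a minimal sketch that simply aggregates the shared manifests (file names taken from the tree above) might be:

# base/kustomization.yaml (sketch)
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
  - configmap.yaml
  - postgres-statefulset.yaml
  - backend-deployment.yaml
  - frontend-deployment.yaml
  - pvc.yaml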
overlays/production/kustomization.yaml:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: artifact-keeper
resources:
  - ../../base
  - ingress.yaml
patches:
  - path: patches/replica-count.yaml
  - path: patches/resource-limits.yaml
configMapGenerator:
  - name: artifact-keeper-config
    behavior: merge
    literals:
      - RUST_LOG=info
      - BACKUP_ENABLED=true
secretGenerator:
  - name: artifact-keeper-secrets
    files:
      - db-password=secrets/db-password.txt
      - jwt-secret=secrets/jwt-secret.txt
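The patch files referenced under patches: are not shown in this guide; as an illustration, a replica-count.yaml strategic merge patch that raises the backend to three replicas (the count is an arbitrary example) could be as small as:

# overlays/production/patches/replica-count.yaml (sketch)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: backend
  namespace: artifact-keeper
spec:
  replicas: 3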
High Availability
Multi-Zone Deployment
Distribute backend pods across availability zones by adding pod anti-affinity to the Deployment spec, for example as a patch:
spec:
  template:
    spec:
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - backend
                topologyKey: topology.kubernetes.io/zone
Horizontal Pod Autoscaler
Auto-scale based on CPU/memory:
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: backend-hpa
  namespace: artifact-keeper
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: backend
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
Storage Options
AWS EBS
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd
provisioner: ebs.csi.aws.com
parameters:
  type: gp3
  iops: "3000"
  throughput: "125"
volumeBindingMode: WaitForFirstConsumer
GCP Persistent Disk
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd
provisioner: pd.csi.storage.gke.io
parameters:
  type: pd-ssd
  replication-type: regional-pd
volumeBindingMode: WaitForFirstConsumer
Azure Disk
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd
provisioner: disk.csi.azure.com
parameters:
  skuName: Premium_LRS
volumeBindingMode: WaitForFirstConsumer
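These disk-backed classes are typically ReadWriteOnce, so they suit the PostgreSQL volumeClaimTemplate rather than the shared ReadWriteMany claims. To use one, point storageClassName at it instead of standard, for example:

# postgres-statefulset.yaml excerpt: use the custom StorageClass
  volumeClaimTemplates:
    - metadata:
        name: postgres-data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: fast-ssd   # was: standard
        resources:
          requests:
            storage: 50Gi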
Monitoring
Prometheus ServiceMonitor
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: artifact-keeper
  namespace: artifact-keeper
spec:
  selector:
    matchLabels:
      app: backend
  endpoints:
    - port: metrics
      interval: 30s
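This assumes the Prometheus Operator is installed and that the backend exposes Prometheus metrics. A ServiceMonitor selects Services by label and scrapes a named port, so the backend Service shown earlier would also need an app: backend label and a port named metrics. A sketch of those additions follows (the 9090 metrics port is an assumption; use whatever port the backend actually serves metrics on):

# Additions to the backend Service assumed by the ServiceMonitor (sketch)
metadata:
  labels:
    app: backend             # matched by the ServiceMonitor selector
spec:
  ports:
    - name: http
      port: 8080
      targetPort: 8080
    - name: metrics           # port name referenced by the ServiceMonitor
      port: 9090              # assumption: backend's metrics port
      targetPort: 9090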
Backup Strategy
CronJob for Database Backup
apiVersion: batch/v1
kind: CronJob
metadata:
  name: postgres-backup
  namespace: artifact-keeper
spec:
  schedule: "0 2 * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: backup
              image: postgres:15-alpine
              command:
                - /bin/sh
                - -c
                - |
                  pg_dump -h postgres -U registry artifact_registry | \
                    gzip > /backup/backup-$(date +%Y%m%d-%H%M%S).sql.gz
              env:
                - name: PGPASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: artifact-keeper-secrets
                      key: db-password
              volumeMounts:
                - name: backup
                  mountPath: /backup
          volumes:
            - name: backup
              persistentVolumeClaim:
                claimName: backup-storage-pvc
          restartPolicy: OnFailure
Troubleshooting
Check Pod Status
kubectl get pods -n artifact-keeper
kubectl describe pod <pod-name> -n artifact-keeper
kubectl logs <pod-name> -n artifact-keeper
Database Connection Issues
kubectl exec -it postgres-0 -n artifact-keeper -- psql -U registry -d artifact_registry
Storage Issues
kubectl get pvc -n artifact-keeper
kubectl describe pvc artifact-storage-pvc -n artifact-keeper
Scaling Issues
kubectl get hpa -n artifact-keeper
kubectl describe hpa backend-hpa -n artifact-keeper
Upgrading
# Update image tags
kubectl set image deployment/backend backend=artifact-keeper/backend:v2.0.0 -n artifact-keeper
# Or apply updated manifests
kubectl apply -k overlays/production
# Watch rollout
kubectl rollout status deployment/backend -n artifact-keeper
Production Checklist
- Configure resource limits
- Set up horizontal autoscaling
- Configure pod anti-affinity
- Enable ingress TLS
- Set up monitoring
- Configure backup CronJobs
- Test disaster recovery
- Configure network policies (see the sketch after this list)
- Set up log aggregation
- Enforce Pod Security Standards (PodSecurityPolicy was removed in Kubernetes 1.25)
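For the network-policy item, a minimal sketch could restrict inbound traffic to the backend. It assumes the NGINX ingress controller runs in a namespace labeled kubernetes.io/metadata.name=ingress-nginx; adjust the selectors to your cluster:

# Sketch: only the ingress controller and frontend pods may reach the backend
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: backend-allow-ingress
  namespace: artifact-keeper
spec:
  podSelector:
    matchLabels:
      app: backend
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: ingress-nginx   # assumption
        - podSelector:
            matchLabels:
              app: frontend
      ports:
        - protocol: TCP
          port: 8080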