# Docker Compose Deployment
Deploy Artifact Keeper using Docker Compose for a complete, containerized setup.
## Prerequisites
- Docker 20.10+ and Docker Compose 2.0+
- 4 GB RAM minimum (8 GB recommended)
- 100 GB disk space for artifacts
## Quick Start

### 1. Clone Repository
```bash
git clone https://github.com/artifact-keeper/artifact-keeper.git
cd artifact-keeper
```

### 2. Create Environment File
Create a `.env` file in the project root:
```env
# Database
DB_PASSWORD=change-me-in-production

# JWT Authentication
JWT_SECRET=generate-secure-secret-here

# Storage
STORAGE_BACKEND=filesystem
STORAGE_PATH=/data/artifacts

# Admin user (first-time setup)
ADMIN_USERNAME=admin
ADMIN_PASSWORD=admin  # Change on first login
```

Generate secure secrets:
```bash
# Generate JWT secret
openssl rand -base64 64

# Generate database password
openssl rand -base64 32
```

### 3. Start Services
```bash
docker-compose up -d
```

### 4. Verify Deployment
```bash
# Check service status
docker-compose ps

# View logs
docker-compose logs -f

# Access web UI
open http://localhost:3000

# Test API
curl http://localhost:8080/api/v1/health
```
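If you prefer to script this step, a minimal sketch such as the following polls the health endpoint until the backend responds (it assumes the default port mapping used above):

```bash
# Poll the backend health endpoint until it responds (default port 8080 assumed)
until curl -fsS http://localhost:8080/api/v1/health > /dev/null; do
  echo "Waiting for backend to become healthy..."
  sleep 5
done
echo "Backend is up."
```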
## Production Docker Compose Configuration

Create `docker-compose.yml`:
```yaml
version: '3.8'

services:
  postgres:
    image: postgres:15-alpine
    container_name: artifact-keeper-db
    environment:
      POSTGRES_DB: artifact_registry
      POSTGRES_USER: registry
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U registry"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: unless-stopped
    networks:
      - artifact-keeper

  backend:
    image: artifact-keeper/backend:latest
    container_name: artifact-keeper-backend
    environment:
      DATABASE_URL: postgres://registry:${DB_PASSWORD}@postgres:5432/artifact_registry
      JWT_SECRET: ${JWT_SECRET}
      STORAGE_BACKEND: ${STORAGE_BACKEND:-filesystem}
      STORAGE_PATH: /data/artifacts
      RUST_LOG: info
      PORT: 8080
    volumes:
      - artifact_data:/data/artifacts
      - backup_data:/data/backups
    ports:
      - "8080:8080"
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/api/v1/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: unless-stopped
    networks:
      - artifact-keeper

  frontend:
    image: artifact-keeper/frontend:latest
    container_name: artifact-keeper-frontend
    environment:
      REACT_APP_API_URL: http://localhost:8080
    ports:
      - "3000:80"
    depends_on:
      - backend
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:80"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped
    networks:
      - artifact-keeper

volumes:
  postgres_data:
    driver: local
  artifact_data:
    driver: local
  backup_data:
    driver: local

networks:
  artifact-keeper:
    driver: bridge
```
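Before starting the stack, it can help to validate the rendered configuration; `docker-compose config` merges the file with your `.env` values and reports interpolation or syntax errors:

```bash
# Validate the compose file and the interpolated environment variables
docker-compose config --quiet && echo "Compose configuration OK"
```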
## Extended Configuration with Optional Services

### With Trivy Security Scanner
Add security scanning capabilities:
```yaml
services:
  # ... existing services ...

  trivy:
    image: aquasec/trivy:latest
    container_name: artifact-keeper-trivy
    command: ["server", "--listen", "0.0.0.0:8090"]
    ports:
      - "8090:8090"
    volumes:
      - trivy_cache:/root/.cache
    restart: unless-stopped
    networks:
      - artifact-keeper

  backend:
    # ... existing config ...
    environment:
      # ... existing env vars ...
      TRIVY_ENABLED: "true"
      TRIVY_URL: http://trivy:8090

volumes:
  # ... existing volumes ...
  trivy_cache:
    driver: local
```
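Once the scanner container is up, you can confirm the server is reachable before pointing the backend at it; the check below assumes Trivy's server mode exposes its usual `/healthz` endpoint on the mapped port:

```bash
# Confirm the Trivy server answers (assumes the /healthz endpoint and port mapping above)
curl -f http://localhost:8090/healthz && echo "Trivy server OK"
```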
### With MinIO S3 Storage

Use MinIO for S3-compatible object storage:
```yaml
services:
  # ... existing services ...

  minio:
    image: minio/minio:latest
    container_name: artifact-keeper-minio
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY:-minioadmin}
    volumes:
      - minio_data:/data
    ports:
      - "9000:9000"
      - "9001:9001"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    restart: unless-stopped
    networks:
      - artifact-keeper

  backend:
    # ... existing config ...
    environment:
      # ... existing env vars ...
      STORAGE_BACKEND: s3
      S3_ENDPOINT: http://minio:9000
      S3_BUCKET: artifacts
      S3_REGION: us-east-1
      S3_ACCESS_KEY_ID: ${MINIO_ACCESS_KEY:-minioadmin}
      S3_SECRET_ACCESS_KEY: ${MINIO_SECRET_KEY:-minioadmin}
      S3_FORCE_PATH_STYLE: "true"
    depends_on:
      - minio

volumes:
  # ... existing volumes ...
  minio_data:
    driver: local
```

Create the MinIO bucket on first startup:
```bash
# Wait for MinIO to start
sleep 10

# Create bucket
docker exec artifact-keeper-minio \
  mc alias set local http://localhost:9000 minioadmin minioadmin

docker exec artifact-keeper-minio \
  mc mb local/artifacts
```
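To confirm the bucket exists and the alias was registered, list it with `mc`:

```bash
# List buckets through the alias created above
docker exec artifact-keeper-minio mc ls local
```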
### With Prometheus and Grafana

Add a monitoring stack:
```yaml
services:
  # ... existing services ...

  prometheus:
    image: prom/prometheus:latest
    container_name: artifact-keeper-prometheus
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
    ports:
      - "9090:9090"
    restart: unless-stopped
    networks:
      - artifact-keeper

  grafana:
    image: grafana/grafana:latest
    container_name: artifact-keeper-grafana
    environment:
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana/dashboards:/etc/grafana/provisioning/dashboards
    ports:
      - "3001:3000"
    depends_on:
      - prometheus
    restart: unless-stopped
    networks:
      - artifact-keeper

volumes:
  # ... existing volumes ...
  prometheus_data:
    driver: local
  grafana_data:
    driver: local
```

Create `prometheus.yml`:
```yaml
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'artifact-keeper'
    static_configs:
      - targets: ['backend:8080']
```
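Once Prometheus is up, you can check that it discovered the backend as a scrape target through its HTTP API (this assumes the backend serves metrics on the default `/metrics` path):

```bash
# Show the health of the active scrape targets
curl -s http://localhost:9090/api/v1/targets | grep -o '"health":"[^"]*"'
```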
## Building Images

### Build from Source
```bash
# Build backend
docker build -t artifact-keeper/backend:latest -f docker/Dockerfile.backend .

# Build frontend
docker build -t artifact-keeper/frontend:latest -f docker/Dockerfile.frontend .
```

### Use Pre-built Images
```bash
# Pull from Docker Hub (when available)
docker pull artifact-keeper/backend:latest
docker pull artifact-keeper/frontend:latest
```

## Managing Services
### Start Services
```bash
# Start all services
docker-compose up -d

# Start specific service
docker-compose up -d backend
```

### Stop Services
```bash
# Stop all services
docker-compose down

# Stop and remove volumes (WARNING: deletes data)
docker-compose down -v
```

### View Logs
```bash
# All services
docker-compose logs -f

# Specific service
docker-compose logs -f backend

# Last 100 lines
docker-compose logs --tail=100 backend
```

### Restart Services
```bash
# Restart all
docker-compose restart

# Restart specific service
docker-compose restart backend
```

### Update Services
```bash
# Pull latest images
docker-compose pull

# Restart with new images
docker-compose up -d
```

## Data Persistence
### Volume Management
```bash
# List volumes
docker volume ls

# Inspect volume
docker volume inspect artifact-keeper_artifact_data

# Backup volume
docker run --rm \
  -v artifact-keeper_artifact_data:/data \
  -v $(pwd):/backup \
  alpine tar czf /backup/artifacts-backup.tar.gz /data

# Restore volume
docker run --rm \
  -v artifact-keeper_artifact_data:/data \
  -v $(pwd):/backup \
  alpine tar xzf /backup/artifacts-backup.tar.gz -C /
```

### Database Backup
```bash
# Backup database
docker-compose exec postgres pg_dump -U registry artifact_registry | gzip > backup.sql.gz

# Restore database
gunzip < backup.sql.gz | docker-compose exec -T postgres psql -U registry artifact_registry
```
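For the automated-backups item on the production checklist below, one minimal approach is a small script run from cron or a systemd timer; the paths here (`/opt/artifact-keeper`, `/var/backups`) are placeholders for your own layout:

```bash
#!/usr/bin/env bash
# Nightly logical backup of the registry database (sketch; adjust paths to your setup)
set -euo pipefail
cd /opt/artifact-keeper   # directory containing docker-compose.yml
docker-compose exec -T postgres pg_dump -U registry artifact_registry \
  | gzip > "/var/backups/artifact_registry-$(date +%F).sql.gz"
```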
## Resource Limits

Configure resource constraints:
```yaml
services:
  backend:
    # ... existing config ...
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G
        reservations:
          cpus: '1'
          memory: 2G

  postgres:
    # ... existing config ...
    deploy:
      resources:
        limits:
          cpus: '1'
          memory: 2G
        reservations:
          cpus: '0.5'
          memory: 1G
```
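To check that the limits are actually applied to the running containers, `docker stats` shows live usage against the configured memory limit:

```bash
# One-shot snapshot of CPU and memory usage per container
docker stats --no-stream artifact-keeper-backend artifact-keeper-db
```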
## Reverse Proxy with Nginx

Add Nginx for SSL termination and routing:
```yaml
services:
  nginx:
    image: nginx:alpine
    container_name: artifact-keeper-nginx
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf:ro
      - ./ssl:/etc/nginx/ssl:ro
    ports:
      - "80:80"
      - "443:443"
    depends_on:
      - frontend
      - backend
    restart: unless-stopped
    networks:
      - artifact-keeper
```

Create `nginx.conf`:
```nginx
upstream backend {
    server backend:8080;
}

upstream frontend {
    server frontend:80;
}

server {
    listen 80;
    server_name registry.example.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    server_name registry.example.com;

    ssl_certificate /etc/nginx/ssl/cert.pem;
    ssl_certificate_key /etc/nginx/ssl/key.pem;

    client_max_body_size 10G;

    location /api/ {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location / {
        proxy_pass http://frontend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
```
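For local testing of the TLS setup you can generate a self-signed certificate into the `./ssl` directory mounted above; in production, use certificates from a real CA (for example Let's Encrypt):

```bash
# Self-signed certificate for local testing only
mkdir -p ssl
openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
  -keyout ssl/key.pem -out ssl/cert.pem \
  -subj "/CN=registry.example.com"
```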
## Health Checks

Monitor service health:
```bash
# Check all services
docker-compose ps

# Backend health
curl http://localhost:8080/api/v1/health

# Database health
docker-compose exec postgres pg_isready -U registry

# Frontend health
curl http://localhost:3000
```

## Troubleshooting
### Services Won't Start
```bash
# Check logs
docker-compose logs

# Verify environment variables
docker-compose config

# Check port conflicts
netstat -tulpn | grep -E ':(3000|8080|5432)'
```

### Database Connection Errors
```bash
# Verify database is running
docker-compose ps postgres

# Check database logs
docker-compose logs postgres

# Test connection
docker-compose exec postgres psql -U registry -d artifact_registry -c '\conninfo'
```

### Out of Disk Space
```bash
# Check disk usage
df -h

# Clean up Docker
docker system prune -a --volumes

# Check volume sizes
docker system df -v
```

## Production Checklist
- Change default passwords (database, admin user)
- Generate secure JWT secret
- Configure SSL/TLS certificates
- Set up automated backups
- Configure log rotation (see the sketch after this list)
- Set resource limits
- Enable monitoring
- Configure firewall rules
- Set up log aggregation
- Test disaster recovery
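As referenced in the checklist, one way to handle log rotation is at the Docker daemon level by capping the size of the default `json-file` logs. This sketch assumes you manage `/etc/docker/daemon.json` on the host; if the file already exists, merge these options into it rather than overwriting it:

```bash
# Cap container log size host-wide (restart the daemon for the change to take effect)
sudo tee /etc/docker/daemon.json > /dev/null <<'EOF'
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "50m",
    "max-file": "3"
  }
}
EOF
sudo systemctl restart docker
```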